id
int32
0
252k
repo
stringlengths
7
55
path
stringlengths
4
127
func_name
stringlengths
1
88
original_string
stringlengths
75
19.8k
language
stringclasses
1 value
code
stringlengths
75
19.8k
code_tokens
list
docstring
stringlengths
3
17.3k
docstring_tokens
list
sha
stringlengths
40
40
url
stringlengths
87
242
231,400
litaotao/IPython-Dashboard
dashboard/server/utils.py
build_response
def build_response(content, code=200):
    """Build a JSON response and attach permissive CORS headers.

    Args:
        content: dict to serialize; may carry its own 'code' key for the
            HTTP status, otherwise the ``code`` argument is used.
        code: fallback HTTP status code (default 200).

    Returns:
        Flask response with Access-Control-Allow-* headers set.
    """
    # BUG FIX: content['code'] was read unconditionally, which both ignored
    # the `code` parameter and raised KeyError when the key was absent.
    response = make_response(jsonify(content), content.get('code', code))
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Headers'] = \
        'Origin, X-Requested-With, Content-Type, Accept, Authorization'
    return response
python
def build_response(content, code=200):
    """Build a JSON response and attach permissive CORS headers.

    Args:
        content: dict to serialize; may carry its own 'code' key for the
            HTTP status, otherwise the ``code`` argument is used.
        code: fallback HTTP status code (default 200).

    Returns:
        Flask response with Access-Control-Allow-* headers set.
    """
    # BUG FIX: content['code'] was read unconditionally, which both ignored
    # the `code` parameter and raised KeyError when the key was absent.
    response = make_response(jsonify(content), content.get('code', code))
    response.headers['Access-Control-Allow-Origin'] = '*'
    response.headers['Access-Control-Allow-Headers'] = \
        'Origin, X-Requested-With, Content-Type, Accept, Authorization'
    return response
[ "def", "build_response", "(", "content", ",", "code", "=", "200", ")", ":", "response", "=", "make_response", "(", "jsonify", "(", "content", ")", ",", "content", "[", "'code'", "]", ")", "response", ".", "headers", "[", "'Access-Control-Allow-Origin'", "]", "=", "'*'", "response", ".", "headers", "[", "'Access-Control-Allow-Headers'", "]", "=", "'Origin, X-Requested-With, Content-Type, Accept, Authorization'", "return", "response" ]
Build response, add headers
[ "Build", "response", "add", "headers" ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/utils.py#L18-L24
231,401
litaotao/IPython-Dashboard
dashboard/server/resources/sql.py
SqlData.post
def post(self): '''return executed sql result to client. post data format: {"options": ['all', 'last', 'first', 'format'], "sql_raw": "raw sql ..."} Returns: sql result. ''' ## format sql data = request.get_json() options, sql_raw = data.get('options'), data.get('sql_raw') if options == 'format': sql_formmated = sqlparse.format(sql_raw, keyword_case='upper', reindent=True) return build_response(dict(data=sql_formmated, code=200)) elif options in ('all', 'selected'): conn = SQL(config.sql_host, config.sql_port, config.sql_user, config.sql_pwd, config.sql_db) result = conn.run(sql_raw) return build_response(dict(data=result, code=200)) else: pass pass
python
def post(self):
    '''Execute or format a SQL statement posted by the client.

    post data format:
        {"options": ['all', 'last', 'first', 'format'], "sql_raw": "raw sql ..."}

    Returns:
        JSON response with the formatted SQL or the query result.
    '''
    payload = request.get_json()
    options = payload.get('options')
    sql_raw = payload.get('sql_raw')

    if options == 'format':
        # Pretty-print only; nothing is executed against the database.
        formatted = sqlparse.format(sql_raw, keyword_case='upper', reindent=True)
        return build_response(dict(data=formatted, code=200))

    if options in ('all', 'selected'):
        conn = SQL(config.sql_host, config.sql_port, config.sql_user,
                   config.sql_pwd, config.sql_db)
        return build_response(dict(data=conn.run(sql_raw), code=200))

    # Any other option falls through and implicitly returns None,
    # matching the original behavior.
[ "def", "post", "(", "self", ")", ":", "## format sql", "data", "=", "request", ".", "get_json", "(", ")", "options", ",", "sql_raw", "=", "data", ".", "get", "(", "'options'", ")", ",", "data", ".", "get", "(", "'sql_raw'", ")", "if", "options", "==", "'format'", ":", "sql_formmated", "=", "sqlparse", ".", "format", "(", "sql_raw", ",", "keyword_case", "=", "'upper'", ",", "reindent", "=", "True", ")", "return", "build_response", "(", "dict", "(", "data", "=", "sql_formmated", ",", "code", "=", "200", ")", ")", "elif", "options", "in", "(", "'all'", ",", "'selected'", ")", ":", "conn", "=", "SQL", "(", "config", ".", "sql_host", ",", "config", ".", "sql_port", ",", "config", ".", "sql_user", ",", "config", ".", "sql_pwd", ",", "config", ".", "sql_db", ")", "result", "=", "conn", ".", "run", "(", "sql_raw", ")", "return", "build_response", "(", "dict", "(", "data", "=", "result", ",", "code", "=", "200", ")", ")", "else", ":", "pass", "pass" ]
return executed sql result to client. post data format: {"options": ['all', 'last', 'first', 'format'], "sql_raw": "raw sql ..."} Returns: sql result.
[ "return", "executed", "sql", "result", "to", "client", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/sql.py#L40-L75
231,402
litaotao/IPython-Dashboard
dashboard/server/resources/home.py
DashListData.get
def get(self, page=0, size=10): """Get dashboard meta info from in page `page` and page size is `size`. Args: page: page number. size: size number. Returns: list of dict containing the dash_id and accordingly meta info. maybe empty list [] when page * size > total dashes in db. that's reasonable. """ dash_list = r_db.zrevrange(config.DASH_ID_KEY, 0, -1, True) id_list = dash_list[page * size : page * size + size] dash_meta = [] data = [] if id_list: dash_meta = r_db.hmget(config.DASH_META_KEY, [i[0] for i in id_list]) data = [json.loads(i) for i in dash_meta] return build_response(dict(data=data, code=200))
python
def get(self, page=0, size=10):
    """Return one page of dashboard meta info.

    Args:
        page: zero-based page number.
        size: number of dashes per page.

    Returns:
        JSON response whose data is a list of meta-info dicts; the list is
        empty when ``page * size`` exceeds the number of stored dashes.
    """
    ranked = r_db.zrevrange(config.DASH_ID_KEY, 0, -1, True)
    start = page * size
    page_ids = ranked[start:start + size]
    data = []
    if page_ids:
        # entries are presumably (member, score) pairs — pair[0] is the dash id
        raw_meta = r_db.hmget(config.DASH_META_KEY, [pair[0] for pair in page_ids])
        data = [json.loads(item) for item in raw_meta]
    return build_response(dict(data=data, code=200))
[ "def", "get", "(", "self", ",", "page", "=", "0", ",", "size", "=", "10", ")", ":", "dash_list", "=", "r_db", ".", "zrevrange", "(", "config", ".", "DASH_ID_KEY", ",", "0", ",", "-", "1", ",", "True", ")", "id_list", "=", "dash_list", "[", "page", "*", "size", ":", "page", "*", "size", "+", "size", "]", "dash_meta", "=", "[", "]", "data", "=", "[", "]", "if", "id_list", ":", "dash_meta", "=", "r_db", ".", "hmget", "(", "config", ".", "DASH_META_KEY", ",", "[", "i", "[", "0", "]", "for", "i", "in", "id_list", "]", ")", "data", "=", "[", "json", ".", "loads", "(", "i", ")", "for", "i", "in", "dash_meta", "]", "return", "build_response", "(", "dict", "(", "data", "=", "data", ",", "code", "=", "200", ")", ")" ]
Get dashboard meta info from in page `page` and page size is `size`. Args: page: page number. size: size number. Returns: list of dict containing the dash_id and accordingly meta info. maybe empty list [] when page * size > total dashes in db. that's reasonable.
[ "Get", "dashboard", "meta", "info", "from", "in", "page", "page", "and", "page", "size", "is", "size", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/home.py#L72-L91
231,403
litaotao/IPython-Dashboard
dashboard/server/resources/storage.py
KeyList.get
def get(self): """Get key list in storage. """ keys = r_kv.keys() keys.sort() return build_response(dict(data=keys, code=200))
python
def get(self):
    """Return the sorted list of key names held in the key-value store."""
    # sorted() builds a new list; ordering matches the original in-place sort.
    return build_response(dict(data=sorted(r_kv.keys()), code=200))
[ "def", "get", "(", "self", ")", ":", "keys", "=", "r_kv", ".", "keys", "(", ")", "keys", ".", "sort", "(", ")", "return", "build_response", "(", "dict", "(", "data", "=", "keys", ",", "code", "=", "200", ")", ")" ]
Get key list in storage.
[ "Get", "key", "list", "in", "storage", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/storage.py#L23-L28
231,404
litaotao/IPython-Dashboard
dashboard/server/resources/storage.py
Key.get
def get(self, key): """Get a key-value from storage according to the key name. """ data = r_kv.get(key) # data = json.dumps(data) if isinstance(data, str) else data # data = json.loads(data) if data else {} return build_response(dict(data=data, code=200))
python
def get(self, key):
    """Fetch the value stored under ``key`` and wrap it in a JSON response.

    Args:
        key: key name to look up in the key-value store.

    Returns:
        JSON response carrying the raw stored value (None if the key is absent).
    """
    value = r_kv.get(key)
    return build_response(dict(data=value, code=200))
[ "def", "get", "(", "self", ",", "key", ")", ":", "data", "=", "r_kv", ".", "get", "(", "key", ")", "# data = json.dumps(data) if isinstance(data, str) else data", "# data = json.loads(data) if data else {}", "return", "build_response", "(", "dict", "(", "data", "=", "data", ",", "code", "=", "200", ")", ")" ]
Get a key-value from storage according to the key name.
[ "Get", "a", "key", "-", "value", "from", "storage", "according", "to", "the", "key", "name", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/storage.py#L40-L47
231,405
litaotao/IPython-Dashboard
dashboard/server/resources/dash.py
Dash.get
def get(self, dash_id): """Just return the dashboard id in the rendering html. JS will do other work [ajax and rendering] according to the dash_id. Args: dash_id: dashboard id. Returns: rendered html. """ return make_response(render_template('dashboard.html', dash_id=dash_id, api_root=config.app_host))
python
def get(self, dash_id):
    """Render the dashboard page for ``dash_id``.

    Only the id and the API root are injected server-side; client-side JS
    fetches and renders the actual dashboard data from the id.

    Args:
        dash_id: dashboard id.

    Returns:
        Rendered HTML response.
    """
    page = render_template('dashboard.html', dash_id=dash_id,
                           api_root=config.app_host)
    return make_response(page)
[ "def", "get", "(", "self", ",", "dash_id", ")", ":", "return", "make_response", "(", "render_template", "(", "'dashboard.html'", ",", "dash_id", "=", "dash_id", ",", "api_root", "=", "config", ".", "app_host", ")", ")" ]
Just return the dashboard id in the rendering html. JS will do other work [ajax and rendering] according to the dash_id. Args: dash_id: dashboard id. Returns: rendered html.
[ "Just", "return", "the", "dashboard", "id", "in", "the", "rendering", "html", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/dash.py#L25-L36
231,406
litaotao/IPython-Dashboard
dashboard/server/resources/dash.py
DashData.get
def get(self, dash_id): """Read dashboard content. Args: dash_id: dashboard id. Returns: A dict containing the content of that dashboard, not include the meta info. """ data = json.loads(r_db.hmget(config.DASH_CONTENT_KEY, dash_id)[0]) return build_response(dict(data=data, code=200))
python
def get(self, dash_id):
    """Read dashboard content (not the meta info).

    Args:
        dash_id: dashboard id.

    Returns:
        JSON response whose data is the stored content dict for that dash.
    """
    raw = r_db.hmget(config.DASH_CONTENT_KEY, dash_id)[0]
    return build_response(dict(data=json.loads(raw), code=200))
[ "def", "get", "(", "self", ",", "dash_id", ")", ":", "data", "=", "json", ".", "loads", "(", "r_db", ".", "hmget", "(", "config", ".", "DASH_CONTENT_KEY", ",", "dash_id", ")", "[", "0", "]", ")", "return", "build_response", "(", "dict", "(", "data", "=", "data", ",", "code", "=", "200", ")", ")" ]
Read dashboard content. Args: dash_id: dashboard id. Returns: A dict containing the content of that dashboard, not include the meta info.
[ "Read", "dashboard", "content", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/dash.py#L46-L56
231,407
litaotao/IPython-Dashboard
dashboard/server/resources/dash.py
DashData.put
def put(self, dash_id=0): """Update a dash meta and content, return updated dash content. Args: dash_id: dashboard id. Returns: A dict containing the updated content of that dashboard, not include the meta info. """ data = request.get_json() updated = self._update_dash(dash_id, data) return build_response(dict(data=updated, code=200))
python
def put(self, dash_id=0):
    """Update a dash's meta and content; return the updated content.

    Args:
        dash_id: dashboard id.

    Returns:
        JSON response whose data is the updated content (no meta info).
    """
    payload = request.get_json()
    result = self._update_dash(dash_id, payload)
    return build_response(dict(data=result, code=200))
[ "def", "put", "(", "self", ",", "dash_id", "=", "0", ")", ":", "data", "=", "request", ".", "get_json", "(", ")", "updated", "=", "self", ".", "_update_dash", "(", "dash_id", ",", "data", ")", "return", "build_response", "(", "dict", "(", "data", "=", "updated", ",", "code", "=", "200", ")", ")" ]
Update a dash meta and content, return updated dash content. Args: dash_id: dashboard id. Returns: A dict containing the updated content of that dashboard, not include the meta info.
[ "Update", "a", "dash", "meta", "and", "content", "return", "updated", "dash", "content", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/dash.py#L58-L69
231,408
litaotao/IPython-Dashboard
dashboard/server/resources/dash.py
DashData.delete
def delete(self, dash_id): """Delete a dash meta and content, return updated dash content. Actually, just remove it to a specfied place in database. Args: dash_id: dashboard id. Returns: Redirect to home page. """ removed_info = dict( time_modified = r_db.zscore(config.DASH_ID_KEY, dash_id), meta = r_db.hget(config.DASH_META_KEY, dash_id), content = r_db.hget(config.DASH_CONTENT_KEY, dash_id)) r_db.zrem(config.DASH_ID_KEY, dash_id) r_db.hdel(config.DASH_META_KEY, dash_id) r_db.hdel(config.DASH_CONTENT_KEY, dash_id) return {'removed_info': removed_info}
python
def delete(self, dash_id):
    """Remove a dash's score, meta, and content from the database.

    Args:
        dash_id: dashboard id.

    Returns:
        dict with a snapshot of everything that was removed.
    """
    # Capture the dash's state before deleting so the caller gets a snapshot.
    removed_info = {
        'time_modified': r_db.zscore(config.DASH_ID_KEY, dash_id),
        'meta': r_db.hget(config.DASH_META_KEY, dash_id),
        'content': r_db.hget(config.DASH_CONTENT_KEY, dash_id),
    }
    r_db.zrem(config.DASH_ID_KEY, dash_id)
    r_db.hdel(config.DASH_META_KEY, dash_id)
    r_db.hdel(config.DASH_CONTENT_KEY, dash_id)
    return {'removed_info': removed_info}
[ "def", "delete", "(", "self", ",", "dash_id", ")", ":", "removed_info", "=", "dict", "(", "time_modified", "=", "r_db", ".", "zscore", "(", "config", ".", "DASH_ID_KEY", ",", "dash_id", ")", ",", "meta", "=", "r_db", ".", "hget", "(", "config", ".", "DASH_META_KEY", ",", "dash_id", ")", ",", "content", "=", "r_db", ".", "hget", "(", "config", ".", "DASH_CONTENT_KEY", ",", "dash_id", ")", ")", "r_db", ".", "zrem", "(", "config", ".", "DASH_ID_KEY", ",", "dash_id", ")", "r_db", ".", "hdel", "(", "config", ".", "DASH_META_KEY", ",", "dash_id", ")", "r_db", ".", "hdel", "(", "config", ".", "DASH_CONTENT_KEY", ",", "dash_id", ")", "return", "{", "'removed_info'", ":", "removed_info", "}" ]
Delete a dash meta and content, return updated dash content. Actually, just remove it to a specfied place in database. Args: dash_id: dashboard id. Returns: Redirect to home page.
[ "Delete", "a", "dash", "meta", "and", "content", "return", "updated", "dash", "content", "." ]
b28a6b447c86bcec562e554efe96c64660ddf7a2
https://github.com/litaotao/IPython-Dashboard/blob/b28a6b447c86bcec562e554efe96c64660ddf7a2/dashboard/server/resources/dash.py#L71-L89
231,409
totalgood/nlpia
src/nlpia/translate.py
main
def main( lang='deu', n=900, epochs=50, batch_size=64, num_neurons=256, encoder_input_data=None, decoder_input_data=None, decoder_target_data=None, checkpoint_dir=os.path.join(BIGDATA_PATH, 'checkpoints'), ): """ Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32) Train on 360 samples, validate on 40 samples Epoch 1/3 ... >>> len(model.get_weights()) 8 # 64 common characters in German, 56 in English >>> model.get_weights()[-1].shape[0] >=50 True >>> model.get_weights()[-2].shape[0] 32 """ mkdir_p(checkpoint_dir) encoder_input_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-input-{}.npy'.format(lang)) decoder_input_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-decoder-input-{}.npy'.format(lang)) decoder_target_path = os.path.join( checkpoint_dir, 'nlpia-ch10-translate-target-{}.npy'.format('eng')) data_paths = (encoder_input_path, decoder_input_path, decoder_target_path) encoder_input_data = [] if all([os.path.isfile(p) for p in data_paths]): encoder_input_data = np.load(encoder_input_path) decoder_input_data = np.load(decoder_input_path) decoder_target_data = np.load(decoder_target_path) if len(encoder_input_data) < n: encoder_input_data, decoder_input_data, decoder_target_data = onehot_char_training_data( lang=lang, n=n, data_paths=data_paths) encoder_input_data = encoder_input_data[:n] decoder_input_data = decoder_input_data[:n] decoder_target_data = decoder_target_data[:n] model = fit(data_paths=data_paths, epochs=epochs, batch_size=batch_size, num_neurons=num_neurons) return model
python
def main(
        lang='deu',
        n=900,
        epochs=50,
        batch_size=64,
        num_neurons=256,
        encoder_input_data=None,
        decoder_input_data=None,
        decoder_target_data=None,
        checkpoint_dir=os.path.join(BIGDATA_PATH, 'checkpoints'),
        ):
    """ Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation

    >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32)
    Train on 360 samples, validate on 40 samples
    Epoch 1/3
    ...
    >>> len(model.get_weights())
    8

    # 64 common characters in German, 56 in English
    >>> model.get_weights()[-1].shape[0] >=50
    True
    >>> model.get_weights()[-2].shape[0]
    32
    """
    mkdir_p(checkpoint_dir)
    # One cached .npy file per role, stored alongside the checkpoints.
    encoder_input_path = os.path.join(
        checkpoint_dir, 'nlpia-ch10-translate-input-{}.npy'.format(lang))
    decoder_input_path = os.path.join(
        checkpoint_dir, 'nlpia-ch10-translate-decoder-input-{}.npy'.format(lang))
    decoder_target_path = os.path.join(
        checkpoint_dir, 'nlpia-ch10-translate-target-{}.npy'.format('eng'))
    data_paths = (encoder_input_path, decoder_input_path, decoder_target_path)

    # NOTE(review): data passed in through the encoder/decoder keyword
    # arguments is discarded here, exactly as in the original — confirm
    # whether callers rely on that before changing it.
    encoder_input_data = []
    if all(os.path.isfile(p) for p in data_paths):
        encoder_input_data = np.load(encoder_input_path)
        decoder_input_data = np.load(decoder_input_path)
        decoder_target_data = np.load(decoder_target_path)
    if len(encoder_input_data) < n:
        # Cache is missing or too small: regenerate the one-hot training data.
        encoder_input_data, decoder_input_data, decoder_target_data = onehot_char_training_data(
            lang=lang, n=n, data_paths=data_paths)
    encoder_input_data = encoder_input_data[:n]
    decoder_input_data = decoder_input_data[:n]
    decoder_target_data = decoder_target_data[:n]
    model = fit(data_paths=data_paths, epochs=epochs,
                batch_size=batch_size, num_neurons=num_neurons)
    return model
[ "def", "main", "(", "lang", "=", "'deu'", ",", "n", "=", "900", ",", "epochs", "=", "50", ",", "batch_size", "=", "64", ",", "num_neurons", "=", "256", ",", "encoder_input_data", "=", "None", ",", "decoder_input_data", "=", "None", ",", "decoder_target_data", "=", "None", ",", "checkpoint_dir", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'checkpoints'", ")", ",", ")", ":", "mkdir_p", "(", "checkpoint_dir", ")", "encoder_input_path", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", ",", "'nlpia-ch10-translate-input-{}.npy'", ".", "format", "(", "lang", ")", ")", "decoder_input_path", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", ",", "'nlpia-ch10-translate-decoder-input-{}.npy'", ".", "format", "(", "lang", ")", ")", "decoder_target_path", "=", "os", ".", "path", ".", "join", "(", "checkpoint_dir", ",", "'nlpia-ch10-translate-target-{}.npy'", ".", "format", "(", "'eng'", ")", ")", "data_paths", "=", "(", "encoder_input_path", ",", "decoder_input_path", ",", "decoder_target_path", ")", "encoder_input_data", "=", "[", "]", "if", "all", "(", "[", "os", ".", "path", ".", "isfile", "(", "p", ")", "for", "p", "in", "data_paths", "]", ")", ":", "encoder_input_data", "=", "np", ".", "load", "(", "encoder_input_path", ")", "decoder_input_data", "=", "np", ".", "load", "(", "decoder_input_path", ")", "decoder_target_data", "=", "np", ".", "load", "(", "decoder_target_path", ")", "if", "len", "(", "encoder_input_data", ")", "<", "n", ":", "encoder_input_data", ",", "decoder_input_data", ",", "decoder_target_data", "=", "onehot_char_training_data", "(", "lang", "=", "lang", ",", "n", "=", "n", ",", "data_paths", "=", "data_paths", ")", "encoder_input_data", "=", "encoder_input_data", "[", ":", "n", "]", "decoder_input_data", "=", "decoder_input_data", "[", ":", "n", "]", "decoder_target_data", "=", "decoder_target_data", "[", ":", "n", "]", "model", "=", "fit", "(", "data_paths", "=", "data_paths", ",", "epochs", "=", "epochs", 
",", "batch_size", "=", "batch_size", ",", "num_neurons", "=", "num_neurons", ")", "return", "model" ]
Train an LSTM encoder-decoder squence-to-sequence model on Anki flashcards for international translation >>> model = main('spa', n=400, epochs=3, batch_size=128, num_neurons=32) Train on 360 samples, validate on 40 samples Epoch 1/3 ... >>> len(model.get_weights()) 8 # 64 common characters in German, 56 in English >>> model.get_weights()[-1].shape[0] >=50 True >>> model.get_weights()[-2].shape[0] 32
[ "Train", "an", "LSTM", "encoder", "-", "decoder", "squence", "-", "to", "-", "sequence", "model", "on", "Anki", "flashcards", "for", "international", "translation" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/translate.py#L202-L248
231,410
totalgood/nlpia
src/nlpia/book/forum/boltz.py
BoltzmanMachine.energy
def energy(self, v, h=None): """Compute the global energy for the current joint state of all nodes >>> q11_4 = BoltzmanMachine(bv=[0., 0.], bh=[-2.], Whh=np.zeros((1, 1)), Wvv=np.zeros((2, 2)), Wvh=[[3.], [-1.]]) >>> q11_4.configurations() >>> v1v2h = product([0, 1], [0, 1], [0, 1]) >>> E = np.array([q11_4.energy(v=x[0:2], h=[x[2]]) for x in v1v2h]) >>> expnegE = np.exp(-E) >>> sumexpnegE = np.sum(expnegE) >>> pvh = np.array([ene / sumexpnegE for ene in expnegE]) >>> pv = [0] * len(df) >>> num_hid_states = 2 ** self.Nh >>> for i in range(len(df)): j = int(i / num_hid_states) pv[i] = sum(pvh[k] for k in range(j * num_hid_states, (j + 1) * num_hid_states)) >>> pd.DataFrame(tablify(v1v2h, -E, expnegE, pvh, pv), columns='v1 v2 h -E exp(-E) p(v,h), p(v)'.split()) """ h = np.zeros(self.Nh) if h is None else h negE = np.dot(v, self.bv) negE += np.dot(h, self.bh) for j in range(self.Nv): for i in range(j): negE += v[i] * v[j] * self.Wvv[i][j] for i in range(self.Nv): for k in range(self.Nh): negE += v[i] * h[k] * self.Wvh[i][k] for l in range(self.Nh): for k in range(l): negE += h[k] * h[l] * self.Whh[k][l] return -negE
python
def energy(self, v, h=None):
    """Compute the global energy for the joint state (v, h) of all nodes.

    Args:
        v: visible-unit states, length ``self.Nv``.
        h: hidden-unit states, length ``self.Nh``; defaults to all zeros.

    Returns:
        The (negated) accumulated energy: bias terms plus the three families
        of pairwise couplings, each unordered pair counted once.
    """
    if h is None:
        h = np.zeros(self.Nh)
    # Bias contributions first.
    neg_energy = np.dot(v, self.bv) + np.dot(h, self.bh)
    # Visible-visible couplings (i < j).
    for j in range(self.Nv):
        for i in range(j):
            neg_energy += v[i] * v[j] * self.Wvv[i][j]
    # Visible-hidden couplings (all pairs).
    for i in range(self.Nv):
        for k in range(self.Nh):
            neg_energy += v[i] * h[k] * self.Wvh[i][k]
    # Hidden-hidden couplings (k < l).
    for l in range(self.Nh):
        for k in range(l):
            neg_energy += h[k] * h[l] * self.Whh[k][l]
    return -neg_energy
[ "def", "energy", "(", "self", ",", "v", ",", "h", "=", "None", ")", ":", "h", "=", "np", ".", "zeros", "(", "self", ".", "Nh", ")", "if", "h", "is", "None", "else", "h", "negE", "=", "np", ".", "dot", "(", "v", ",", "self", ".", "bv", ")", "negE", "+=", "np", ".", "dot", "(", "h", ",", "self", ".", "bh", ")", "for", "j", "in", "range", "(", "self", ".", "Nv", ")", ":", "for", "i", "in", "range", "(", "j", ")", ":", "negE", "+=", "v", "[", "i", "]", "*", "v", "[", "j", "]", "*", "self", ".", "Wvv", "[", "i", "]", "[", "j", "]", "for", "i", "in", "range", "(", "self", ".", "Nv", ")", ":", "for", "k", "in", "range", "(", "self", ".", "Nh", ")", ":", "negE", "+=", "v", "[", "i", "]", "*", "h", "[", "k", "]", "*", "self", ".", "Wvh", "[", "i", "]", "[", "k", "]", "for", "l", "in", "range", "(", "self", ".", "Nh", ")", ":", "for", "k", "in", "range", "(", "l", ")", ":", "negE", "+=", "h", "[", "k", "]", "*", "h", "[", "l", "]", "*", "self", ".", "Whh", "[", "k", "]", "[", "l", "]", "return", "-", "negE" ]
Compute the global energy for the current joint state of all nodes >>> q11_4 = BoltzmanMachine(bv=[0., 0.], bh=[-2.], Whh=np.zeros((1, 1)), Wvv=np.zeros((2, 2)), Wvh=[[3.], [-1.]]) >>> q11_4.configurations() >>> v1v2h = product([0, 1], [0, 1], [0, 1]) >>> E = np.array([q11_4.energy(v=x[0:2], h=[x[2]]) for x in v1v2h]) >>> expnegE = np.exp(-E) >>> sumexpnegE = np.sum(expnegE) >>> pvh = np.array([ene / sumexpnegE for ene in expnegE]) >>> pv = [0] * len(df) >>> num_hid_states = 2 ** self.Nh >>> for i in range(len(df)): j = int(i / num_hid_states) pv[i] = sum(pvh[k] for k in range(j * num_hid_states, (j + 1) * num_hid_states)) >>> pd.DataFrame(tablify(v1v2h, -E, expnegE, pvh, pv), columns='v1 v2 h -E exp(-E) p(v,h), p(v)'.split())
[ "Compute", "the", "global", "energy", "for", "the", "current", "joint", "state", "of", "all", "nodes" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/forum/boltz.py#L103-L133
231,411
totalgood/nlpia
src/nlpia/book/forum/boltz.py
Hopfield.energy
def energy(self): r""" Compute the global energy for the current joint state of all nodes - sum(s[i] * b[i]) - sum([s[i]*s[j]*W[i,j] for (i, j) in product(range(N), range(N)) if i<j)]) E = − ∑ s i b i − ∑ i i< j s i s j w ij """ s, b, W, N = self.state, self.b, self.W, self.N self.E = - sum(s * b) - sum([s[i] * s[j] * W[i, j] for (i, j) in product(range(N), range(N)) if i < j]) self.low_energies[-1] = self.E self.low_energies.sort() self.high_energies[-1] = self.E self.high_energies.sort() self.high_energies = self.high_energies[::-1] return self.E
python
def energy(self):
    r"""Compute the global energy for the current joint state of all nodes.

    E = - sum(s[i] * b[i]) - sum(s[i] * s[j] * W[i, j] for i < j)

    Also records the result in the running low/high energy lists.
    """
    s, b, W, N = self.state, self.b, self.W, self.N
    pair_term = sum(s[i] * s[j] * W[i, j]
                    for (i, j) in product(range(N), range(N)) if i < j)
    self.E = -sum(s * b) - pair_term
    # Track extremes seen so far: overwrite the tail slot, then re-sort
    # (descending order for the high-energy list).
    self.low_energies[-1] = self.E
    self.low_energies.sort()
    self.high_energies[-1] = self.E
    self.high_energies.sort()
    self.high_energies = self.high_energies[::-1]
    return self.E
[ "def", "energy", "(", "self", ")", ":", "s", ",", "b", ",", "W", ",", "N", "=", "self", ".", "state", ",", "self", ".", "b", ",", "self", ".", "W", ",", "self", ".", "N", "self", ".", "E", "=", "-", "sum", "(", "s", "*", "b", ")", "-", "sum", "(", "[", "s", "[", "i", "]", "*", "s", "[", "j", "]", "*", "W", "[", "i", ",", "j", "]", "for", "(", "i", ",", "j", ")", "in", "product", "(", "range", "(", "N", ")", ",", "range", "(", "N", ")", ")", "if", "i", "<", "j", "]", ")", "self", ".", "low_energies", "[", "-", "1", "]", "=", "self", ".", "E", "self", ".", "low_energies", ".", "sort", "(", ")", "self", ".", "high_energies", "[", "-", "1", "]", "=", "self", ".", "E", "self", ".", "high_energies", ".", "sort", "(", ")", "self", ".", "high_energies", "=", "self", ".", "high_energies", "[", ":", ":", "-", "1", "]", "return", "self", ".", "E" ]
r""" Compute the global energy for the current joint state of all nodes - sum(s[i] * b[i]) - sum([s[i]*s[j]*W[i,j] for (i, j) in product(range(N), range(N)) if i<j)]) E = − ∑ s i b i − ∑ i i< j s i s j w ij
[ "r", "Compute", "the", "global", "energy", "for", "the", "current", "joint", "state", "of", "all", "nodes" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/forum/boltz.py#L210-L226
231,412
totalgood/nlpia
src/nlpia/translators.py
HyperlinkStyleCorrector.translate
def translate(self, text, to_template='{name} ({url})', from_template=None, name_matcher=None, url_matcher=None): """ Translate hyperinks into printable book style for Manning Publishing >>> translator = HyperlinkStyleCorrector() >>> adoc = 'See http://totalgood.com[Total Good] about that.' >>> translator.translate(adoc) 'See Total Good (http://totalgood.com) about that.' """ return self.replace(text, to_template=to_template, from_template=from_template, name_matcher=name_matcher, url_matcher=url_matcher)
python
def translate(self, text, to_template='{name} ({url})', from_template=None, name_matcher=None, url_matcher=None):
    """ Translate hyperinks into printable book style for Manning Publishing

    >>> translator = HyperlinkStyleCorrector()
    >>> adoc = 'See http://totalgood.com[Total Good] about that.'
    >>> translator.translate(adoc)
    'See Total Good (http://totalgood.com) about that.'
    """
    # Thin wrapper: all matching/substitution logic lives in replace().
    return self.replace(
        text,
        to_template=to_template,
        from_template=from_template,
        name_matcher=name_matcher,
        url_matcher=url_matcher,
    )
[ "def", "translate", "(", "self", ",", "text", ",", "to_template", "=", "'{name} ({url})'", ",", "from_template", "=", "None", ",", "name_matcher", "=", "None", ",", "url_matcher", "=", "None", ")", ":", "return", "self", ".", "replace", "(", "text", ",", "to_template", "=", "to_template", ",", "from_template", "=", "from_template", ",", "name_matcher", "=", "name_matcher", ",", "url_matcher", "=", "url_matcher", ")" ]
Translate hyperinks into printable book style for Manning Publishing >>> translator = HyperlinkStyleCorrector() >>> adoc = 'See http://totalgood.com[Total Good] about that.' >>> translator.translate(adoc) 'See Total Good (http://totalgood.com) about that.'
[ "Translate", "hyperinks", "into", "printable", "book", "style", "for", "Manning", "Publishing" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/translators.py#L235-L244
231,413
totalgood/nlpia
src/nlpia/scripts/cleandialog.py
main
def main(dialogpath=None):
    """Parse the state transition graph for a set of dialog-definition tables to find and fix deadends.

    Args:
        dialogpath: path to the dialog tables; when None it is taken from
            the parsed command-line arguments.

    Returns:
        The result of clean_csvs() on the normalized path.
    """
    if dialogpath is None:
        args = parse_args()
        dialogpath = args.dialogpath
    # BUG FIX: the original `else` branch read `args.dialogpath`, but `args`
    # is only bound when no path was passed in, so any explicit `dialogpath`
    # argument raised NameError. Normalize whichever path we ended up with.
    dialogpath = os.path.abspath(os.path.expanduser(dialogpath))
    return clean_csvs(dialogpath=dialogpath)
python
def main(dialogpath=None): """ Parse the state transition graph for a set of dialog-definition tables to find an fix deadends """ if dialogpath is None: args = parse_args() dialogpath = os.path.abspath(os.path.expanduser(args.dialogpath)) else: dialogpath = os.path.abspath(os.path.expanduser(args.dialogpath)) return clean_csvs(dialogpath=dialogpath)
[ "def", "main", "(", "dialogpath", "=", "None", ")", ":", "if", "dialogpath", "is", "None", ":", "args", "=", "parse_args", "(", ")", "dialogpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "args", ".", "dialogpath", ")", ")", "else", ":", "dialogpath", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "expanduser", "(", "args", ".", "dialogpath", ")", ")", "return", "clean_csvs", "(", "dialogpath", "=", "dialogpath", ")" ]
Parse the state transition graph for a set of dialog-definition tables to find an fix deadends
[ "Parse", "the", "state", "transition", "graph", "for", "a", "set", "of", "dialog", "-", "definition", "tables", "to", "find", "an", "fix", "deadends" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/cleandialog.py#L24-L31
231,414
totalgood/nlpia
src/nlpia/book/scripts/create_raw_ubuntu_dataset.py
prepare_data_maybe_download
def prepare_data_maybe_download(directory): """ Download and unpack dialogs if necessary. """ filename = 'ubuntu_dialogs.tgz' url = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz' dialogs_path = os.path.join(directory, 'dialogs') # test it there are some dialogs in the path if not os.path.exists(os.path.join(directory, "10", "1.tst")): # dialogs are missing archive_path = os.path.join(directory, filename) if not os.path.exists(archive_path): # archive missing, download it print("Downloading %s to %s" % (url, archive_path)) filepath, _ = urllib.request.urlretrieve(url, archive_path) print "Successfully downloaded " + filepath # unpack data if not os.path.exists(dialogs_path): print("Unpacking dialogs ...") with tarfile.open(archive_path) as tar: tar.extractall(path=directory) print("Archive unpacked.") return
python
def prepare_data_maybe_download(directory): """ Download and unpack dialogs if necessary. """ filename = 'ubuntu_dialogs.tgz' url = 'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz' dialogs_path = os.path.join(directory, 'dialogs') # test it there are some dialogs in the path if not os.path.exists(os.path.join(directory, "10", "1.tst")): # dialogs are missing archive_path = os.path.join(directory, filename) if not os.path.exists(archive_path): # archive missing, download it print("Downloading %s to %s" % (url, archive_path)) filepath, _ = urllib.request.urlretrieve(url, archive_path) print "Successfully downloaded " + filepath # unpack data if not os.path.exists(dialogs_path): print("Unpacking dialogs ...") with tarfile.open(archive_path) as tar: tar.extractall(path=directory) print("Archive unpacked.") return
[ "def", "prepare_data_maybe_download", "(", "directory", ")", ":", "filename", "=", "'ubuntu_dialogs.tgz'", "url", "=", "'http://cs.mcgill.ca/~jpineau/datasets/ubuntu-corpus-1.0/ubuntu_dialogs.tgz'", "dialogs_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "'dialogs'", ")", "# test it there are some dialogs in the path", "if", "not", "os", ".", "path", ".", "exists", "(", "os", ".", "path", ".", "join", "(", "directory", ",", "\"10\"", ",", "\"1.tst\"", ")", ")", ":", "# dialogs are missing", "archive_path", "=", "os", ".", "path", ".", "join", "(", "directory", ",", "filename", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "archive_path", ")", ":", "# archive missing, download it", "print", "(", "\"Downloading %s to %s\"", "%", "(", "url", ",", "archive_path", ")", ")", "filepath", ",", "_", "=", "urllib", ".", "request", ".", "urlretrieve", "(", "url", ",", "archive_path", ")", "print", "\"Successfully downloaded \"", "+", "filepath", "# unpack data", "if", "not", "os", ".", "path", ".", "exists", "(", "dialogs_path", ")", ":", "print", "(", "\"Unpacking dialogs ...\"", ")", "with", "tarfile", ".", "open", "(", "archive_path", ")", "as", "tar", ":", "tar", ".", "extractall", "(", "path", "=", "directory", ")", "print", "(", "\"Archive unpacked.\"", ")", "return" ]
Download and unpack dialogs if necessary.
[ "Download", "and", "unpack", "dialogs", "if", "necessary", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/scripts/create_raw_ubuntu_dataset.py#L252-L277
231,415
totalgood/nlpia
src/nlpia/skeleton.py
fib
def fib(n): """Fibonacci example function Args: n (int): integer Returns: int: n-th Fibonacci number """ assert n > 0 a, b = 1, 1 for i in range(n - 1): a, b = b, a + b return a
python
def fib(n): """Fibonacci example function Args: n (int): integer Returns: int: n-th Fibonacci number """ assert n > 0 a, b = 1, 1 for i in range(n - 1): a, b = b, a + b return a
[ "def", "fib", "(", "n", ")", ":", "assert", "n", ">", "0", "a", ",", "b", "=", "1", ",", "1", "for", "i", "in", "range", "(", "n", "-", "1", ")", ":", "a", ",", "b", "=", "b", ",", "a", "+", "b", "return", "a" ]
Fibonacci example function Args: n (int): integer Returns: int: n-th Fibonacci number
[ "Fibonacci", "example", "function" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/skeleton.py#L37-L50
231,416
totalgood/nlpia
src/nlpia/skeleton.py
main
def main(args): """Main entry point allowing external calls Args: args ([str]): command line parameter list """ args = parse_args(args) setup_logging(args.loglevel) _logger.debug("Starting crazy calculations...") print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n))) _logger.info("Script ends here")
python
def main(args): """Main entry point allowing external calls Args: args ([str]): command line parameter list """ args = parse_args(args) setup_logging(args.loglevel) _logger.debug("Starting crazy calculations...") print("The {}-th Fibonacci number is {}".format(args.n, fib(args.n))) _logger.info("Script ends here")
[ "def", "main", "(", "args", ")", ":", "args", "=", "parse_args", "(", "args", ")", "setup_logging", "(", "args", ".", "loglevel", ")", "_logger", ".", "debug", "(", "\"Starting crazy calculations...\"", ")", "print", "(", "\"The {}-th Fibonacci number is {}\"", ".", "format", "(", "args", ".", "n", ",", "fib", "(", "args", ".", "n", ")", ")", ")", "_logger", ".", "info", "(", "\"Script ends here\"", ")" ]
Main entry point allowing external calls Args: args ([str]): command line parameter list
[ "Main", "entry", "point", "allowing", "external", "calls" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/skeleton.py#L101-L111
231,417
totalgood/nlpia
src/nlpia/features.py
optimize_feature_power
def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]): """ Plot the correlation coefficient for various exponential scalings of input features >>> np.random.seed(314159) >>> df = pd.DataFrame() >>> df['output'] = np.random.randn(1000) >>> df['x10'] = df.output * 10 >>> df['sq'] = df.output ** 2 >>> df['sqrt'] = df.output ** .5 >>> optimize_feature_power(df, output_column_name='output').round(2) x10 sq sqrt power 2.00 -0.08 1.00 0.83 1.00 1.00 -0.08 0.97 0.80 1.00 0.90 0.99 0.50 0.97 0.83 1.00 0.25 0.93 0.76 0.99 0.10 0.89 0.71 0.97 0.01 0.86 0.67 0.95 Returns: DataFrame: columns are the input_columns from the source dataframe (df) rows are correlation with output for each attempted exponent used to scale the input features """ output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name input_column_names = [colname for colname in df.columns if output_column_name != colname] results = np.zeros((len(exponents), len(input_column_names))) for rownum, exponent in enumerate(exponents): for colnum, column_name in enumerate(input_column_names): results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name]) results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power')) # results.plot(logx=True) return results
python
def optimize_feature_power(df, output_column_name=None, exponents=[2., 1., .8, .5, .25, .1, .01]): """ Plot the correlation coefficient for various exponential scalings of input features >>> np.random.seed(314159) >>> df = pd.DataFrame() >>> df['output'] = np.random.randn(1000) >>> df['x10'] = df.output * 10 >>> df['sq'] = df.output ** 2 >>> df['sqrt'] = df.output ** .5 >>> optimize_feature_power(df, output_column_name='output').round(2) x10 sq sqrt power 2.00 -0.08 1.00 0.83 1.00 1.00 -0.08 0.97 0.80 1.00 0.90 0.99 0.50 0.97 0.83 1.00 0.25 0.93 0.76 0.99 0.10 0.89 0.71 0.97 0.01 0.86 0.67 0.95 Returns: DataFrame: columns are the input_columns from the source dataframe (df) rows are correlation with output for each attempted exponent used to scale the input features """ output_column_name = list(df.columns)[-1] if output_column_name is None else output_column_name input_column_names = [colname for colname in df.columns if output_column_name != colname] results = np.zeros((len(exponents), len(input_column_names))) for rownum, exponent in enumerate(exponents): for colnum, column_name in enumerate(input_column_names): results[rownum, colnum] = (df[output_column_name] ** exponent).corr(df[column_name]) results = pd.DataFrame(results, columns=input_column_names, index=pd.Series(exponents, name='power')) # results.plot(logx=True) return results
[ "def", "optimize_feature_power", "(", "df", ",", "output_column_name", "=", "None", ",", "exponents", "=", "[", "2.", ",", "1.", ",", ".8", ",", ".5", ",", ".25", ",", ".1", ",", ".01", "]", ")", ":", "output_column_name", "=", "list", "(", "df", ".", "columns", ")", "[", "-", "1", "]", "if", "output_column_name", "is", "None", "else", "output_column_name", "input_column_names", "=", "[", "colname", "for", "colname", "in", "df", ".", "columns", "if", "output_column_name", "!=", "colname", "]", "results", "=", "np", ".", "zeros", "(", "(", "len", "(", "exponents", ")", ",", "len", "(", "input_column_names", ")", ")", ")", "for", "rownum", ",", "exponent", "in", "enumerate", "(", "exponents", ")", ":", "for", "colnum", ",", "column_name", "in", "enumerate", "(", "input_column_names", ")", ":", "results", "[", "rownum", ",", "colnum", "]", "=", "(", "df", "[", "output_column_name", "]", "**", "exponent", ")", ".", "corr", "(", "df", "[", "column_name", "]", ")", "results", "=", "pd", ".", "DataFrame", "(", "results", ",", "columns", "=", "input_column_names", ",", "index", "=", "pd", ".", "Series", "(", "exponents", ",", "name", "=", "'power'", ")", ")", "# results.plot(logx=True)", "return", "results" ]
Plot the correlation coefficient for various exponential scalings of input features >>> np.random.seed(314159) >>> df = pd.DataFrame() >>> df['output'] = np.random.randn(1000) >>> df['x10'] = df.output * 10 >>> df['sq'] = df.output ** 2 >>> df['sqrt'] = df.output ** .5 >>> optimize_feature_power(df, output_column_name='output').round(2) x10 sq sqrt power 2.00 -0.08 1.00 0.83 1.00 1.00 -0.08 0.97 0.80 1.00 0.90 0.99 0.50 0.97 0.83 1.00 0.25 0.93 0.76 0.99 0.10 0.89 0.71 0.97 0.01 0.86 0.67 0.95 Returns: DataFrame: columns are the input_columns from the source dataframe (df) rows are correlation with output for each attempted exponent used to scale the input features
[ "Plot", "the", "correlation", "coefficient", "for", "various", "exponential", "scalings", "of", "input", "features" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/features.py#L5-L38
231,418
totalgood/nlpia
src/nlpia/highd.py
representative_sample
def representative_sample(X, num_samples, save=False): """Sample vectors in X, preferring edge cases and vectors farthest from other vectors in sample set """ X = X.values if hasattr(X, 'values') else np.array(X) N, M = X.shape rownums = np.arange(N) np.random.shuffle(rownums) idx = AnnoyIndex(M) for i, row in enumerate(X): idx.add_item(i, row) idx.build(int(np.log2(N)) + 1) if save: if isinstance(save, basestring): idxfilename = save else: idxfile = tempfile.NamedTemporaryFile(delete=False) idxfile.close() idxfilename = idxfile.name idx.save(idxfilename) idx = AnnoyIndex(M) idx.load(idxfile.name) samples = -1 * np.ones(shape=(num_samples,), dtype=int) samples[0] = rownums[0] # FIXME: some integer determined by N and num_samples and distribution j, num_nns = 0, min(1000, int(num_samples / 2. + 1)) for i in rownums: if i in samples: continue nns = idx.get_nns_by_item(i, num_nns) # FIXME: pick vector furthest from past K (K > 1) points or outside of a hypercube # (sized to uniformly fill the space) around the last sample samples[j + 1] = np.setdiff1d(nns, samples)[-1] if len(num_nns) < num_samples / 3.: num_nns = min(N, 1.3 * num_nns) j += 1 return samples
python
def representative_sample(X, num_samples, save=False): """Sample vectors in X, preferring edge cases and vectors farthest from other vectors in sample set """ X = X.values if hasattr(X, 'values') else np.array(X) N, M = X.shape rownums = np.arange(N) np.random.shuffle(rownums) idx = AnnoyIndex(M) for i, row in enumerate(X): idx.add_item(i, row) idx.build(int(np.log2(N)) + 1) if save: if isinstance(save, basestring): idxfilename = save else: idxfile = tempfile.NamedTemporaryFile(delete=False) idxfile.close() idxfilename = idxfile.name idx.save(idxfilename) idx = AnnoyIndex(M) idx.load(idxfile.name) samples = -1 * np.ones(shape=(num_samples,), dtype=int) samples[0] = rownums[0] # FIXME: some integer determined by N and num_samples and distribution j, num_nns = 0, min(1000, int(num_samples / 2. + 1)) for i in rownums: if i in samples: continue nns = idx.get_nns_by_item(i, num_nns) # FIXME: pick vector furthest from past K (K > 1) points or outside of a hypercube # (sized to uniformly fill the space) around the last sample samples[j + 1] = np.setdiff1d(nns, samples)[-1] if len(num_nns) < num_samples / 3.: num_nns = min(N, 1.3 * num_nns) j += 1 return samples
[ "def", "representative_sample", "(", "X", ",", "num_samples", ",", "save", "=", "False", ")", ":", "X", "=", "X", ".", "values", "if", "hasattr", "(", "X", ",", "'values'", ")", "else", "np", ".", "array", "(", "X", ")", "N", ",", "M", "=", "X", ".", "shape", "rownums", "=", "np", ".", "arange", "(", "N", ")", "np", ".", "random", ".", "shuffle", "(", "rownums", ")", "idx", "=", "AnnoyIndex", "(", "M", ")", "for", "i", ",", "row", "in", "enumerate", "(", "X", ")", ":", "idx", ".", "add_item", "(", "i", ",", "row", ")", "idx", ".", "build", "(", "int", "(", "np", ".", "log2", "(", "N", ")", ")", "+", "1", ")", "if", "save", ":", "if", "isinstance", "(", "save", ",", "basestring", ")", ":", "idxfilename", "=", "save", "else", ":", "idxfile", "=", "tempfile", ".", "NamedTemporaryFile", "(", "delete", "=", "False", ")", "idxfile", ".", "close", "(", ")", "idxfilename", "=", "idxfile", ".", "name", "idx", ".", "save", "(", "idxfilename", ")", "idx", "=", "AnnoyIndex", "(", "M", ")", "idx", ".", "load", "(", "idxfile", ".", "name", ")", "samples", "=", "-", "1", "*", "np", ".", "ones", "(", "shape", "=", "(", "num_samples", ",", ")", ",", "dtype", "=", "int", ")", "samples", "[", "0", "]", "=", "rownums", "[", "0", "]", "# FIXME: some integer determined by N and num_samples and distribution", "j", ",", "num_nns", "=", "0", ",", "min", "(", "1000", ",", "int", "(", "num_samples", "/", "2.", "+", "1", ")", ")", "for", "i", "in", "rownums", ":", "if", "i", "in", "samples", ":", "continue", "nns", "=", "idx", ".", "get_nns_by_item", "(", "i", ",", "num_nns", ")", "# FIXME: pick vector furthest from past K (K > 1) points or outside of a hypercube", "# (sized to uniformly fill the space) around the last sample", "samples", "[", "j", "+", "1", "]", "=", "np", ".", "setdiff1d", "(", "nns", ",", "samples", ")", "[", "-", "1", "]", "if", "len", "(", "num_nns", ")", "<", "num_samples", "/", "3.", ":", "num_nns", "=", "min", "(", "N", ",", "1.3", "*", "num_nns", ")", "j", "+=", 
"1", "return", "samples" ]
Sample vectors in X, preferring edge cases and vectors farthest from other vectors in sample set
[ "Sample", "vectors", "in", "X", "preferring", "edge", "cases", "and", "vectors", "farthest", "from", "other", "vectors", "in", "sample", "set" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/highd.py#L28-L68
231,419
totalgood/nlpia
src/nlpia/book/examples/ch03-2.py
cosine_sim
def cosine_sim(vec1, vec2): """ Since our vectors are dictionaries, lets convert them to lists for easier mathing. """ vec1 = [val for val in vec1.values()] vec2 = [val for val in vec2.values()] dot_prod = 0 for i, v in enumerate(vec1): dot_prod += v * vec2[i] mag_1 = math.sqrt(sum([x**2 for x in vec1])) mag_2 = math.sqrt(sum([x**2 for x in vec2])) return dot_prod / (mag_1 * mag_2)
python
def cosine_sim(vec1, vec2): """ Since our vectors are dictionaries, lets convert them to lists for easier mathing. """ vec1 = [val for val in vec1.values()] vec2 = [val for val in vec2.values()] dot_prod = 0 for i, v in enumerate(vec1): dot_prod += v * vec2[i] mag_1 = math.sqrt(sum([x**2 for x in vec1])) mag_2 = math.sqrt(sum([x**2 for x in vec2])) return dot_prod / (mag_1 * mag_2)
[ "def", "cosine_sim", "(", "vec1", ",", "vec2", ")", ":", "vec1", "=", "[", "val", "for", "val", "in", "vec1", ".", "values", "(", ")", "]", "vec2", "=", "[", "val", "for", "val", "in", "vec2", ".", "values", "(", ")", "]", "dot_prod", "=", "0", "for", "i", ",", "v", "in", "enumerate", "(", "vec1", ")", ":", "dot_prod", "+=", "v", "*", "vec2", "[", "i", "]", "mag_1", "=", "math", ".", "sqrt", "(", "sum", "(", "[", "x", "**", "2", "for", "x", "in", "vec1", "]", ")", ")", "mag_2", "=", "math", ".", "sqrt", "(", "sum", "(", "[", "x", "**", "2", "for", "x", "in", "vec2", "]", ")", ")", "return", "dot_prod", "/", "(", "mag_1", "*", "mag_2", ")" ]
Since our vectors are dictionaries, lets convert them to lists for easier mathing.
[ "Since", "our", "vectors", "are", "dictionaries", "lets", "convert", "them", "to", "lists", "for", "easier", "mathing", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch03-2.py#L141-L155
231,420
totalgood/nlpia
src/nlpia/models.py
LinearRegressor.fit
def fit(self, X, y): """ Compute average slope and intercept for all X, y pairs Arguments: X (np.array): model input (independent variable) y (np.array): model output (dependent variable) Returns: Linear Regression instance with `slope` and `intercept` attributes References: Based on: https://github.com/justmarkham/DAT4/blob/master/notebooks/08_linear_regression.ipynb >>> n_samples = 100 >>> X = np.arange(100).reshape((n_samples, 1)) >>> slope, intercept = 3.14159, -4.242 >>> y = 3.14 * X + np.random.randn(*X.shape) + intercept >>> line = LinearRegressor() >>> line.fit(X, y) <nlpia.models.LinearRegressor object ... >>> abs(line.slope - slope) < abs(0.02 * (slope + 1)) True >>> abs(line.intercept - intercept) < 0.2 * (abs(intercept) + 1) True """ # initial sums n = float(len(X)) sum_x = X.sum() sum_y = y.sum() sum_xy = (X * y).sum() sum_xx = (X**2).sum() # formula for w0 self.slope = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x * sum_x) / n) # formula for w1 self.intercept = sum_y / n - self.slope * (sum_x / n) return self
python
def fit(self, X, y): """ Compute average slope and intercept for all X, y pairs Arguments: X (np.array): model input (independent variable) y (np.array): model output (dependent variable) Returns: Linear Regression instance with `slope` and `intercept` attributes References: Based on: https://github.com/justmarkham/DAT4/blob/master/notebooks/08_linear_regression.ipynb >>> n_samples = 100 >>> X = np.arange(100).reshape((n_samples, 1)) >>> slope, intercept = 3.14159, -4.242 >>> y = 3.14 * X + np.random.randn(*X.shape) + intercept >>> line = LinearRegressor() >>> line.fit(X, y) <nlpia.models.LinearRegressor object ... >>> abs(line.slope - slope) < abs(0.02 * (slope + 1)) True >>> abs(line.intercept - intercept) < 0.2 * (abs(intercept) + 1) True """ # initial sums n = float(len(X)) sum_x = X.sum() sum_y = y.sum() sum_xy = (X * y).sum() sum_xx = (X**2).sum() # formula for w0 self.slope = (sum_xy - (sum_x * sum_y) / n) / (sum_xx - (sum_x * sum_x) / n) # formula for w1 self.intercept = sum_y / n - self.slope * (sum_x / n) return self
[ "def", "fit", "(", "self", ",", "X", ",", "y", ")", ":", "# initial sums", "n", "=", "float", "(", "len", "(", "X", ")", ")", "sum_x", "=", "X", ".", "sum", "(", ")", "sum_y", "=", "y", ".", "sum", "(", ")", "sum_xy", "=", "(", "X", "*", "y", ")", ".", "sum", "(", ")", "sum_xx", "=", "(", "X", "**", "2", ")", ".", "sum", "(", ")", "# formula for w0", "self", ".", "slope", "=", "(", "sum_xy", "-", "(", "sum_x", "*", "sum_y", ")", "/", "n", ")", "/", "(", "sum_xx", "-", "(", "sum_x", "*", "sum_x", ")", "/", "n", ")", "# formula for w1", "self", ".", "intercept", "=", "sum_y", "/", "n", "-", "self", ".", "slope", "*", "(", "sum_x", "/", "n", ")", "return", "self" ]
Compute average slope and intercept for all X, y pairs Arguments: X (np.array): model input (independent variable) y (np.array): model output (dependent variable) Returns: Linear Regression instance with `slope` and `intercept` attributes References: Based on: https://github.com/justmarkham/DAT4/blob/master/notebooks/08_linear_regression.ipynb >>> n_samples = 100 >>> X = np.arange(100).reshape((n_samples, 1)) >>> slope, intercept = 3.14159, -4.242 >>> y = 3.14 * X + np.random.randn(*X.shape) + intercept >>> line = LinearRegressor() >>> line.fit(X, y) <nlpia.models.LinearRegressor object ... >>> abs(line.slope - slope) < abs(0.02 * (slope + 1)) True >>> abs(line.intercept - intercept) < 0.2 * (abs(intercept) + 1) True
[ "Compute", "average", "slope", "and", "intercept", "for", "all", "X", "y", "pairs" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/models.py#L15-L54
231,421
totalgood/nlpia
src/nlpia/web.py
looks_like_url
def looks_like_url(url): """ Simplified check to see if the text appears to be a URL. Similar to `urlparse` but much more basic. Returns: True if the url str appears to be valid. False otherwise. >>> url = looks_like_url("totalgood.org") >>> bool(url) True """ if not isinstance(url, basestring): return False if not isinstance(url, basestring) or len(url) >= 1024 or not cre_url.match(url): return False return True
python
def looks_like_url(url): """ Simplified check to see if the text appears to be a URL. Similar to `urlparse` but much more basic. Returns: True if the url str appears to be valid. False otherwise. >>> url = looks_like_url("totalgood.org") >>> bool(url) True """ if not isinstance(url, basestring): return False if not isinstance(url, basestring) or len(url) >= 1024 or not cre_url.match(url): return False return True
[ "def", "looks_like_url", "(", "url", ")", ":", "if", "not", "isinstance", "(", "url", ",", "basestring", ")", ":", "return", "False", "if", "not", "isinstance", "(", "url", ",", "basestring", ")", "or", "len", "(", "url", ")", ">=", "1024", "or", "not", "cre_url", ".", "match", "(", "url", ")", ":", "return", "False", "return", "True" ]
Simplified check to see if the text appears to be a URL. Similar to `urlparse` but much more basic. Returns: True if the url str appears to be valid. False otherwise. >>> url = looks_like_url("totalgood.org") >>> bool(url) True
[ "Simplified", "check", "to", "see", "if", "the", "text", "appears", "to", "be", "a", "URL", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L68-L85
231,422
totalgood/nlpia
src/nlpia/web.py
try_parse_url
def try_parse_url(url): """ User urlparse to try to parse URL returning None on exception """ if len(url.strip()) < 4: logger.info('URL too short: {}'.format(url)) return None try: parsed_url = urlparse(url) except ValueError: logger.info('Parse URL ValueError: {}'.format(url)) return None if parsed_url.scheme: return parsed_url try: parsed_url = urlparse('http://' + parsed_url.geturl()) except ValueError: logger.info('Invalid URL for assumed http scheme: urlparse("{}") from "{}" '.format('http://' + parsed_url.geturl(), url)) return None if not parsed_url.scheme: logger.info('Unable to guess a scheme for URL: {}'.format(url)) return None return parsed_url
python
def try_parse_url(url): """ User urlparse to try to parse URL returning None on exception """ if len(url.strip()) < 4: logger.info('URL too short: {}'.format(url)) return None try: parsed_url = urlparse(url) except ValueError: logger.info('Parse URL ValueError: {}'.format(url)) return None if parsed_url.scheme: return parsed_url try: parsed_url = urlparse('http://' + parsed_url.geturl()) except ValueError: logger.info('Invalid URL for assumed http scheme: urlparse("{}") from "{}" '.format('http://' + parsed_url.geturl(), url)) return None if not parsed_url.scheme: logger.info('Unable to guess a scheme for URL: {}'.format(url)) return None return parsed_url
[ "def", "try_parse_url", "(", "url", ")", ":", "if", "len", "(", "url", ".", "strip", "(", ")", ")", "<", "4", ":", "logger", ".", "info", "(", "'URL too short: {}'", ".", "format", "(", "url", ")", ")", "return", "None", "try", ":", "parsed_url", "=", "urlparse", "(", "url", ")", "except", "ValueError", ":", "logger", ".", "info", "(", "'Parse URL ValueError: {}'", ".", "format", "(", "url", ")", ")", "return", "None", "if", "parsed_url", ".", "scheme", ":", "return", "parsed_url", "try", ":", "parsed_url", "=", "urlparse", "(", "'http://'", "+", "parsed_url", ".", "geturl", "(", ")", ")", "except", "ValueError", ":", "logger", ".", "info", "(", "'Invalid URL for assumed http scheme: urlparse(\"{}\") from \"{}\" '", ".", "format", "(", "'http://'", "+", "parsed_url", ".", "geturl", "(", ")", ",", "url", ")", ")", "return", "None", "if", "not", "parsed_url", ".", "scheme", ":", "logger", ".", "info", "(", "'Unable to guess a scheme for URL: {}'", ".", "format", "(", "url", ")", ")", "return", "None", "return", "parsed_url" ]
User urlparse to try to parse URL returning None on exception
[ "User", "urlparse", "to", "try", "to", "parse", "URL", "returning", "None", "on", "exception" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L88-L108
231,423
totalgood/nlpia
src/nlpia/web.py
get_url_filemeta
def get_url_filemeta(url): """ Request HTML for the page at the URL indicated and return the url, filename, and remote size TODO: just add remote_size and basename and filename attributes to the urlparse object instead of returning a dict >>> sorted(get_url_filemeta('mozilla.com').items()) [('filename', ''), ('hostname', 'mozilla.com'), ('path', ''), ('remote_size', -1), ('url', 'http://mozilla.com'), ('username', None)] >>> sorted(get_url_filemeta('https://duckduckgo.com/about?q=nlp').items()) [('filename', 'about'), ('hostname', 'duckduckgo.com'), ('path', '/about'), ('remote_size', -1), ('url', 'https://duckduckgo.com/about?q=nlp'), ('username', None)] >>> 1000 <= int(get_url_filemeta('en.wikipedia.org')['remote_size']) <= 200000 True """ parsed_url = try_parse_url(url) if parsed_url is None: return None if parsed_url.scheme.startswith('ftp'): return get_ftp_filemeta(parsed_url) url = parsed_url.geturl() try: r = requests.get(url, stream=True, allow_redirects=True, timeout=5) remote_size = r.headers.get('Content-Length', -1) return dict(url=url, hostname=parsed_url.hostname, path=parsed_url.path, username=parsed_url.username, remote_size=remote_size, filename=os.path.basename(parsed_url.path)) except ConnectionError: return None except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema): return None return None
python
def get_url_filemeta(url): """ Request HTML for the page at the URL indicated and return the url, filename, and remote size TODO: just add remote_size and basename and filename attributes to the urlparse object instead of returning a dict >>> sorted(get_url_filemeta('mozilla.com').items()) [('filename', ''), ('hostname', 'mozilla.com'), ('path', ''), ('remote_size', -1), ('url', 'http://mozilla.com'), ('username', None)] >>> sorted(get_url_filemeta('https://duckduckgo.com/about?q=nlp').items()) [('filename', 'about'), ('hostname', 'duckduckgo.com'), ('path', '/about'), ('remote_size', -1), ('url', 'https://duckduckgo.com/about?q=nlp'), ('username', None)] >>> 1000 <= int(get_url_filemeta('en.wikipedia.org')['remote_size']) <= 200000 True """ parsed_url = try_parse_url(url) if parsed_url is None: return None if parsed_url.scheme.startswith('ftp'): return get_ftp_filemeta(parsed_url) url = parsed_url.geturl() try: r = requests.get(url, stream=True, allow_redirects=True, timeout=5) remote_size = r.headers.get('Content-Length', -1) return dict(url=url, hostname=parsed_url.hostname, path=parsed_url.path, username=parsed_url.username, remote_size=remote_size, filename=os.path.basename(parsed_url.path)) except ConnectionError: return None except (InvalidURL, InvalidSchema, InvalidHeader, MissingSchema): return None return None
[ "def", "get_url_filemeta", "(", "url", ")", ":", "parsed_url", "=", "try_parse_url", "(", "url", ")", "if", "parsed_url", "is", "None", ":", "return", "None", "if", "parsed_url", ".", "scheme", ".", "startswith", "(", "'ftp'", ")", ":", "return", "get_ftp_filemeta", "(", "parsed_url", ")", "url", "=", "parsed_url", ".", "geturl", "(", ")", "try", ":", "r", "=", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "allow_redirects", "=", "True", ",", "timeout", "=", "5", ")", "remote_size", "=", "r", ".", "headers", ".", "get", "(", "'Content-Length'", ",", "-", "1", ")", "return", "dict", "(", "url", "=", "url", ",", "hostname", "=", "parsed_url", ".", "hostname", ",", "path", "=", "parsed_url", ".", "path", ",", "username", "=", "parsed_url", ".", "username", ",", "remote_size", "=", "remote_size", ",", "filename", "=", "os", ".", "path", ".", "basename", "(", "parsed_url", ".", "path", ")", ")", "except", "ConnectionError", ":", "return", "None", "except", "(", "InvalidURL", ",", "InvalidSchema", ",", "InvalidHeader", ",", "MissingSchema", ")", ":", "return", "None", "return", "None" ]
Request HTML for the page at the URL indicated and return the url, filename, and remote size TODO: just add remote_size and basename and filename attributes to the urlparse object instead of returning a dict >>> sorted(get_url_filemeta('mozilla.com').items()) [('filename', ''), ('hostname', 'mozilla.com'), ('path', ''), ('remote_size', -1), ('url', 'http://mozilla.com'), ('username', None)] >>> sorted(get_url_filemeta('https://duckduckgo.com/about?q=nlp').items()) [('filename', 'about'), ('hostname', 'duckduckgo.com'), ('path', '/about'), ('remote_size', -1), ('url', 'https://duckduckgo.com/about?q=nlp'), ('username', None)] >>> 1000 <= int(get_url_filemeta('en.wikipedia.org')['remote_size']) <= 200000 True
[ "Request", "HTML", "for", "the", "page", "at", "the", "URL", "indicated", "and", "return", "the", "url", "filename", "and", "remote", "size" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L111-L152
231,424
totalgood/nlpia
src/nlpia/web.py
save_response_content
def save_response_content(response, filename='data.csv', destination=os.path.curdir, chunksize=32768): """ For streaming response from requests, download the content one CHUNK at a time """ chunksize = chunksize or 32768 if os.path.sep in filename: full_destination_path = filename else: full_destination_path = os.path.join(destination, filename) full_destination_path = expand_filepath(full_destination_path) with open(full_destination_path, "wb") as f: for chunk in tqdm(response.iter_content(CHUNK_SIZE)): if chunk: # filter out keep-alive new chunks f.write(chunk) return full_destination_path
python
def save_response_content(response, filename='data.csv', destination=os.path.curdir, chunksize=32768): """ For streaming response from requests, download the content one CHUNK at a time """ chunksize = chunksize or 32768 if os.path.sep in filename: full_destination_path = filename else: full_destination_path = os.path.join(destination, filename) full_destination_path = expand_filepath(full_destination_path) with open(full_destination_path, "wb") as f: for chunk in tqdm(response.iter_content(CHUNK_SIZE)): if chunk: # filter out keep-alive new chunks f.write(chunk) return full_destination_path
[ "def", "save_response_content", "(", "response", ",", "filename", "=", "'data.csv'", ",", "destination", "=", "os", ".", "path", ".", "curdir", ",", "chunksize", "=", "32768", ")", ":", "chunksize", "=", "chunksize", "or", "32768", "if", "os", ".", "path", ".", "sep", "in", "filename", ":", "full_destination_path", "=", "filename", "else", ":", "full_destination_path", "=", "os", ".", "path", ".", "join", "(", "destination", ",", "filename", ")", "full_destination_path", "=", "expand_filepath", "(", "full_destination_path", ")", "with", "open", "(", "full_destination_path", ",", "\"wb\"", ")", "as", "f", ":", "for", "chunk", "in", "tqdm", "(", "response", ".", "iter_content", "(", "CHUNK_SIZE", ")", ")", ":", "if", "chunk", ":", "# filter out keep-alive new chunks", "f", ".", "write", "(", "chunk", ")", "return", "full_destination_path" ]
For streaming response from requests, download the content one CHUNK at a time
[ "For", "streaming", "response", "from", "requests", "download", "the", "content", "one", "CHUNK", "at", "a", "time" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L209-L221
231,425
totalgood/nlpia
src/nlpia/web.py
download_file_from_google_drive
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir): """ Download script for google drive shared links Thank you @turdus-merula and Andrew Hundt! https://stackoverflow.com/a/39225039/623735 """ if '&id=' in driveid: # https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz driveid = driveid.split('&id=')[-1] if '?id=' in driveid: # 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrainined keras model driveid = driveid.split('?id=')[-1] URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={'id': driveid}, stream=True) token = get_response_confirmation_token(response) if token: params = {'id': driveid, 'confirm': token} response = session.get(URL, params=params, stream=True) filename = filename or get_url_filename(driveid=driveid) full_destination_path = save_response_content(response, filename=fileanme, destination=destination) return os.path.abspath(destination)
python
def download_file_from_google_drive(driveid, filename=None, destination=os.path.curdir): """ Download script for google drive shared links Thank you @turdus-merula and Andrew Hundt! https://stackoverflow.com/a/39225039/623735 """ if '&id=' in driveid: # https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz driveid = driveid.split('&id=')[-1] if '?id=' in driveid: # 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrainined keras model driveid = driveid.split('?id=')[-1] URL = "https://docs.google.com/uc?export=download" session = requests.Session() response = session.get(URL, params={'id': driveid}, stream=True) token = get_response_confirmation_token(response) if token: params = {'id': driveid, 'confirm': token} response = session.get(URL, params=params, stream=True) filename = filename or get_url_filename(driveid=driveid) full_destination_path = save_response_content(response, filename=fileanme, destination=destination) return os.path.abspath(destination)
[ "def", "download_file_from_google_drive", "(", "driveid", ",", "filename", "=", "None", ",", "destination", "=", "os", ".", "path", ".", "curdir", ")", ":", "if", "'&id='", "in", "driveid", ":", "# https://drive.google.com/uc?export=download&id=0BwmD_VLjROrfM1BxdkxVaTY2bWs # dailymail_stories.tgz", "driveid", "=", "driveid", ".", "split", "(", "'&id='", ")", "[", "-", "1", "]", "if", "'?id='", "in", "driveid", ":", "# 'https://drive.google.com/open?id=14mELuzm0OvXnwjb0mzAiG-Ake9_NP_LQ' # SSD pretrainined keras model", "driveid", "=", "driveid", ".", "split", "(", "'?id='", ")", "[", "-", "1", "]", "URL", "=", "\"https://docs.google.com/uc?export=download\"", "session", "=", "requests", ".", "Session", "(", ")", "response", "=", "session", ".", "get", "(", "URL", ",", "params", "=", "{", "'id'", ":", "driveid", "}", ",", "stream", "=", "True", ")", "token", "=", "get_response_confirmation_token", "(", "response", ")", "if", "token", ":", "params", "=", "{", "'id'", ":", "driveid", ",", "'confirm'", ":", "token", "}", "response", "=", "session", ".", "get", "(", "URL", ",", "params", "=", "params", ",", "stream", "=", "True", ")", "filename", "=", "filename", "or", "get_url_filename", "(", "driveid", "=", "driveid", ")", "full_destination_path", "=", "save_response_content", "(", "response", ",", "filename", "=", "fileanme", ",", "destination", "=", "destination", ")", "return", "os", ".", "path", ".", "abspath", "(", "destination", ")" ]
Download script for google drive shared links Thank you @turdus-merula and Andrew Hundt! https://stackoverflow.com/a/39225039/623735
[ "Download", "script", "for", "google", "drive", "shared", "links" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/web.py#L224-L252
231,426
totalgood/nlpia
src/nlpia/book/examples/ch11_greetings.py
find_greeting
def find_greeting(s): """ Return the the greeting string Hi, Hello, or Yo if it occurs at the beginning of a string >>> find_greeting('Hi Mr. Turing!') 'Hi' >>> find_greeting('Hello, Rosa.') 'Hello' >>> find_greeting("Yo, what's up?") 'Yo' >>> find_greeting("Hello") 'Hello' >>> print(find_greeting("hello")) None >>> print(find_greeting("HelloWorld")) None """ if s[0] == 'H': if s[:3] in ['Hi', 'Hi ', 'Hi,', 'Hi!']: return s[:2] elif s[:6] in ['Hello', 'Hello ', 'Hello,', 'Hello!']: return s[:5] elif s[0] == 'Y': if s[1] == 'o' and s[:3] in ['Yo', 'Yo,', 'Yo ', 'Yo!']: return s[:2] return None
python
def find_greeting(s): """ Return the the greeting string Hi, Hello, or Yo if it occurs at the beginning of a string >>> find_greeting('Hi Mr. Turing!') 'Hi' >>> find_greeting('Hello, Rosa.') 'Hello' >>> find_greeting("Yo, what's up?") 'Yo' >>> find_greeting("Hello") 'Hello' >>> print(find_greeting("hello")) None >>> print(find_greeting("HelloWorld")) None """ if s[0] == 'H': if s[:3] in ['Hi', 'Hi ', 'Hi,', 'Hi!']: return s[:2] elif s[:6] in ['Hello', 'Hello ', 'Hello,', 'Hello!']: return s[:5] elif s[0] == 'Y': if s[1] == 'o' and s[:3] in ['Yo', 'Yo,', 'Yo ', 'Yo!']: return s[:2] return None
[ "def", "find_greeting", "(", "s", ")", ":", "if", "s", "[", "0", "]", "==", "'H'", ":", "if", "s", "[", ":", "3", "]", "in", "[", "'Hi'", ",", "'Hi '", ",", "'Hi,'", ",", "'Hi!'", "]", ":", "return", "s", "[", ":", "2", "]", "elif", "s", "[", ":", "6", "]", "in", "[", "'Hello'", ",", "'Hello '", ",", "'Hello,'", ",", "'Hello!'", "]", ":", "return", "s", "[", ":", "5", "]", "elif", "s", "[", "0", "]", "==", "'Y'", ":", "if", "s", "[", "1", "]", "==", "'o'", "and", "s", "[", ":", "3", "]", "in", "[", "'Yo'", ",", "'Yo,'", ",", "'Yo '", ",", "'Yo!'", "]", ":", "return", "s", "[", ":", "2", "]", "return", "None" ]
Return the the greeting string Hi, Hello, or Yo if it occurs at the beginning of a string >>> find_greeting('Hi Mr. Turing!') 'Hi' >>> find_greeting('Hello, Rosa.') 'Hello' >>> find_greeting("Yo, what's up?") 'Yo' >>> find_greeting("Hello") 'Hello' >>> print(find_greeting("hello")) None >>> print(find_greeting("HelloWorld")) None
[ "Return", "the", "the", "greeting", "string", "Hi", "Hello", "or", "Yo", "if", "it", "occurs", "at", "the", "beginning", "of", "a", "string" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch11_greetings.py#L4-L28
231,427
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
file_to_list
def file_to_list(in_file): ''' Reads file into list ''' lines = [] for line in in_file: # Strip new line line = line.strip('\n') # Ignore empty lines if line != '': # Ignore comments if line[0] != '#': lines.append(line) return lines
python
def file_to_list(in_file): ''' Reads file into list ''' lines = [] for line in in_file: # Strip new line line = line.strip('\n') # Ignore empty lines if line != '': # Ignore comments if line[0] != '#': lines.append(line) return lines
[ "def", "file_to_list", "(", "in_file", ")", ":", "lines", "=", "[", "]", "for", "line", "in", "in_file", ":", "# Strip new line", "line", "=", "line", ".", "strip", "(", "'\\n'", ")", "# Ignore empty lines", "if", "line", "!=", "''", ":", "# Ignore comments", "if", "line", "[", "0", "]", "!=", "'#'", ":", "lines", ".", "append", "(", "line", ")", "return", "lines" ]
Reads file into list
[ "Reads", "file", "into", "list" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L39-L52
231,428
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
CompoundRule.add_flag_values
def add_flag_values(self, entry, flag): ''' Adds flag value to applicable compounds ''' if flag in self.flags: self.flags[flag].append(entry)
python
def add_flag_values(self, entry, flag): ''' Adds flag value to applicable compounds ''' if flag in self.flags: self.flags[flag].append(entry)
[ "def", "add_flag_values", "(", "self", ",", "entry", ",", "flag", ")", ":", "if", "flag", "in", "self", ".", "flags", ":", "self", ".", "flags", "[", "flag", "]", ".", "append", "(", "entry", ")" ]
Adds flag value to applicable compounds
[ "Adds", "flag", "value", "to", "applicable", "compounds" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L110-L113
231,429
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
CompoundRule.get_regex
def get_regex(self): ''' Generates and returns compound regular expression ''' regex = '' for flag in self.compound: if flag == '?' or flag == '*': regex += flag else: regex += '(' + '|'.join(self.flags[flag]) + ')' return regex
python
def get_regex(self): ''' Generates and returns compound regular expression ''' regex = '' for flag in self.compound: if flag == '?' or flag == '*': regex += flag else: regex += '(' + '|'.join(self.flags[flag]) + ')' return regex
[ "def", "get_regex", "(", "self", ")", ":", "regex", "=", "''", "for", "flag", "in", "self", ".", "compound", ":", "if", "flag", "==", "'?'", "or", "flag", "==", "'*'", ":", "regex", "+=", "flag", "else", ":", "regex", "+=", "'('", "+", "'|'", ".", "join", "(", "self", ".", "flags", "[", "flag", "]", ")", "+", "')'", "return", "regex" ]
Generates and returns compound regular expression
[ "Generates", "and", "returns", "compound", "regular", "expression" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L115-L124
231,430
totalgood/nlpia
src/nlpia/scripts/hunspell_to_json.py
DICT.__parse_dict
def __parse_dict(self): ''' Parses dictionary with according rules ''' i = 0 lines = self.lines for line in lines: line = line.split('/') word = line[0] flags = line[1] if len(line) > 1 else None # Base Word self.num_words += 1 if flags != None: # Derivatives possible for flag in flags: # Compound? if flag in self.aff.compound_flags or flag == self.aff.only_in_compound_flag: for rule in self.aff.compound_rules: rule.add_flag_values(word, flag) else: # No Suggest flags if self.aff.no_suggest_flag == flag: pass else: affix_rule_entries = self.aff.affix_rules[flag] # Get flag that meets condition for i in range(len(affix_rule_entries)): rule = affix_rule_entries[i] if rule.meets_condition(word): # Add word to list if does not already exist if word not in self.words: self.words[word] = [] # Derivatives self.num_words += 1 if self.format == "addsub": add_sub = rule.generate_add_sub() # Add to list of keys if add_sub not in self.keys: self.keys.append(add_sub) # Check if key is to be generated if self.key: self.words[word].append(str(self.keys.index(add_sub))) else: # Generate addsub next to base word self.words[word].append(rule.generate_add_sub()) else: # Default, insert complete derivative word self.words[word].append(rule.create_derivative(word)) else: # No derivatives. self.words[word] = [] # Create regular expression from compounds for rule in self.aff.compound_rules: # Add to list self.regex_compounds.append(rule.get_regex())
python
def __parse_dict(self): ''' Parses dictionary with according rules ''' i = 0 lines = self.lines for line in lines: line = line.split('/') word = line[0] flags = line[1] if len(line) > 1 else None # Base Word self.num_words += 1 if flags != None: # Derivatives possible for flag in flags: # Compound? if flag in self.aff.compound_flags or flag == self.aff.only_in_compound_flag: for rule in self.aff.compound_rules: rule.add_flag_values(word, flag) else: # No Suggest flags if self.aff.no_suggest_flag == flag: pass else: affix_rule_entries = self.aff.affix_rules[flag] # Get flag that meets condition for i in range(len(affix_rule_entries)): rule = affix_rule_entries[i] if rule.meets_condition(word): # Add word to list if does not already exist if word not in self.words: self.words[word] = [] # Derivatives self.num_words += 1 if self.format == "addsub": add_sub = rule.generate_add_sub() # Add to list of keys if add_sub not in self.keys: self.keys.append(add_sub) # Check if key is to be generated if self.key: self.words[word].append(str(self.keys.index(add_sub))) else: # Generate addsub next to base word self.words[word].append(rule.generate_add_sub()) else: # Default, insert complete derivative word self.words[word].append(rule.create_derivative(word)) else: # No derivatives. self.words[word] = [] # Create regular expression from compounds for rule in self.aff.compound_rules: # Add to list self.regex_compounds.append(rule.get_regex())
[ "def", "__parse_dict", "(", "self", ")", ":", "i", "=", "0", "lines", "=", "self", ".", "lines", "for", "line", "in", "lines", ":", "line", "=", "line", ".", "split", "(", "'/'", ")", "word", "=", "line", "[", "0", "]", "flags", "=", "line", "[", "1", "]", "if", "len", "(", "line", ")", ">", "1", "else", "None", "# Base Word", "self", ".", "num_words", "+=", "1", "if", "flags", "!=", "None", ":", "# Derivatives possible", "for", "flag", "in", "flags", ":", "# Compound?", "if", "flag", "in", "self", ".", "aff", ".", "compound_flags", "or", "flag", "==", "self", ".", "aff", ".", "only_in_compound_flag", ":", "for", "rule", "in", "self", ".", "aff", ".", "compound_rules", ":", "rule", ".", "add_flag_values", "(", "word", ",", "flag", ")", "else", ":", "# No Suggest flags", "if", "self", ".", "aff", ".", "no_suggest_flag", "==", "flag", ":", "pass", "else", ":", "affix_rule_entries", "=", "self", ".", "aff", ".", "affix_rules", "[", "flag", "]", "# Get flag that meets condition", "for", "i", "in", "range", "(", "len", "(", "affix_rule_entries", ")", ")", ":", "rule", "=", "affix_rule_entries", "[", "i", "]", "if", "rule", ".", "meets_condition", "(", "word", ")", ":", "# Add word to list if does not already exist", "if", "word", "not", "in", "self", ".", "words", ":", "self", ".", "words", "[", "word", "]", "=", "[", "]", "# Derivatives", "self", ".", "num_words", "+=", "1", "if", "self", ".", "format", "==", "\"addsub\"", ":", "add_sub", "=", "rule", ".", "generate_add_sub", "(", ")", "# Add to list of keys", "if", "add_sub", "not", "in", "self", ".", "keys", ":", "self", ".", "keys", ".", "append", "(", "add_sub", ")", "# Check if key is to be generated", "if", "self", ".", "key", ":", "self", ".", "words", "[", "word", "]", ".", "append", "(", "str", "(", "self", ".", "keys", ".", "index", "(", "add_sub", ")", ")", ")", "else", ":", "# Generate addsub next to base word", "self", ".", "words", "[", "word", "]", ".", "append", "(", "rule", ".", "generate_add_sub", 
"(", ")", ")", "else", ":", "# Default, insert complete derivative word", "self", ".", "words", "[", "word", "]", ".", "append", "(", "rule", ".", "create_derivative", "(", "word", ")", ")", "else", ":", "# No derivatives.", "self", ".", "words", "[", "word", "]", "=", "[", "]", "# Create regular expression from compounds", "for", "rule", "in", "self", ".", "aff", ".", "compound_rules", ":", "# Add to list", "self", ".", "regex_compounds", ".", "append", "(", "rule", ".", "get_regex", "(", ")", ")" ]
Parses dictionary with according rules
[ "Parses", "dictionary", "with", "according", "rules" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/hunspell_to_json.py#L282-L343
231,431
totalgood/nlpia
src/nlpia/loaders.py
load_imdb_df
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))): """ Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper... 
""" dfs = {} for subdirs in tqdm(list(product(*subdirectories))): urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1])) if not os.path.isfile(urlspath): if subdirs != ('test', 'unsup'): # test/ dir doesn't usually have an unsup subdirectory logger.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath)) continue df = pd.read_csv(urlspath, header=None, names=['url']) # df.index.name = 'id' df['url'] = series_strip(df.url, endswith='/usercomments') textsdir = os.path.join(dirpath, subdirs[0], subdirs[1]) if not os.path.isdir(textsdir): logger.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir)) continue filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')] df['index0'] = subdirs[0] # TODO: column names more generic so will work on other datasets df['index1'] = subdirs[1] df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames]) df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames]) texts = [] for fn in filenames: with ensure_open(os.path.join(textsdir, fn)) as f: texts.append(f.read()) df['text'] = np.array(texts) del texts df.set_index('index0 index1 index2'.split(), inplace=True) df.sort_index(inplace=True) dfs[subdirs] = df return pd.concat(dfs.values())
python
def load_imdb_df(dirpath=os.path.join(BIGDATA_PATH, 'aclImdb'), subdirectories=(('train', 'test'), ('pos', 'neg', 'unsup'))): """ Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper... 
""" dfs = {} for subdirs in tqdm(list(product(*subdirectories))): urlspath = os.path.join(dirpath, subdirs[0], 'urls_{}.txt'.format(subdirs[1])) if not os.path.isfile(urlspath): if subdirs != ('test', 'unsup'): # test/ dir doesn't usually have an unsup subdirectory logger.warning('Unable to find expected IMDB review list of URLs: {}'.format(urlspath)) continue df = pd.read_csv(urlspath, header=None, names=['url']) # df.index.name = 'id' df['url'] = series_strip(df.url, endswith='/usercomments') textsdir = os.path.join(dirpath, subdirs[0], subdirs[1]) if not os.path.isdir(textsdir): logger.warning('Unable to find expected IMDB review text subdirectory: {}'.format(textsdir)) continue filenames = [fn for fn in os.listdir(textsdir) if fn.lower().endswith('.txt')] df['index0'] = subdirs[0] # TODO: column names more generic so will work on other datasets df['index1'] = subdirs[1] df['index2'] = np.array([int(fn[:-4].split('_')[0]) for fn in filenames]) df['rating'] = np.array([int(fn[:-4].split('_')[1]) for fn in filenames]) texts = [] for fn in filenames: with ensure_open(os.path.join(textsdir, fn)) as f: texts.append(f.read()) df['text'] = np.array(texts) del texts df.set_index('index0 index1 index2'.split(), inplace=True) df.sort_index(inplace=True) dfs[subdirs] = df return pd.concat(dfs.values())
[ "def", "load_imdb_df", "(", "dirpath", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'aclImdb'", ")", ",", "subdirectories", "=", "(", "(", "'train'", ",", "'test'", ")", ",", "(", "'pos'", ",", "'neg'", ",", "'unsup'", ")", ")", ")", ":", "dfs", "=", "{", "}", "for", "subdirs", "in", "tqdm", "(", "list", "(", "product", "(", "*", "subdirectories", ")", ")", ")", ":", "urlspath", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "subdirs", "[", "0", "]", ",", "'urls_{}.txt'", ".", "format", "(", "subdirs", "[", "1", "]", ")", ")", "if", "not", "os", ".", "path", ".", "isfile", "(", "urlspath", ")", ":", "if", "subdirs", "!=", "(", "'test'", ",", "'unsup'", ")", ":", "# test/ dir doesn't usually have an unsup subdirectory", "logger", ".", "warning", "(", "'Unable to find expected IMDB review list of URLs: {}'", ".", "format", "(", "urlspath", ")", ")", "continue", "df", "=", "pd", ".", "read_csv", "(", "urlspath", ",", "header", "=", "None", ",", "names", "=", "[", "'url'", "]", ")", "# df.index.name = 'id'", "df", "[", "'url'", "]", "=", "series_strip", "(", "df", ".", "url", ",", "endswith", "=", "'/usercomments'", ")", "textsdir", "=", "os", ".", "path", ".", "join", "(", "dirpath", ",", "subdirs", "[", "0", "]", ",", "subdirs", "[", "1", "]", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "textsdir", ")", ":", "logger", ".", "warning", "(", "'Unable to find expected IMDB review text subdirectory: {}'", ".", "format", "(", "textsdir", ")", ")", "continue", "filenames", "=", "[", "fn", "for", "fn", "in", "os", ".", "listdir", "(", "textsdir", ")", "if", "fn", ".", "lower", "(", ")", ".", "endswith", "(", "'.txt'", ")", "]", "df", "[", "'index0'", "]", "=", "subdirs", "[", "0", "]", "# TODO: column names more generic so will work on other datasets", "df", "[", "'index1'", "]", "=", "subdirs", "[", "1", "]", "df", "[", "'index2'", "]", "=", "np", ".", "array", "(", "[", "int", "(", "fn", "[", ":", "-", "4", "]", ".", "split", 
"(", "'_'", ")", "[", "0", "]", ")", "for", "fn", "in", "filenames", "]", ")", "df", "[", "'rating'", "]", "=", "np", ".", "array", "(", "[", "int", "(", "fn", "[", ":", "-", "4", "]", ".", "split", "(", "'_'", ")", "[", "1", "]", ")", "for", "fn", "in", "filenames", "]", ")", "texts", "=", "[", "]", "for", "fn", "in", "filenames", ":", "with", "ensure_open", "(", "os", ".", "path", ".", "join", "(", "textsdir", ",", "fn", ")", ")", "as", "f", ":", "texts", ".", "append", "(", "f", ".", "read", "(", ")", ")", "df", "[", "'text'", "]", "=", "np", ".", "array", "(", "texts", ")", "del", "texts", "df", ".", "set_index", "(", "'index0 index1 index2'", ".", "split", "(", ")", ",", "inplace", "=", "True", ")", "df", ".", "sort_index", "(", "inplace", "=", "True", ")", "dfs", "[", "subdirs", "]", "=", "df", "return", "pd", ".", "concat", "(", "dfs", ".", "values", "(", ")", ")" ]
Walk directory tree starting at `path` to compile a DataFrame of movie review text labeled with their 1-10 star ratings Returns: DataFrame: columns=['url', 'rating', 'text'], index=MultiIndex(['train_test', 'pos_neg_unsup', 'id']) TODO: Make this more robust/general by allowing the subdirectories to be None and find all the subdirs containing txt files >> imdb_df().head() url rating text index0 index1 index2 train pos 0 http://www.imdb.com/title/tt0453418 9 Bromwell High is a cartoon comedy. It ran at t... 1 http://www.imdb.com/title/tt0210075 7 If you like adult comedy cartoons, like South ... 2 http://www.imdb.com/title/tt0085688 9 Bromwell High is nothing short of brilliant. E... 3 http://www.imdb.com/title/tt0033022 10 "All the world's a stage and its people actors... 4 http://www.imdb.com/title/tt0043137 8 FUTZ is the only show preserved from the exper...
[ "Walk", "directory", "tree", "starting", "at", "path", "to", "compile", "a", "DataFrame", "of", "movie", "review", "text", "labeled", "with", "their", "1", "-", "10", "star", "ratings" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L108-L155
231,432
totalgood/nlpia
src/nlpia/loaders.py
load_glove
def load_glove(filepath, batch_size=1000, limit=None, verbose=True): r""" Load a pretrained GloVE word vector model First header line of GloVE text file should look like: 400000 50\n First vector of GloVE text file should look like: the .12 .22 .32 .42 ... .42 >>> wv = load_glove(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> wv.most_similar('and')[:3] [(',', 0.92...), ('.', 0.91...), ('of', 0.86...)] """ num_dim = isglove(filepath) tqdm_prog = tqdm if verbose else no_tqdm wv = KeyedVectors(num_dim) if limit: vocab_size = int(limit) else: with ensure_open(filepath) as fin: for i, line in enumerate(fin): pass vocab_size = i + 1 wv.vectors = np.zeros((vocab_size, num_dim), REAL) with ensure_open(filepath) as fin: batch, words = [], [] for i, line in enumerate(tqdm_prog(fin, total=vocab_size)): line = line.split() word = line[0] vector = np.array(line[1:]).astype(float) # words.append(word) # batch.append(vector) wv.index2word.append(word) wv.vocab[word] = Vocab(index=i, count=vocab_size - i) wv.vectors[i] = vector if len(words) >= batch_size: # wv[words] = np.array(batch) batch, words = [], [] if i >= vocab_size - 1: break if words: wv[words] = np.array(batch) return wv
python
def load_glove(filepath, batch_size=1000, limit=None, verbose=True): r""" Load a pretrained GloVE word vector model First header line of GloVE text file should look like: 400000 50\n First vector of GloVE text file should look like: the .12 .22 .32 .42 ... .42 >>> wv = load_glove(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> wv.most_similar('and')[:3] [(',', 0.92...), ('.', 0.91...), ('of', 0.86...)] """ num_dim = isglove(filepath) tqdm_prog = tqdm if verbose else no_tqdm wv = KeyedVectors(num_dim) if limit: vocab_size = int(limit) else: with ensure_open(filepath) as fin: for i, line in enumerate(fin): pass vocab_size = i + 1 wv.vectors = np.zeros((vocab_size, num_dim), REAL) with ensure_open(filepath) as fin: batch, words = [], [] for i, line in enumerate(tqdm_prog(fin, total=vocab_size)): line = line.split() word = line[0] vector = np.array(line[1:]).astype(float) # words.append(word) # batch.append(vector) wv.index2word.append(word) wv.vocab[word] = Vocab(index=i, count=vocab_size - i) wv.vectors[i] = vector if len(words) >= batch_size: # wv[words] = np.array(batch) batch, words = [], [] if i >= vocab_size - 1: break if words: wv[words] = np.array(batch) return wv
[ "def", "load_glove", "(", "filepath", ",", "batch_size", "=", "1000", ",", "limit", "=", "None", ",", "verbose", "=", "True", ")", ":", "num_dim", "=", "isglove", "(", "filepath", ")", "tqdm_prog", "=", "tqdm", "if", "verbose", "else", "no_tqdm", "wv", "=", "KeyedVectors", "(", "num_dim", ")", "if", "limit", ":", "vocab_size", "=", "int", "(", "limit", ")", "else", ":", "with", "ensure_open", "(", "filepath", ")", "as", "fin", ":", "for", "i", ",", "line", "in", "enumerate", "(", "fin", ")", ":", "pass", "vocab_size", "=", "i", "+", "1", "wv", ".", "vectors", "=", "np", ".", "zeros", "(", "(", "vocab_size", ",", "num_dim", ")", ",", "REAL", ")", "with", "ensure_open", "(", "filepath", ")", "as", "fin", ":", "batch", ",", "words", "=", "[", "]", ",", "[", "]", "for", "i", ",", "line", "in", "enumerate", "(", "tqdm_prog", "(", "fin", ",", "total", "=", "vocab_size", ")", ")", ":", "line", "=", "line", ".", "split", "(", ")", "word", "=", "line", "[", "0", "]", "vector", "=", "np", ".", "array", "(", "line", "[", "1", ":", "]", ")", ".", "astype", "(", "float", ")", "# words.append(word)", "# batch.append(vector)", "wv", ".", "index2word", ".", "append", "(", "word", ")", "wv", ".", "vocab", "[", "word", "]", "=", "Vocab", "(", "index", "=", "i", ",", "count", "=", "vocab_size", "-", "i", ")", "wv", ".", "vectors", "[", "i", "]", "=", "vector", "if", "len", "(", "words", ")", ">=", "batch_size", ":", "# wv[words] = np.array(batch)", "batch", ",", "words", "=", "[", "]", ",", "[", "]", "if", "i", ">=", "vocab_size", "-", "1", ":", "break", "if", "words", ":", "wv", "[", "words", "]", "=", "np", ".", "array", "(", "batch", ")", "return", "wv" ]
r""" Load a pretrained GloVE word vector model First header line of GloVE text file should look like: 400000 50\n First vector of GloVE text file should look like: the .12 .22 .32 .42 ... .42 >>> wv = load_glove(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> wv.most_similar('and')[:3] [(',', 0.92...), ('.', 0.91...), ('of', 0.86...)]
[ "r", "Load", "a", "pretrained", "GloVE", "word", "vector", "model" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L158-L204
231,433
totalgood/nlpia
src/nlpia/loaders.py
load_glove_df
def load_glove_df(filepath, **kwargs): """ Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64 """ pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python') pdkwargs.update(kwargs) return pd.read_csv(filepath, **pdkwargs)
python
def load_glove_df(filepath, **kwargs): """ Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64 """ pdkwargs = dict(index_col=0, header=None, sep=r'\s', skiprows=[0], verbose=False, engine='python') pdkwargs.update(kwargs) return pd.read_csv(filepath, **pdkwargs)
[ "def", "load_glove_df", "(", "filepath", ",", "*", "*", "kwargs", ")", ":", "pdkwargs", "=", "dict", "(", "index_col", "=", "0", ",", "header", "=", "None", ",", "sep", "=", "r'\\s'", ",", "skiprows", "=", "[", "0", "]", ",", "verbose", "=", "False", ",", "engine", "=", "'python'", ")", "pdkwargs", ".", "update", "(", "kwargs", ")", "return", "pd", ".", "read_csv", "(", "filepath", ",", "*", "*", "pdkwargs", ")" ]
Load a GloVE-format text file into a dataframe >>> df = load_glove_df(os.path.join(BIGDATA_PATH, 'glove_test.txt')) >>> df.index[:3] Index(['the', ',', '.'], dtype='object', name=0) >>> df.iloc[0][:3] 1 0.41800 2 0.24968 3 -0.41242 Name: the, dtype: float64
[ "Load", "a", "GloVE", "-", "format", "text", "file", "into", "a", "dataframe" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L207-L221
231,434
totalgood/nlpia
src/nlpia/loaders.py
get_en2fr
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'): """ Download and parse English->French translation dataset used in Keras seq2seq example """ download_unzip(url) return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split())
python
def get_en2fr(url='http://www.manythings.org/anki/fra-eng.zip'): """ Download and parse English->French translation dataset used in Keras seq2seq example """ download_unzip(url) return pd.read_table(url, compression='zip', header=None, skip_blank_lines=True, sep='\t', skiprows=0, names='en fr'.split())
[ "def", "get_en2fr", "(", "url", "=", "'http://www.manythings.org/anki/fra-eng.zip'", ")", ":", "download_unzip", "(", "url", ")", "return", "pd", ".", "read_table", "(", "url", ",", "compression", "=", "'zip'", ",", "header", "=", "None", ",", "skip_blank_lines", "=", "True", ",", "sep", "=", "'\\t'", ",", "skiprows", "=", "0", ",", "names", "=", "'en fr'", ".", "split", "(", ")", ")" ]
Download and parse English->French translation dataset used in Keras seq2seq example
[ "Download", "and", "parse", "English", "-", ">", "French", "translation", "dataset", "used", "in", "Keras", "seq2seq", "example" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L233-L236
231,435
totalgood/nlpia
src/nlpia/loaders.py
load_anki_df
def load_anki_df(language='deu'): """ Load into a DataFrame statements in one language along with their translation into English >>> get_data('zsm').head(1) eng zsm 0 Are you new? Awak baru? """ if os.path.isfile(language): filepath = language lang = re.search('[a-z]{3}-eng/', filepath).group()[:3].lower() else: lang = (language or 'deu').lower()[:3] filepath = os.path.join(BIGDATA_PATH, '{}-eng'.format(lang), '{}.txt'.format(lang)) df = pd.read_table(filepath, skiprows=1, header=None) df.columns = ['eng', lang] return df
python
def load_anki_df(language='deu'): """ Load into a DataFrame statements in one language along with their translation into English >>> get_data('zsm').head(1) eng zsm 0 Are you new? Awak baru? """ if os.path.isfile(language): filepath = language lang = re.search('[a-z]{3}-eng/', filepath).group()[:3].lower() else: lang = (language or 'deu').lower()[:3] filepath = os.path.join(BIGDATA_PATH, '{}-eng'.format(lang), '{}.txt'.format(lang)) df = pd.read_table(filepath, skiprows=1, header=None) df.columns = ['eng', lang] return df
[ "def", "load_anki_df", "(", "language", "=", "'deu'", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "language", ")", ":", "filepath", "=", "language", "lang", "=", "re", ".", "search", "(", "'[a-z]{3}-eng/'", ",", "filepath", ")", ".", "group", "(", ")", "[", ":", "3", "]", ".", "lower", "(", ")", "else", ":", "lang", "=", "(", "language", "or", "'deu'", ")", ".", "lower", "(", ")", "[", ":", "3", "]", "filepath", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'{}-eng'", ".", "format", "(", "lang", ")", ",", "'{}.txt'", ".", "format", "(", "lang", ")", ")", "df", "=", "pd", ".", "read_table", "(", "filepath", ",", "skiprows", "=", "1", ",", "header", "=", "None", ")", "df", ".", "columns", "=", "[", "'eng'", ",", "lang", "]", "return", "df" ]
Load into a DataFrame statements in one language along with their translation into English >>> get_data('zsm').head(1) eng zsm 0 Are you new? Awak baru?
[ "Load", "into", "a", "DataFrame", "statements", "in", "one", "language", "along", "with", "their", "translation", "into", "English" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L239-L254
231,436
totalgood/nlpia
src/nlpia/loaders.py
generate_big_urls_glove
def generate_big_urls_glove(bigurls=None): """ Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality """ bigurls = bigurls or {} for num_dim in (50, 100, 200, 300): # not all of these dimensionality, and training set size combinations were trained by Stanford for suffixes, num_words in zip( ('sm -sm _sm -small _small'.split(), 'med -med _med -medium _medium'.split(), 'lg -lg _lg -large _large'.split()), (6, 42, 840) ): for suf in suffixes[:-1]: name = 'glove' + suf + str(num_dim) dirname = 'glove.{num_words}B'.format(num_words=num_words) # glove.42B.300d.w2v.txt filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim) # seed the alias named URL with the URL for that training set size's canonical name bigurl_tuple = BIG_URLS['glove' + suffixes[-1]] bigurls[name] = list(bigurl_tuple[:2]) bigurls[name].append(os.path.join(dirname, filename)) bigurls[name].append(load_glove) bigurls[name] = tuple(bigurls[name]) return bigurls
python
def generate_big_urls_glove(bigurls=None): """ Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality """ bigurls = bigurls or {} for num_dim in (50, 100, 200, 300): # not all of these dimensionality, and training set size combinations were trained by Stanford for suffixes, num_words in zip( ('sm -sm _sm -small _small'.split(), 'med -med _med -medium _medium'.split(), 'lg -lg _lg -large _large'.split()), (6, 42, 840) ): for suf in suffixes[:-1]: name = 'glove' + suf + str(num_dim) dirname = 'glove.{num_words}B'.format(num_words=num_words) # glove.42B.300d.w2v.txt filename = dirname + '.{num_dim}d.w2v.txt'.format(num_dim=num_dim) # seed the alias named URL with the URL for that training set size's canonical name bigurl_tuple = BIG_URLS['glove' + suffixes[-1]] bigurls[name] = list(bigurl_tuple[:2]) bigurls[name].append(os.path.join(dirname, filename)) bigurls[name].append(load_glove) bigurls[name] = tuple(bigurls[name]) return bigurls
[ "def", "generate_big_urls_glove", "(", "bigurls", "=", "None", ")", ":", "bigurls", "=", "bigurls", "or", "{", "}", "for", "num_dim", "in", "(", "50", ",", "100", ",", "200", ",", "300", ")", ":", "# not all of these dimensionality, and training set size combinations were trained by Stanford", "for", "suffixes", ",", "num_words", "in", "zip", "(", "(", "'sm -sm _sm -small _small'", ".", "split", "(", ")", ",", "'med -med _med -medium _medium'", ".", "split", "(", ")", ",", "'lg -lg _lg -large _large'", ".", "split", "(", ")", ")", ",", "(", "6", ",", "42", ",", "840", ")", ")", ":", "for", "suf", "in", "suffixes", "[", ":", "-", "1", "]", ":", "name", "=", "'glove'", "+", "suf", "+", "str", "(", "num_dim", ")", "dirname", "=", "'glove.{num_words}B'", ".", "format", "(", "num_words", "=", "num_words", ")", "# glove.42B.300d.w2v.txt", "filename", "=", "dirname", "+", "'.{num_dim}d.w2v.txt'", ".", "format", "(", "num_dim", "=", "num_dim", ")", "# seed the alias named URL with the URL for that training set size's canonical name", "bigurl_tuple", "=", "BIG_URLS", "[", "'glove'", "+", "suffixes", "[", "-", "1", "]", "]", "bigurls", "[", "name", "]", "=", "list", "(", "bigurl_tuple", "[", ":", "2", "]", ")", "bigurls", "[", "name", "]", ".", "append", "(", "os", ".", "path", ".", "join", "(", "dirname", ",", "filename", ")", ")", "bigurls", "[", "name", "]", ".", "append", "(", "load_glove", ")", "bigurls", "[", "name", "]", "=", "tuple", "(", "bigurls", "[", "name", "]", ")", "return", "bigurls" ]
Generate a dictionary of URLs for various combinations of GloVe training set sizes and dimensionality
[ "Generate", "a", "dictionary", "of", "URLs", "for", "various", "combinations", "of", "GloVe", "training", "set", "sizes", "and", "dimensionality" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L381-L403
231,437
totalgood/nlpia
src/nlpia/loaders.py
normalize_ext_rename
def normalize_ext_rename(filepath): """ normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True """ logger.debug('normalize_ext.filepath=' + str(filepath)) new_file_path = normalize_ext(filepath) logger.debug('download_unzip.new_filepaths=' + str(new_file_path)) # FIXME: fails when name is a url filename filepath = rename_file(filepath, new_file_path) logger.debug('download_unzip.filepath=' + str(filepath)) return filepath
python
def normalize_ext_rename(filepath): """ normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True """ logger.debug('normalize_ext.filepath=' + str(filepath)) new_file_path = normalize_ext(filepath) logger.debug('download_unzip.new_filepaths=' + str(new_file_path)) # FIXME: fails when name is a url filename filepath = rename_file(filepath, new_file_path) logger.debug('download_unzip.filepath=' + str(filepath)) return filepath
[ "def", "normalize_ext_rename", "(", "filepath", ")", ":", "logger", ".", "debug", "(", "'normalize_ext.filepath='", "+", "str", "(", "filepath", ")", ")", "new_file_path", "=", "normalize_ext", "(", "filepath", ")", "logger", ".", "debug", "(", "'download_unzip.new_filepaths='", "+", "str", "(", "new_file_path", ")", ")", "# FIXME: fails when name is a url filename", "filepath", "=", "rename_file", "(", "filepath", ",", "new_file_path", ")", "logger", ".", "debug", "(", "'download_unzip.filepath='", "+", "str", "(", "filepath", ")", ")", "return", "filepath" ]
normalize file ext like '.tgz' -> '.tar.gz' and '300d.txt' -> '300d.glove.txt' and rename the file >>> pth = os.path.join(DATA_PATH, 'sms_slang_dict.txt') >>> pth == normalize_ext_rename(pth) True
[ "normalize", "file", "ext", "like", ".", "tgz", "-", ">", ".", "tar", ".", "gz", "and", "300d", ".", "txt", "-", ">", "300d", ".", "glove", ".", "txt", "and", "rename", "the", "file" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L506-L519
231,438
totalgood/nlpia
src/nlpia/loaders.py
untar
def untar(fname, verbose=True): """ Uunzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory """ if fname.lower().endswith(".tar.gz"): dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7]) if os.path.isdir(dirpath): return dirpath with tarfile.open(fname) as tf: members = tf.getmembers() for member in tqdm(members, total=len(members)): tf.extract(member, path=BIGDATA_PATH) dirpath = os.path.join(BIGDATA_PATH, members[0].name) if os.path.isdir(dirpath): return dirpath else: logger.warning("Not a tar.gz file: {}".format(fname))
python
def untar(fname, verbose=True): """ Uunzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory """ if fname.lower().endswith(".tar.gz"): dirpath = os.path.join(BIGDATA_PATH, os.path.basename(fname)[:-7]) if os.path.isdir(dirpath): return dirpath with tarfile.open(fname) as tf: members = tf.getmembers() for member in tqdm(members, total=len(members)): tf.extract(member, path=BIGDATA_PATH) dirpath = os.path.join(BIGDATA_PATH, members[0].name) if os.path.isdir(dirpath): return dirpath else: logger.warning("Not a tar.gz file: {}".format(fname))
[ "def", "untar", "(", "fname", ",", "verbose", "=", "True", ")", ":", "if", "fname", ".", "lower", "(", ")", ".", "endswith", "(", "\".tar.gz\"", ")", ":", "dirpath", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "os", ".", "path", ".", "basename", "(", "fname", ")", "[", ":", "-", "7", "]", ")", "if", "os", ".", "path", ".", "isdir", "(", "dirpath", ")", ":", "return", "dirpath", "with", "tarfile", ".", "open", "(", "fname", ")", "as", "tf", ":", "members", "=", "tf", ".", "getmembers", "(", ")", "for", "member", "in", "tqdm", "(", "members", ",", "total", "=", "len", "(", "members", ")", ")", ":", "tf", ".", "extract", "(", "member", ",", "path", "=", "BIGDATA_PATH", ")", "dirpath", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "members", "[", "0", "]", ".", "name", ")", "if", "os", ".", "path", ".", "isdir", "(", "dirpath", ")", ":", "return", "dirpath", "else", ":", "logger", ".", "warning", "(", "\"Not a tar.gz file: {}\"", ".", "format", "(", "fname", ")", ")" ]
Uunzip and untar a tar.gz file into a subdir of the BIGDATA_PATH directory
[ "Uunzip", "and", "untar", "a", "tar", ".", "gz", "file", "into", "a", "subdir", "of", "the", "BIGDATA_PATH", "directory" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L522-L536
231,439
totalgood/nlpia
src/nlpia/loaders.py
endswith_strip
def endswith_strip(s, endswith='.txt', ignorecase=True): """ Strip a suffix from the end of a string >>> endswith_strip('http://TotalGood.com', '.COM') 'http://TotalGood' >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False) 'http://TotalGood.com' """ if ignorecase: if s.lower().endswith(endswith.lower()): return s[:-len(endswith)] else: if s.endswith(endswith): return s[:-len(endswith)] return s
python
def endswith_strip(s, endswith='.txt', ignorecase=True): """ Strip a suffix from the end of a string >>> endswith_strip('http://TotalGood.com', '.COM') 'http://TotalGood' >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False) 'http://TotalGood.com' """ if ignorecase: if s.lower().endswith(endswith.lower()): return s[:-len(endswith)] else: if s.endswith(endswith): return s[:-len(endswith)] return s
[ "def", "endswith_strip", "(", "s", ",", "endswith", "=", "'.txt'", ",", "ignorecase", "=", "True", ")", ":", "if", "ignorecase", ":", "if", "s", ".", "lower", "(", ")", ".", "endswith", "(", "endswith", ".", "lower", "(", ")", ")", ":", "return", "s", "[", ":", "-", "len", "(", "endswith", ")", "]", "else", ":", "if", "s", ".", "endswith", "(", "endswith", ")", ":", "return", "s", "[", ":", "-", "len", "(", "endswith", ")", "]", "return", "s" ]
Strip a suffix from the end of a string >>> endswith_strip('http://TotalGood.com', '.COM') 'http://TotalGood' >>> endswith_strip('http://TotalGood.com', endswith='.COM', ignorecase=False) 'http://TotalGood.com'
[ "Strip", "a", "suffix", "from", "the", "end", "of", "a", "string" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L570-L584
231,440
totalgood/nlpia
src/nlpia/loaders.py
startswith_strip
def startswith_strip(s, startswith='http://', ignorecase=True): """ Strip a prefix from the beginning of a string >>> startswith_strip('HTtp://TotalGood.com', 'HTTP://') 'TotalGood.com' >>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False) 'HTtp://TotalGood.com' """ if ignorecase: if s.lower().startswith(startswith.lower()): return s[len(startswith):] else: if s.endswith(startswith): return s[len(startswith):] return s
python
def startswith_strip(s, startswith='http://', ignorecase=True): """ Strip a prefix from the beginning of a string >>> startswith_strip('HTtp://TotalGood.com', 'HTTP://') 'TotalGood.com' >>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False) 'HTtp://TotalGood.com' """ if ignorecase: if s.lower().startswith(startswith.lower()): return s[len(startswith):] else: if s.endswith(startswith): return s[len(startswith):] return s
[ "def", "startswith_strip", "(", "s", ",", "startswith", "=", "'http://'", ",", "ignorecase", "=", "True", ")", ":", "if", "ignorecase", ":", "if", "s", ".", "lower", "(", ")", ".", "startswith", "(", "startswith", ".", "lower", "(", ")", ")", ":", "return", "s", "[", "len", "(", "startswith", ")", ":", "]", "else", ":", "if", "s", ".", "endswith", "(", "startswith", ")", ":", "return", "s", "[", "len", "(", "startswith", ")", ":", "]", "return", "s" ]
Strip a prefix from the beginning of a string >>> startswith_strip('HTtp://TotalGood.com', 'HTTP://') 'TotalGood.com' >>> startswith_strip('HTtp://TotalGood.com', startswith='HTTP://', ignorecase=False) 'HTtp://TotalGood.com'
[ "Strip", "a", "prefix", "from", "the", "beginning", "of", "a", "string" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L587-L601
231,441
totalgood/nlpia
src/nlpia/loaders.py
get_longest_table
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0): """ Retrieve the HTML tables from a URL and return the longest DataFrame found >>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns Index(['Common and formal names', 'Membership within the UN System[a]', 'Sovereignty dispute[b]', 'Further information on status and recognition of sovereignty[d]'], dtype='object') """ dfs = pd.read_html(url, header=header) return longest_table(dfs)
python
def get_longest_table(url='https://www.openoffice.org/dev_docs/source/file_extensions.html', header=0): """ Retrieve the HTML tables from a URL and return the longest DataFrame found >>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns Index(['Common and formal names', 'Membership within the UN System[a]', 'Sovereignty dispute[b]', 'Further information on status and recognition of sovereignty[d]'], dtype='object') """ dfs = pd.read_html(url, header=header) return longest_table(dfs)
[ "def", "get_longest_table", "(", "url", "=", "'https://www.openoffice.org/dev_docs/source/file_extensions.html'", ",", "header", "=", "0", ")", ":", "dfs", "=", "pd", ".", "read_html", "(", "url", ",", "header", "=", "header", ")", "return", "longest_table", "(", "dfs", ")" ]
Retrieve the HTML tables from a URL and return the longest DataFrame found >>> get_longest_table('https://en.wikipedia.org/wiki/List_of_sovereign_states').columns Index(['Common and formal names', 'Membership within the UN System[a]', 'Sovereignty dispute[b]', 'Further information on status and recognition of sovereignty[d]'], dtype='object')
[ "Retrieve", "the", "HTML", "tables", "from", "a", "URL", "and", "return", "the", "longest", "DataFrame", "found" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L609-L619
231,442
totalgood/nlpia
src/nlpia/loaders.py
get_filename_extensions
def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'): """ Load a DataFrame of filename extensions from the indicated url >>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html') >>> df.head(2) ext description 0 .a UNIX static library file. 1 .asm Non-UNIX assembler source file. """ df = get_longest_table(url) columns = list(df.columns) columns[0] = 'ext' columns[1] = 'description' if len(columns) > 2: columns[2] = 'details' df.columns = columns return df
python
def get_filename_extensions(url='https://www.webopedia.com/quick_ref/fileextensionsfull.asp'): """ Load a DataFrame of filename extensions from the indicated url >>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html') >>> df.head(2) ext description 0 .a UNIX static library file. 1 .asm Non-UNIX assembler source file. """ df = get_longest_table(url) columns = list(df.columns) columns[0] = 'ext' columns[1] = 'description' if len(columns) > 2: columns[2] = 'details' df.columns = columns return df
[ "def", "get_filename_extensions", "(", "url", "=", "'https://www.webopedia.com/quick_ref/fileextensionsfull.asp'", ")", ":", "df", "=", "get_longest_table", "(", "url", ")", "columns", "=", "list", "(", "df", ".", "columns", ")", "columns", "[", "0", "]", "=", "'ext'", "columns", "[", "1", "]", "=", "'description'", "if", "len", "(", "columns", ")", ">", "2", ":", "columns", "[", "2", "]", "=", "'details'", "df", ".", "columns", "=", "columns", "return", "df" ]
Load a DataFrame of filename extensions from the indicated url >>> df = get_filename_extensions('https://www.openoffice.org/dev_docs/source/file_extensions.html') >>> df.head(2) ext description 0 .a UNIX static library file. 1 .asm Non-UNIX assembler source file.
[ "Load", "a", "DataFrame", "of", "filename", "extensions", "from", "the", "indicated", "url" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L663-L679
231,443
totalgood/nlpia
src/nlpia/loaders.py
create_big_url
def create_big_url(name): """ If name looks like a url, with an http, add an entry for it in BIG_URLS """ # BIG side effect global BIG_URLS filemeta = get_url_filemeta(name) if not filemeta: return None filename = filemeta['filename'] remote_size = filemeta['remote_size'] url = filemeta['url'] name = filename.split('.') name = (name[0] if name[0] not in ('', '.') else name[1]).replace(' ', '-') name = name.lower().strip() BIG_URLS[name] = (url, int(remote_size or -1), filename) return name
python
def create_big_url(name): """ If name looks like a url, with an http, add an entry for it in BIG_URLS """ # BIG side effect global BIG_URLS filemeta = get_url_filemeta(name) if not filemeta: return None filename = filemeta['filename'] remote_size = filemeta['remote_size'] url = filemeta['url'] name = filename.split('.') name = (name[0] if name[0] not in ('', '.') else name[1]).replace(' ', '-') name = name.lower().strip() BIG_URLS[name] = (url, int(remote_size or -1), filename) return name
[ "def", "create_big_url", "(", "name", ")", ":", "# BIG side effect", "global", "BIG_URLS", "filemeta", "=", "get_url_filemeta", "(", "name", ")", "if", "not", "filemeta", ":", "return", "None", "filename", "=", "filemeta", "[", "'filename'", "]", "remote_size", "=", "filemeta", "[", "'remote_size'", "]", "url", "=", "filemeta", "[", "'url'", "]", "name", "=", "filename", ".", "split", "(", "'.'", ")", "name", "=", "(", "name", "[", "0", "]", "if", "name", "[", "0", "]", "not", "in", "(", "''", ",", "'.'", ")", "else", "name", "[", "1", "]", ")", ".", "replace", "(", "' '", ",", "'-'", ")", "name", "=", "name", ".", "lower", "(", ")", ".", "strip", "(", ")", "BIG_URLS", "[", "name", "]", "=", "(", "url", ",", "int", "(", "remote_size", "or", "-", "1", ")", ",", "filename", ")", "return", "name" ]
If name looks like a url, with an http, add an entry for it in BIG_URLS
[ "If", "name", "looks", "like", "a", "url", "with", "an", "http", "add", "an", "entry", "for", "it", "in", "BIG_URLS" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L787-L801
231,444
totalgood/nlpia
src/nlpia/loaders.py
get_data
def get_data(name='sms-spam', nrows=None, limit=None): """ Load data from a json, csv, or txt file if it exists in the data dir. References: [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp) [cities](http://download.geonames.org/export/dump/cities.zip) [cities_us](http://download.geonames.org/export/dump/cities_us.zip) >>> from nlpia.data.loaders import get_data >>> words = get_data('words_ubuntu_us') >>> len(words) 99171 >>> list(words[:8]) ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"] >>> get_data('ubuntu_dialog_test').iloc[0] Context i think we could import the old comments via r... Utterance basically each xfree86 upload will NOT force u... Name: 0, dtype: object >>> get_data('imdb_test').info() <class 'pandas.core.frame.DataFrame'> MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9) Data columns (total 3 columns): url 20 non-null object rating 20 non-null int64 text 20 non-null object dtypes: int64(1), object(2) memory usage: 809.0+ bytes """ nrows = nrows or limit if name in BIG_URLS: logger.info('Downloading {}'.format(name)) filepaths = download_unzip(name, normalize_filenames=True) logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths)) filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name] logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath)) filepathlow = filepath.lower() if len(BIG_URLS[name]) >= 4: kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {} return BIG_URLS[name][3](filepath, **kwargs) if filepathlow.endswith('.w2v.txt'): try: return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'): try: return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.gz'): try: filepath = 
ensure_open(filepath) except: # noqa pass if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow): return read_json(filepath) if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'): try: return pd.read_table(filepath) except: # noqa pass if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'): try: return read_csv(filepath) except: # noqa pass if filepathlow.endswith('.txt'): try: return read_txt(filepath) except (TypeError, UnicodeError): pass return filepaths[name] elif name in DATASET_NAME2FILENAME: return read_named_csv(name, nrows=nrows) elif name in DATA_NAMES: return read_named_csv(DATA_NAMES[name], nrows=nrows) elif os.path.isfile(name): return read_named_csv(name, nrows=nrows) elif os.path.isfile(os.path.join(DATA_PATH, name)): return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows) msg = 'Unable to find dataset "{}"" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format( name, DATA_PATH, BIGDATA_PATH) msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES)) logger.error(msg) raise IOError(msg)
python
def get_data(name='sms-spam', nrows=None, limit=None): """ Load data from a json, csv, or txt file if it exists in the data dir. References: [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp) [cities](http://download.geonames.org/export/dump/cities.zip) [cities_us](http://download.geonames.org/export/dump/cities_us.zip) >>> from nlpia.data.loaders import get_data >>> words = get_data('words_ubuntu_us') >>> len(words) 99171 >>> list(words[:8]) ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"] >>> get_data('ubuntu_dialog_test').iloc[0] Context i think we could import the old comments via r... Utterance basically each xfree86 upload will NOT force u... Name: 0, dtype: object >>> get_data('imdb_test').info() <class 'pandas.core.frame.DataFrame'> MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9) Data columns (total 3 columns): url 20 non-null object rating 20 non-null int64 text 20 non-null object dtypes: int64(1), object(2) memory usage: 809.0+ bytes """ nrows = nrows or limit if name in BIG_URLS: logger.info('Downloading {}'.format(name)) filepaths = download_unzip(name, normalize_filenames=True) logger.debug('nlpia.loaders.get_data.filepaths=' + str(filepaths)) filepath = filepaths[name][0] if isinstance(filepaths[name], (list, tuple)) else filepaths[name] logger.debug('nlpia.loaders.get_data.filepath=' + str(filepath)) filepathlow = filepath.lower() if len(BIG_URLS[name]) >= 4: kwargs = BIG_URLS[name][4] if len(BIG_URLS[name]) >= 5 else {} return BIG_URLS[name][3](filepath, **kwargs) if filepathlow.endswith('.w2v.txt'): try: return KeyedVectors.load_word2vec_format(filepath, binary=False, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.w2v.bin') or filepathlow.endswith('.bin.gz') or filepathlow.endswith('.w2v.bin.gz'): try: return KeyedVectors.load_word2vec_format(filepath, binary=True, limit=nrows) except (TypeError, UnicodeError): pass if filepathlow.endswith('.gz'): try: filepath = 
ensure_open(filepath) except: # noqa pass if re.match(r'.json([.][a-z]{0,3}){0,2}', filepathlow): return read_json(filepath) if filepathlow.endswith('.tsv.gz') or filepathlow.endswith('.tsv'): try: return pd.read_table(filepath) except: # noqa pass if filepathlow.endswith('.csv.gz') or filepathlow.endswith('.csv'): try: return read_csv(filepath) except: # noqa pass if filepathlow.endswith('.txt'): try: return read_txt(filepath) except (TypeError, UnicodeError): pass return filepaths[name] elif name in DATASET_NAME2FILENAME: return read_named_csv(name, nrows=nrows) elif name in DATA_NAMES: return read_named_csv(DATA_NAMES[name], nrows=nrows) elif os.path.isfile(name): return read_named_csv(name, nrows=nrows) elif os.path.isfile(os.path.join(DATA_PATH, name)): return read_named_csv(os.path.join(DATA_PATH, name), nrows=nrows) msg = 'Unable to find dataset "{}"" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\n'.format( name, DATA_PATH, BIGDATA_PATH) msg += 'Available dataset names include:\n{}'.format('\n'.join(DATASET_NAMES)) logger.error(msg) raise IOError(msg)
[ "def", "get_data", "(", "name", "=", "'sms-spam'", ",", "nrows", "=", "None", ",", "limit", "=", "None", ")", ":", "nrows", "=", "nrows", "or", "limit", "if", "name", "in", "BIG_URLS", ":", "logger", ".", "info", "(", "'Downloading {}'", ".", "format", "(", "name", ")", ")", "filepaths", "=", "download_unzip", "(", "name", ",", "normalize_filenames", "=", "True", ")", "logger", ".", "debug", "(", "'nlpia.loaders.get_data.filepaths='", "+", "str", "(", "filepaths", ")", ")", "filepath", "=", "filepaths", "[", "name", "]", "[", "0", "]", "if", "isinstance", "(", "filepaths", "[", "name", "]", ",", "(", "list", ",", "tuple", ")", ")", "else", "filepaths", "[", "name", "]", "logger", ".", "debug", "(", "'nlpia.loaders.get_data.filepath='", "+", "str", "(", "filepath", ")", ")", "filepathlow", "=", "filepath", ".", "lower", "(", ")", "if", "len", "(", "BIG_URLS", "[", "name", "]", ")", ">=", "4", ":", "kwargs", "=", "BIG_URLS", "[", "name", "]", "[", "4", "]", "if", "len", "(", "BIG_URLS", "[", "name", "]", ")", ">=", "5", "else", "{", "}", "return", "BIG_URLS", "[", "name", "]", "[", "3", "]", "(", "filepath", ",", "*", "*", "kwargs", ")", "if", "filepathlow", ".", "endswith", "(", "'.w2v.txt'", ")", ":", "try", ":", "return", "KeyedVectors", ".", "load_word2vec_format", "(", "filepath", ",", "binary", "=", "False", ",", "limit", "=", "nrows", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "if", "filepathlow", ".", "endswith", "(", "'.w2v.bin'", ")", "or", "filepathlow", ".", "endswith", "(", "'.bin.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.w2v.bin.gz'", ")", ":", "try", ":", "return", "KeyedVectors", ".", "load_word2vec_format", "(", "filepath", ",", "binary", "=", "True", ",", "limit", "=", "nrows", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "if", "filepathlow", ".", "endswith", "(", "'.gz'", ")", ":", "try", ":", "filepath", "=", "ensure_open", "(", "filepath", ")", "except", ":", "# noqa", 
"pass", "if", "re", ".", "match", "(", "r'.json([.][a-z]{0,3}){0,2}'", ",", "filepathlow", ")", ":", "return", "read_json", "(", "filepath", ")", "if", "filepathlow", ".", "endswith", "(", "'.tsv.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.tsv'", ")", ":", "try", ":", "return", "pd", ".", "read_table", "(", "filepath", ")", "except", ":", "# noqa", "pass", "if", "filepathlow", ".", "endswith", "(", "'.csv.gz'", ")", "or", "filepathlow", ".", "endswith", "(", "'.csv'", ")", ":", "try", ":", "return", "read_csv", "(", "filepath", ")", "except", ":", "# noqa", "pass", "if", "filepathlow", ".", "endswith", "(", "'.txt'", ")", ":", "try", ":", "return", "read_txt", "(", "filepath", ")", "except", "(", "TypeError", ",", "UnicodeError", ")", ":", "pass", "return", "filepaths", "[", "name", "]", "elif", "name", "in", "DATASET_NAME2FILENAME", ":", "return", "read_named_csv", "(", "name", ",", "nrows", "=", "nrows", ")", "elif", "name", "in", "DATA_NAMES", ":", "return", "read_named_csv", "(", "DATA_NAMES", "[", "name", "]", ",", "nrows", "=", "nrows", ")", "elif", "os", ".", "path", ".", "isfile", "(", "name", ")", ":", "return", "read_named_csv", "(", "name", ",", "nrows", "=", "nrows", ")", "elif", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "name", ")", ")", ":", "return", "read_named_csv", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "name", ")", ",", "nrows", "=", "nrows", ")", "msg", "=", "'Unable to find dataset \"{}\"\" in {} or {} (*.csv.gz, *.csv, *.json, *.zip, or *.txt)\\n'", ".", "format", "(", "name", ",", "DATA_PATH", ",", "BIGDATA_PATH", ")", "msg", "+=", "'Available dataset names include:\\n{}'", ".", "format", "(", "'\\n'", ".", "join", "(", "DATASET_NAMES", ")", ")", "logger", ".", "error", "(", "msg", ")", "raise", "IOError", "(", "msg", ")" ]
Load data from a json, csv, or txt file if it exists in the data dir. References: [cities_air_pollution_index](https://www.numbeo.com/pollution/rankings.jsp) [cities](http://download.geonames.org/export/dump/cities.zip) [cities_us](http://download.geonames.org/export/dump/cities_us.zip) >>> from nlpia.data.loaders import get_data >>> words = get_data('words_ubuntu_us') >>> len(words) 99171 >>> list(words[:8]) ['A', "A's", "AA's", "AB's", "ABM's", "AC's", "ACTH's", "AI's"] >>> get_data('ubuntu_dialog_test').iloc[0] Context i think we could import the old comments via r... Utterance basically each xfree86 upload will NOT force u... Name: 0, dtype: object >>> get_data('imdb_test').info() <class 'pandas.core.frame.DataFrame'> MultiIndex: 20 entries, (train, pos, 0) to (train, neg, 9) Data columns (total 3 columns): url 20 non-null object rating 20 non-null int64 text 20 non-null object dtypes: int64(1), object(2) memory usage: 809.0+ bytes
[ "Load", "data", "from", "a", "json", "csv", "or", "txt", "file", "if", "it", "exists", "in", "the", "data", "dir", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1027-L1113
231,445
totalgood/nlpia
src/nlpia/loaders.py
get_wikidata_qnum
def get_wikidata_qnum(wikiarticle, wikisite): """Retrieve the Query number for a wikidata database of metadata about a particular article >>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy", wikisite="enwiki")) Q2469 """ resp = requests.get('https://www.wikidata.org/w/api.php', timeout=5, params={ 'action': 'wbgetentities', 'titles': wikiarticle, 'sites': wikisite, 'props': '', 'format': 'json' }).json() return list(resp['entities'])[0]
python
def get_wikidata_qnum(wikiarticle, wikisite): """Retrieve the Query number for a wikidata database of metadata about a particular article >>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy", wikisite="enwiki")) Q2469 """ resp = requests.get('https://www.wikidata.org/w/api.php', timeout=5, params={ 'action': 'wbgetentities', 'titles': wikiarticle, 'sites': wikisite, 'props': '', 'format': 'json' }).json() return list(resp['entities'])[0]
[ "def", "get_wikidata_qnum", "(", "wikiarticle", ",", "wikisite", ")", ":", "resp", "=", "requests", ".", "get", "(", "'https://www.wikidata.org/w/api.php'", ",", "timeout", "=", "5", ",", "params", "=", "{", "'action'", ":", "'wbgetentities'", ",", "'titles'", ":", "wikiarticle", ",", "'sites'", ":", "wikisite", ",", "'props'", ":", "''", ",", "'format'", ":", "'json'", "}", ")", ".", "json", "(", ")", "return", "list", "(", "resp", "[", "'entities'", "]", ")", "[", "0", "]" ]
Retrieve the Query number for a wikidata database of metadata about a particular article >>> print(get_wikidata_qnum(wikiarticle="Andromeda Galaxy", wikisite="enwiki")) Q2469
[ "Retrieve", "the", "Query", "number", "for", "a", "wikidata", "database", "of", "metadata", "about", "a", "particular", "article" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1126-L1139
231,446
totalgood/nlpia
src/nlpia/loaders.py
normalize_column_names
def normalize_column_names(df): r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here'] """ columns = df.columns if hasattr(df, 'columns') else df columns = [c.lower().replace(' ', '_') for c in columns] return columns
python
def normalize_column_names(df): r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here'] """ columns = df.columns if hasattr(df, 'columns') else df columns = [c.lower().replace(' ', '_') for c in columns] return columns
[ "def", "normalize_column_names", "(", "df", ")", ":", "columns", "=", "df", ".", "columns", "if", "hasattr", "(", "df", ",", "'columns'", ")", "else", "df", "columns", "=", "[", "c", ".", "lower", "(", ")", ".", "replace", "(", "' '", ",", "'_'", ")", "for", "c", "in", "columns", "]", "return", "columns" ]
r""" Clean up whitespace in column names. See better version at `pugnlp.clean_columns` >>> df = pd.DataFrame([[1, 2], [3, 4]], columns=['Hello World', 'not here']) >>> normalize_column_names(df) ['hello_world', 'not_here']
[ "r", "Clean", "up", "whitespace", "in", "column", "names", ".", "See", "better", "version", "at", "pugnlp", ".", "clean_columns" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1167-L1176
231,447
totalgood/nlpia
src/nlpia/loaders.py
clean_column_values
def clean_column_values(df, inplace=True): r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object """ dollars_percents = re.compile(r'[%$,;\s]+') if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in '<U S O'.split(): try: values = df[c].copy() values = values.fillna('') values = values.astype(str).str.replace(dollars_percents, '') # values = values.str.strip().str.replace(dollars_percents, '').str.strip() if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ''] = np.nan values = values.astype(float) except ValueError: values = None except: # noqa logger.error('Error on column {} with dtype {}'.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df
python
def clean_column_values(df, inplace=True): r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object """ dollars_percents = re.compile(r'[%$,;\s]+') if not inplace: df = df.copy() for c in df.columns: values = None if df[c].dtype.char in '<U S O'.split(): try: values = df[c].copy() values = values.fillna('') values = values.astype(str).str.replace(dollars_percents, '') # values = values.str.strip().str.replace(dollars_percents, '').str.strip() if values.str.len().sum() > .2 * df[c].astype(str).str.len().sum(): values[values.isnull()] = np.nan values[values == ''] = np.nan values = values.astype(float) except ValueError: values = None except: # noqa logger.error('Error on column {} with dtype {}'.format(c, df[c].dtype)) raise if values is not None: if values.isnull().sum() < .6 * len(values) and values.any(): df[c] = values return df
[ "def", "clean_column_values", "(", "df", ",", "inplace", "=", "True", ")", ":", "dollars_percents", "=", "re", ".", "compile", "(", "r'[%$,;\\s]+'", ")", "if", "not", "inplace", ":", "df", "=", "df", ".", "copy", "(", ")", "for", "c", "in", "df", ".", "columns", ":", "values", "=", "None", "if", "df", "[", "c", "]", ".", "dtype", ".", "char", "in", "'<U S O'", ".", "split", "(", ")", ":", "try", ":", "values", "=", "df", "[", "c", "]", ".", "copy", "(", ")", "values", "=", "values", ".", "fillna", "(", "''", ")", "values", "=", "values", ".", "astype", "(", "str", ")", ".", "str", ".", "replace", "(", "dollars_percents", ",", "''", ")", "# values = values.str.strip().str.replace(dollars_percents, '').str.strip()", "if", "values", ".", "str", ".", "len", "(", ")", ".", "sum", "(", ")", ">", ".2", "*", "df", "[", "c", "]", ".", "astype", "(", "str", ")", ".", "str", ".", "len", "(", ")", ".", "sum", "(", ")", ":", "values", "[", "values", ".", "isnull", "(", ")", "]", "=", "np", ".", "nan", "values", "[", "values", "==", "''", "]", "=", "np", ".", "nan", "values", "=", "values", ".", "astype", "(", "float", ")", "except", "ValueError", ":", "values", "=", "None", "except", ":", "# noqa", "logger", ".", "error", "(", "'Error on column {} with dtype {}'", ".", "format", "(", "c", ",", "df", "[", "c", "]", ".", "dtype", ")", ")", "raise", "if", "values", "is", "not", "None", ":", "if", "values", ".", "isnull", "(", ")", ".", "sum", "(", ")", "<", ".6", "*", "len", "(", "values", ")", "and", "values", ".", "any", "(", ")", ":", "df", "[", "c", "]", "=", "values", "return", "df" ]
r""" Convert dollar value strings, numbers with commas, and percents into floating point values >>> df = get_data('us_gov_deficits_raw') >>> df2 = clean_column_values(df, inplace=False) >>> df2.iloc[0] Fiscal year 10/2017-3/2018 President's party R Senate majority party R House majority party R Top-bracket marginal income tax rate 38.3 National debt millions 2.10896e+07 National debt millions of 1983 dollars 8.47004e+06 Deficit\n(millions of 1983 dollars) 431443 Surplus string in 1983 dollars NaN Deficit string in 1983 dollars ($ = $10B) $$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$ Net surplus in 1983 dollars ($B) -430 Name: 0, dtype: object
[ "r", "Convert", "dollar", "value", "strings", "numbers", "with", "commas", "and", "percents", "into", "floating", "point", "values" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1179-L1222
231,448
totalgood/nlpia
src/nlpia/loaders.py
isglove
def isglove(filepath): """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False """ with ensure_open(filepath, 'r') as f: header_line = f.readline() vector_line = f.readline() try: num_vectors, num_dim = header_line.split() return int(num_dim) except (ValueError, TypeError): pass vector = vector_line.split()[1:] if len(vector) % 10: print(vector) print(len(vector) % 10) return False try: vector = np.array([float(x) for x in vector]) except (ValueError, TypeError): return False if np.all(np.abs(vector) < 12.): return len(vector) return False
python
def isglove(filepath): """ Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False """ with ensure_open(filepath, 'r') as f: header_line = f.readline() vector_line = f.readline() try: num_vectors, num_dim = header_line.split() return int(num_dim) except (ValueError, TypeError): pass vector = vector_line.split()[1:] if len(vector) % 10: print(vector) print(len(vector) % 10) return False try: vector = np.array([float(x) for x in vector]) except (ValueError, TypeError): return False if np.all(np.abs(vector) < 12.): return len(vector) return False
[ "def", "isglove", "(", "filepath", ")", ":", "with", "ensure_open", "(", "filepath", ",", "'r'", ")", "as", "f", ":", "header_line", "=", "f", ".", "readline", "(", ")", "vector_line", "=", "f", ".", "readline", "(", ")", "try", ":", "num_vectors", ",", "num_dim", "=", "header_line", ".", "split", "(", ")", "return", "int", "(", "num_dim", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass", "vector", "=", "vector_line", ".", "split", "(", ")", "[", "1", ":", "]", "if", "len", "(", "vector", ")", "%", "10", ":", "print", "(", "vector", ")", "print", "(", "len", "(", "vector", ")", "%", "10", ")", "return", "False", "try", ":", "vector", "=", "np", ".", "array", "(", "[", "float", "(", "x", ")", "for", "x", "in", "vector", "]", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "return", "False", "if", "np", ".", "all", "(", "np", ".", "abs", "(", "vector", ")", "<", "12.", ")", ":", "return", "len", "(", "vector", ")", "return", "False" ]
Get the first word vector in a GloVE file and return its dimensionality or False if not a vector >>> isglove(os.path.join(DATA_PATH, 'cats_and_dogs.txt')) False
[ "Get", "the", "first", "word", "vector", "in", "a", "GloVE", "file", "and", "return", "its", "dimensionality", "or", "False", "if", "not", "a", "vector" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1320-L1346
231,449
totalgood/nlpia
src/nlpia/loaders.py
nlp
def nlp(texts, lang='en', linesep=None, verbose=True): r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))] """ # doesn't let you load a different model anywhere else in the module linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm global _parse if not _parse: try: _parse = spacy.load(lang) except (OSError, IOError): try: spacy.cli.download(lang) except URLError: logger.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang)) parse = _parse or str.split # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts if isinstance(texts, str): if linesep: return nlp(texts.split(linesep)) else: return nlp([texts]) if hasattr(texts, '__len__'): if len(texts) == 1: return parse(texts[0]) elif len(texts) > 1: return [(parse or str.split)(text) for text in tqdm_prog(texts)] else: return None else: # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself return (parse(text) for text in tqdm_prog(texts))
python
def nlp(texts, lang='en', linesep=None, verbose=True): r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))] """ # doesn't let you load a different model anywhere else in the module linesep = os.linesep if linesep in ('default', True, 1, 'os') else linesep tqdm_prog = no_tqdm if (not verbose or (hasattr(texts, '__len__') and len(texts) < 3)) else tqdm global _parse if not _parse: try: _parse = spacy.load(lang) except (OSError, IOError): try: spacy.cli.download(lang) except URLError: logger.warning("Unable to download Spacy language model '{}' so nlp(text) just returns text.split()".format(lang)) parse = _parse or str.split # TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts if isinstance(texts, str): if linesep: return nlp(texts.split(linesep)) else: return nlp([texts]) if hasattr(texts, '__len__'): if len(texts) == 1: return parse(texts[0]) elif len(texts) > 1: return [(parse or str.split)(text) for text in tqdm_prog(texts)] else: return None else: # return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself return (parse(text) for text in tqdm_prog(texts))
[ "def", "nlp", "(", "texts", ",", "lang", "=", "'en'", ",", "linesep", "=", "None", ",", "verbose", "=", "True", ")", ":", "# doesn't let you load a different model anywhere else in the module", "linesep", "=", "os", ".", "linesep", "if", "linesep", "in", "(", "'default'", ",", "True", ",", "1", ",", "'os'", ")", "else", "linesep", "tqdm_prog", "=", "no_tqdm", "if", "(", "not", "verbose", "or", "(", "hasattr", "(", "texts", ",", "'__len__'", ")", "and", "len", "(", "texts", ")", "<", "3", ")", ")", "else", "tqdm", "global", "_parse", "if", "not", "_parse", ":", "try", ":", "_parse", "=", "spacy", ".", "load", "(", "lang", ")", "except", "(", "OSError", ",", "IOError", ")", ":", "try", ":", "spacy", ".", "cli", ".", "download", "(", "lang", ")", "except", "URLError", ":", "logger", ".", "warning", "(", "\"Unable to download Spacy language model '{}' so nlp(text) just returns text.split()\"", ".", "format", "(", "lang", ")", ")", "parse", "=", "_parse", "or", "str", ".", "split", "# TODO: reverse this recursion (str first then sequence) to allow for sequences of sequences of texts", "if", "isinstance", "(", "texts", ",", "str", ")", ":", "if", "linesep", ":", "return", "nlp", "(", "texts", ".", "split", "(", "linesep", ")", ")", "else", ":", "return", "nlp", "(", "[", "texts", "]", ")", "if", "hasattr", "(", "texts", ",", "'__len__'", ")", ":", "if", "len", "(", "texts", ")", "==", "1", ":", "return", "parse", "(", "texts", "[", "0", "]", ")", "elif", "len", "(", "texts", ")", ">", "1", ":", "return", "[", "(", "parse", "or", "str", ".", "split", ")", "(", "text", ")", "for", "text", "in", "tqdm_prog", "(", "texts", ")", "]", "else", ":", "return", "None", "else", ":", "# return generator if sequence of strings doesn't have __len__ which means its an iterable or generator itself", "return", "(", "parse", "(", "text", ")", "for", "text", "in", "tqdm_prog", "(", "texts", ")", ")" ]
r""" Use the SpaCy parser to parse and tag natural language strings. Load the SpaCy parser language model lazily and share it among all nlpia modules. Probably unnecessary, since SpaCy probably takes care of this with `spacy.load()` >>> _parse is None True >>> doc = nlp("Domo arigatto Mr. Roboto.") >>> doc.text 'Domo arigatto Mr. Roboto.' >>> doc.ents (Roboto,) >>> docs = nlp("Hey Mr. Tangerine Man!\nPlay a song for me.\n", linesep='\n') >>> doc = docs[0] >>> [t for t in doc] [Hey, Mr., Tangerine, Man, !] >>> [tok.text for tok in doc] ['Hey', 'Mr.', 'Tangerine', 'Man', '!'] >>> [(tok.text, tok.tag_) for tok in doc] [('Hey', 'UH'), ('Mr.', 'NNP'), ('Tangerine', 'NNP'), ('Man', 'NN'), ('!', '.')] >>> [(ent.text, ent.ent_id, ent.has_vector, ent.vector[:3].round(3)) for ent in doc.ents] [('Tangerine Man', 0, True, array([0.72 , 1.913, 2.675], dtype=float32))]
[ "r", "Use", "the", "SpaCy", "parser", "to", "parse", "and", "tag", "natural", "language", "strings", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/loaders.py#L1349-L1405
231,450
totalgood/nlpia
src/nlpia/talk.py
get_decoder
def get_decoder(libdir=None, modeldir=None, lang='en-us'): """ Create a decoder with the requested language model """ modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR) libdir = os.path.dirname(modeldir) config = ps.Decoder.default_config() config.set_string('-hmm', os.path.join(modeldir, lang)) config.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin')) config.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict')) print(config) return ps.Decoder(config)
python
def get_decoder(libdir=None, modeldir=None, lang='en-us'): """ Create a decoder with the requested language model """ modeldir = modeldir or (os.path.join(libdir, 'model') if libdir else MODELDIR) libdir = os.path.dirname(modeldir) config = ps.Decoder.default_config() config.set_string('-hmm', os.path.join(modeldir, lang)) config.set_string('-lm', os.path.join(modeldir, lang + '.lm.bin')) config.set_string('-dict', os.path.join(modeldir, 'cmudict-' + lang + '.dict')) print(config) return ps.Decoder(config)
[ "def", "get_decoder", "(", "libdir", "=", "None", ",", "modeldir", "=", "None", ",", "lang", "=", "'en-us'", ")", ":", "modeldir", "=", "modeldir", "or", "(", "os", ".", "path", ".", "join", "(", "libdir", ",", "'model'", ")", "if", "libdir", "else", "MODELDIR", ")", "libdir", "=", "os", ".", "path", ".", "dirname", "(", "modeldir", ")", "config", "=", "ps", ".", "Decoder", ".", "default_config", "(", ")", "config", ".", "set_string", "(", "'-hmm'", ",", "os", ".", "path", ".", "join", "(", "modeldir", ",", "lang", ")", ")", "config", ".", "set_string", "(", "'-lm'", ",", "os", ".", "path", ".", "join", "(", "modeldir", ",", "lang", "+", "'.lm.bin'", ")", ")", "config", ".", "set_string", "(", "'-dict'", ",", "os", ".", "path", ".", "join", "(", "modeldir", ",", "'cmudict-'", "+", "lang", "+", "'.dict'", ")", ")", "print", "(", "config", ")", "return", "ps", ".", "Decoder", "(", "config", ")" ]
Create a decoder with the requested language model
[ "Create", "a", "decoder", "with", "the", "requested", "language", "model" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/talk.py#L43-L52
231,451
totalgood/nlpia
src/nlpia/talk.py
transcribe
def transcribe(decoder, audio_file, libdir=None): """ Decode streaming audio data from raw binary file on disk. """ decoder = get_decoder() decoder.start_utt() stream = open(audio_file, 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() return evaluate_results(decoder)
python
def transcribe(decoder, audio_file, libdir=None): """ Decode streaming audio data from raw binary file on disk. """ decoder = get_decoder() decoder.start_utt() stream = open(audio_file, 'rb') while True: buf = stream.read(1024) if buf: decoder.process_raw(buf, False, False) else: break decoder.end_utt() return evaluate_results(decoder)
[ "def", "transcribe", "(", "decoder", ",", "audio_file", ",", "libdir", "=", "None", ")", ":", "decoder", "=", "get_decoder", "(", ")", "decoder", ".", "start_utt", "(", ")", "stream", "=", "open", "(", "audio_file", ",", "'rb'", ")", "while", "True", ":", "buf", "=", "stream", ".", "read", "(", "1024", ")", "if", "buf", ":", "decoder", ".", "process_raw", "(", "buf", ",", "False", ",", "False", ")", "else", ":", "break", "decoder", ".", "end_utt", "(", ")", "return", "evaluate_results", "(", "decoder", ")" ]
Decode streaming audio data from raw binary file on disk.
[ "Decode", "streaming", "audio", "data", "from", "raw", "binary", "file", "on", "disk", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/talk.py#L67-L80
231,452
totalgood/nlpia
src/nlpia/book/examples/ch09.py
pre_process_data
def pre_process_data(filepath): """ This is dependent on your training data source but we will try to generalize it as best as possible. """ positive_path = os.path.join(filepath, 'pos') negative_path = os.path.join(filepath, 'neg') pos_label = 1 neg_label = 0 dataset = [] for filename in glob.glob(os.path.join(positive_path, '*.txt')): with open(filename, 'r') as f: dataset.append((pos_label, f.read())) for filename in glob.glob(os.path.join(negative_path, '*.txt')): with open(filename, 'r') as f: dataset.append((neg_label, f.read())) shuffle(dataset) return dataset
python
def pre_process_data(filepath): """ This is dependent on your training data source but we will try to generalize it as best as possible. """ positive_path = os.path.join(filepath, 'pos') negative_path = os.path.join(filepath, 'neg') pos_label = 1 neg_label = 0 dataset = [] for filename in glob.glob(os.path.join(positive_path, '*.txt')): with open(filename, 'r') as f: dataset.append((pos_label, f.read())) for filename in glob.glob(os.path.join(negative_path, '*.txt')): with open(filename, 'r') as f: dataset.append((neg_label, f.read())) shuffle(dataset) return dataset
[ "def", "pre_process_data", "(", "filepath", ")", ":", "positive_path", "=", "os", ".", "path", ".", "join", "(", "filepath", ",", "'pos'", ")", "negative_path", "=", "os", ".", "path", ".", "join", "(", "filepath", ",", "'neg'", ")", "pos_label", "=", "1", "neg_label", "=", "0", "dataset", "=", "[", "]", "for", "filename", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "positive_path", ",", "'*.txt'", ")", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "dataset", ".", "append", "(", "(", "pos_label", ",", "f", ".", "read", "(", ")", ")", ")", "for", "filename", "in", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "negative_path", ",", "'*.txt'", ")", ")", ":", "with", "open", "(", "filename", ",", "'r'", ")", "as", "f", ":", "dataset", ".", "append", "(", "(", "neg_label", ",", "f", ".", "read", "(", ")", ")", ")", "shuffle", "(", "dataset", ")", "return", "dataset" ]
This is dependent on your training data source but we will try to generalize it as best as possible.
[ "This", "is", "dependent", "on", "your", "training", "data", "source", "but", "we", "will", "try", "to", "generalize", "it", "as", "best", "as", "possible", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L141-L163
231,453
totalgood/nlpia
src/nlpia/book/examples/ch09.py
pad_trunc
def pad_trunc(data, maxlen): """ For a given dataset pad with zero vectors or truncate to maxlen """ new_data = [] # Create a vector of 0's the length of our word vectors zero_vector = [] for _ in range(len(data[0][0])): zero_vector.append(0.0) for sample in data: if len(sample) > maxlen: temp = sample[:maxlen] elif len(sample) < maxlen: temp = sample additional_elems = maxlen - len(sample) for _ in range(additional_elems): temp.append(zero_vector) else: temp = sample new_data.append(temp) return new_data
python
def pad_trunc(data, maxlen): """ For a given dataset pad with zero vectors or truncate to maxlen """ new_data = [] # Create a vector of 0's the length of our word vectors zero_vector = [] for _ in range(len(data[0][0])): zero_vector.append(0.0) for sample in data: if len(sample) > maxlen: temp = sample[:maxlen] elif len(sample) < maxlen: temp = sample additional_elems = maxlen - len(sample) for _ in range(additional_elems): temp.append(zero_vector) else: temp = sample new_data.append(temp) return new_data
[ "def", "pad_trunc", "(", "data", ",", "maxlen", ")", ":", "new_data", "=", "[", "]", "# Create a vector of 0's the length of our word vectors", "zero_vector", "=", "[", "]", "for", "_", "in", "range", "(", "len", "(", "data", "[", "0", "]", "[", "0", "]", ")", ")", ":", "zero_vector", ".", "append", "(", "0.0", ")", "for", "sample", "in", "data", ":", "if", "len", "(", "sample", ")", ">", "maxlen", ":", "temp", "=", "sample", "[", ":", "maxlen", "]", "elif", "len", "(", "sample", ")", "<", "maxlen", ":", "temp", "=", "sample", "additional_elems", "=", "maxlen", "-", "len", "(", "sample", ")", "for", "_", "in", "range", "(", "additional_elems", ")", ":", "temp", ".", "append", "(", "zero_vector", ")", "else", ":", "temp", "=", "sample", "new_data", ".", "append", "(", "temp", ")", "return", "new_data" ]
For a given dataset pad with zero vectors or truncate to maxlen
[ "For", "a", "given", "dataset", "pad", "with", "zero", "vectors", "or", "truncate", "to", "maxlen" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L207-L228
231,454
totalgood/nlpia
src/nlpia/book/examples/ch09.py
clean_data
def clean_data(data): """ Shift to lower case, replace unknowns with UNK, and listify """ new_data = [] VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; ' for sample in data: new_sample = [] for char in sample[1].lower(): # Just grab the string, not the label if char in VALID: new_sample.append(char) else: new_sample.append('UNK') new_data.append(new_sample) return new_data
python
def clean_data(data): """ Shift to lower case, replace unknowns with UNK, and listify """ new_data = [] VALID = 'abcdefghijklmnopqrstuvwxyz123456789"\'?!.,:; ' for sample in data: new_sample = [] for char in sample[1].lower(): # Just grab the string, not the label if char in VALID: new_sample.append(char) else: new_sample.append('UNK') new_data.append(new_sample) return new_data
[ "def", "clean_data", "(", "data", ")", ":", "new_data", "=", "[", "]", "VALID", "=", "'abcdefghijklmnopqrstuvwxyz123456789\"\\'?!.,:; '", "for", "sample", "in", "data", ":", "new_sample", "=", "[", "]", "for", "char", "in", "sample", "[", "1", "]", ".", "lower", "(", ")", ":", "# Just grab the string, not the label", "if", "char", "in", "VALID", ":", "new_sample", ".", "append", "(", "char", ")", "else", ":", "new_sample", ".", "append", "(", "'UNK'", ")", "new_data", ".", "append", "(", "new_sample", ")", "return", "new_data" ]
Shift to lower case, replace unknowns with UNK, and listify
[ "Shift", "to", "lower", "case", "replace", "unknowns", "with", "UNK", "and", "listify" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L436-L449
231,455
totalgood/nlpia
src/nlpia/book/examples/ch09.py
char_pad_trunc
def char_pad_trunc(data, maxlen): """ We truncate to maxlen or add in PAD tokens """ new_dataset = [] for sample in data: if len(sample) > maxlen: new_data = sample[:maxlen] elif len(sample) < maxlen: pads = maxlen - len(sample) new_data = sample + ['PAD'] * pads else: new_data = sample new_dataset.append(new_data) return new_dataset
python
def char_pad_trunc(data, maxlen): """ We truncate to maxlen or add in PAD tokens """ new_dataset = [] for sample in data: if len(sample) > maxlen: new_data = sample[:maxlen] elif len(sample) < maxlen: pads = maxlen - len(sample) new_data = sample + ['PAD'] * pads else: new_data = sample new_dataset.append(new_data) return new_dataset
[ "def", "char_pad_trunc", "(", "data", ",", "maxlen", ")", ":", "new_dataset", "=", "[", "]", "for", "sample", "in", "data", ":", "if", "len", "(", "sample", ")", ">", "maxlen", ":", "new_data", "=", "sample", "[", ":", "maxlen", "]", "elif", "len", "(", "sample", ")", "<", "maxlen", ":", "pads", "=", "maxlen", "-", "len", "(", "sample", ")", "new_data", "=", "sample", "+", "[", "'PAD'", "]", "*", "pads", "else", ":", "new_data", "=", "sample", "new_dataset", ".", "append", "(", "new_data", ")", "return", "new_dataset" ]
We truncate to maxlen or add in PAD tokens
[ "We", "truncate", "to", "maxlen", "or", "add", "in", "PAD", "tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L458-L470
231,456
totalgood/nlpia
src/nlpia/book/examples/ch09.py
create_dicts
def create_dicts(data): """ Modified from Keras LSTM example""" chars = set() for sample in data: chars.update(set(sample)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) return char_indices, indices_char
python
def create_dicts(data): """ Modified from Keras LSTM example""" chars = set() for sample in data: chars.update(set(sample)) char_indices = dict((c, i) for i, c in enumerate(chars)) indices_char = dict((i, c) for i, c in enumerate(chars)) return char_indices, indices_char
[ "def", "create_dicts", "(", "data", ")", ":", "chars", "=", "set", "(", ")", "for", "sample", "in", "data", ":", "chars", ".", "update", "(", "set", "(", "sample", ")", ")", "char_indices", "=", "dict", "(", "(", "c", ",", "i", ")", "for", "i", ",", "c", "in", "enumerate", "(", "chars", ")", ")", "indices_char", "=", "dict", "(", "(", "i", ",", "c", ")", "for", "i", ",", "c", "in", "enumerate", "(", "chars", ")", ")", "return", "char_indices", ",", "indices_char" ]
Modified from Keras LSTM example
[ "Modified", "from", "Keras", "LSTM", "example" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L479-L486
231,457
totalgood/nlpia
src/nlpia/book/examples/ch09.py
onehot_encode
def onehot_encode(dataset, char_indices, maxlen): """ One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length) """ X = np.zeros((len(dataset), maxlen, len(char_indices.keys()))) for i, sentence in enumerate(dataset): for t, char in enumerate(sentence): X[i, t, char_indices[char]] = 1 return X
python
def onehot_encode(dataset, char_indices, maxlen): """ One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length) """ X = np.zeros((len(dataset), maxlen, len(char_indices.keys()))) for i, sentence in enumerate(dataset): for t, char in enumerate(sentence): X[i, t, char_indices[char]] = 1 return X
[ "def", "onehot_encode", "(", "dataset", ",", "char_indices", ",", "maxlen", ")", ":", "X", "=", "np", ".", "zeros", "(", "(", "len", "(", "dataset", ")", ",", "maxlen", ",", "len", "(", "char_indices", ".", "keys", "(", ")", ")", ")", ")", "for", "i", ",", "sentence", "in", "enumerate", "(", "dataset", ")", ":", "for", "t", ",", "char", "in", "enumerate", "(", "sentence", ")", ":", "X", "[", "i", ",", "t", ",", "char_indices", "[", "char", "]", "]", "=", "1", "return", "X" ]
One hot encode the tokens Args: dataset list of lists of tokens char_indices dictionary of {key=character, value=index to use encoding vector} maxlen int Length of each sample Return: np array of shape (samples, tokens, encoding length)
[ "One", "hot", "encode", "the", "tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch09.py#L495-L510
231,458
totalgood/nlpia
src/nlpia/book/examples/ch04_sklearn_pca_source.py
_fit_full
def _fit_full(self=self, X=X, n_components=6): """Fit the model by computing full SVD on X""" n_samples, n_features = X.shape # Center data self.mean_ = np.mean(X, axis=0) print(self.mean_) X -= self.mean_ print(X.round(2)) U, S, V = linalg.svd(X, full_matrices=False) print(V.round(2)) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V print(components_.round(2)) # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. self.n_samples_, self.n_features_ = n_samples, n_features self.components_ = components_[:n_components] print(self.components_.round(2)) self.n_components_ = n_components self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] self.singular_values_ = singular_values_[:n_components] return U, S, V
python
def _fit_full(self=self, X=X, n_components=6): """Fit the model by computing full SVD on X""" n_samples, n_features = X.shape # Center data self.mean_ = np.mean(X, axis=0) print(self.mean_) X -= self.mean_ print(X.round(2)) U, S, V = linalg.svd(X, full_matrices=False) print(V.round(2)) # flip eigenvectors' sign to enforce deterministic output U, V = svd_flip(U, V) components_ = V print(components_.round(2)) # Get variance explained by singular values explained_variance_ = (S ** 2) / (n_samples - 1) total_var = explained_variance_.sum() explained_variance_ratio_ = explained_variance_ / total_var singular_values_ = S.copy() # Store the singular values. # Postprocess the number of components required if n_components == 'mle': n_components = \ _infer_dimension_(explained_variance_, n_samples, n_features) elif 0 < n_components < 1.0: # number of components for which the cumulated explained # variance percentage is superior to the desired threshold ratio_cumsum = stable_cumsum(explained_variance_ratio_) n_components = np.searchsorted(ratio_cumsum, n_components) + 1 # Compute noise covariance using Probabilistic PCA model # The sigma2 maximum likelihood (cf. eq. 12.46) if n_components < min(n_features, n_samples): self.noise_variance_ = explained_variance_[n_components:].mean() else: self.noise_variance_ = 0. self.n_samples_, self.n_features_ = n_samples, n_features self.components_ = components_[:n_components] print(self.components_.round(2)) self.n_components_ = n_components self.explained_variance_ = explained_variance_[:n_components] self.explained_variance_ratio_ = \ explained_variance_ratio_[:n_components] self.singular_values_ = singular_values_[:n_components] return U, S, V
[ "def", "_fit_full", "(", "self", "=", "self", ",", "X", "=", "X", ",", "n_components", "=", "6", ")", ":", "n_samples", ",", "n_features", "=", "X", ".", "shape", "# Center data", "self", ".", "mean_", "=", "np", ".", "mean", "(", "X", ",", "axis", "=", "0", ")", "print", "(", "self", ".", "mean_", ")", "X", "-=", "self", ".", "mean_", "print", "(", "X", ".", "round", "(", "2", ")", ")", "U", ",", "S", ",", "V", "=", "linalg", ".", "svd", "(", "X", ",", "full_matrices", "=", "False", ")", "print", "(", "V", ".", "round", "(", "2", ")", ")", "# flip eigenvectors' sign to enforce deterministic output", "U", ",", "V", "=", "svd_flip", "(", "U", ",", "V", ")", "components_", "=", "V", "print", "(", "components_", ".", "round", "(", "2", ")", ")", "# Get variance explained by singular values", "explained_variance_", "=", "(", "S", "**", "2", ")", "/", "(", "n_samples", "-", "1", ")", "total_var", "=", "explained_variance_", ".", "sum", "(", ")", "explained_variance_ratio_", "=", "explained_variance_", "/", "total_var", "singular_values_", "=", "S", ".", "copy", "(", ")", "# Store the singular values.", "# Postprocess the number of components required", "if", "n_components", "==", "'mle'", ":", "n_components", "=", "_infer_dimension_", "(", "explained_variance_", ",", "n_samples", ",", "n_features", ")", "elif", "0", "<", "n_components", "<", "1.0", ":", "# number of components for which the cumulated explained", "# variance percentage is superior to the desired threshold", "ratio_cumsum", "=", "stable_cumsum", "(", "explained_variance_ratio_", ")", "n_components", "=", "np", ".", "searchsorted", "(", "ratio_cumsum", ",", "n_components", ")", "+", "1", "# Compute noise covariance using Probabilistic PCA model", "# The sigma2 maximum likelihood (cf. eq. 
12.46)", "if", "n_components", "<", "min", "(", "n_features", ",", "n_samples", ")", ":", "self", ".", "noise_variance_", "=", "explained_variance_", "[", "n_components", ":", "]", ".", "mean", "(", ")", "else", ":", "self", ".", "noise_variance_", "=", "0.", "self", ".", "n_samples_", ",", "self", ".", "n_features_", "=", "n_samples", ",", "n_features", "self", ".", "components_", "=", "components_", "[", ":", "n_components", "]", "print", "(", "self", ".", "components_", ".", "round", "(", "2", ")", ")", "self", ".", "n_components_", "=", "n_components", "self", ".", "explained_variance_", "=", "explained_variance_", "[", ":", "n_components", "]", "self", ".", "explained_variance_ratio_", "=", "explained_variance_ratio_", "[", ":", "n_components", "]", "self", ".", "singular_values_", "=", "singular_values_", "[", ":", "n_components", "]", "return", "U", ",", "S", ",", "V" ]
Fit the model by computing full SVD on X
[ "Fit", "the", "model", "by", "computing", "full", "SVD", "on", "X" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch04_sklearn_pca_source.py#L136-L186
231,459
totalgood/nlpia
src/nlpia/clean_alice.py
extract_aiml
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'): """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """ path = find_data_path(path) or path if os.path.isdir(path): paths = os.listdir(path) paths = [os.path.join(path, p) for p in paths] else: zf = zipfile.ZipFile(path) paths = [] for name in zf.namelist(): if '.hg/' in name: continue paths.append(zf.extract(name, path=BIGDATA_PATH)) return paths
python
def extract_aiml(path='aiml-en-us-foundation-alice.v1-9'): """ Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths """ path = find_data_path(path) or path if os.path.isdir(path): paths = os.listdir(path) paths = [os.path.join(path, p) for p in paths] else: zf = zipfile.ZipFile(path) paths = [] for name in zf.namelist(): if '.hg/' in name: continue paths.append(zf.extract(name, path=BIGDATA_PATH)) return paths
[ "def", "extract_aiml", "(", "path", "=", "'aiml-en-us-foundation-alice.v1-9'", ")", ":", "path", "=", "find_data_path", "(", "path", ")", "or", "path", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "paths", "=", "os", ".", "listdir", "(", "path", ")", "paths", "=", "[", "os", ".", "path", ".", "join", "(", "path", ",", "p", ")", "for", "p", "in", "paths", "]", "else", ":", "zf", "=", "zipfile", ".", "ZipFile", "(", "path", ")", "paths", "=", "[", "]", "for", "name", "in", "zf", ".", "namelist", "(", ")", ":", "if", "'.hg/'", "in", "name", ":", "continue", "paths", ".", "append", "(", "zf", ".", "extract", "(", "name", ",", "path", "=", "BIGDATA_PATH", ")", ")", "return", "paths" ]
Extract an aiml.zip file if it hasn't been already and return a list of aiml file paths
[ "Extract", "an", "aiml", ".", "zip", "file", "if", "it", "hasn", "t", "been", "already", "and", "return", "a", "list", "of", "aiml", "file", "paths" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/clean_alice.py#L85-L98
231,460
totalgood/nlpia
src/nlpia/clean_alice.py
create_brain
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'): """ Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files """ path = find_data_path(path) or path bot = Bot() num_templates = bot._brain.template_count paths = extract_aiml(path=path) for path in paths: if not path.lower().endswith('.aiml'): continue try: bot.learn(path) except AimlParserError: logger.error(format_exc()) logger.warning('AIML Parse Error: {}'.format(path)) num_templates = bot._brain.template_count - num_templates logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates)) print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths))) return bot
python
def create_brain(path='aiml-en-us-foundation-alice.v1-9.zip'): """ Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files """ path = find_data_path(path) or path bot = Bot() num_templates = bot._brain.template_count paths = extract_aiml(path=path) for path in paths: if not path.lower().endswith('.aiml'): continue try: bot.learn(path) except AimlParserError: logger.error(format_exc()) logger.warning('AIML Parse Error: {}'.format(path)) num_templates = bot._brain.template_count - num_templates logger.info('Loaded {} trigger-response pairs.\n'.format(num_templates)) print('Loaded {} trigger-response pairs from {} AIML files.'.format(bot._brain.template_count, len(paths))) return bot
[ "def", "create_brain", "(", "path", "=", "'aiml-en-us-foundation-alice.v1-9.zip'", ")", ":", "path", "=", "find_data_path", "(", "path", ")", "or", "path", "bot", "=", "Bot", "(", ")", "num_templates", "=", "bot", ".", "_brain", ".", "template_count", "paths", "=", "extract_aiml", "(", "path", "=", "path", ")", "for", "path", "in", "paths", ":", "if", "not", "path", ".", "lower", "(", ")", ".", "endswith", "(", "'.aiml'", ")", ":", "continue", "try", ":", "bot", ".", "learn", "(", "path", ")", "except", "AimlParserError", ":", "logger", ".", "error", "(", "format_exc", "(", ")", ")", "logger", ".", "warning", "(", "'AIML Parse Error: {}'", ".", "format", "(", "path", ")", ")", "num_templates", "=", "bot", ".", "_brain", ".", "template_count", "-", "num_templates", "logger", ".", "info", "(", "'Loaded {} trigger-response pairs.\\n'", ".", "format", "(", "num_templates", ")", ")", "print", "(", "'Loaded {} trigger-response pairs from {} AIML files.'", ".", "format", "(", "bot", ".", "_brain", ".", "template_count", ",", "len", "(", "paths", ")", ")", ")", "return", "bot" ]
Create an aiml_bot.Bot brain from an AIML zip file or directory of AIML files
[ "Create", "an", "aiml_bot", ".", "Bot", "brain", "from", "an", "AIML", "zip", "file", "or", "directory", "of", "AIML", "files" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/clean_alice.py#L101-L119
231,461
totalgood/nlpia
src/nlpia/transcoders.py
minify_urls
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None): """ Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested! """ access_token = access_token or secrets.bitly.access_token output_ext = output_ext or '' url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex filemetas = [] for filemeta in find_files(filepath, ext=ext): filemetas += [filemeta] altered_text = '' with open(filemeta['path'], 'rt') as fin: text = fin.read() end = 0 for match in url_regex.finditer(text): url = match.group() start = match.start() altered_text += text[:start] resp = requests.get('https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format( access_token, url), allow_redirects=True, timeout=5) js = resp.json() short_url = js['shortUrl'] altered_text += short_url end = start + len(url) altered_text += text[end:] with open(filemeta['path'] + (output_ext or ''), 'wt') as fout: fout.write(altered_text) return altered_text
python
def minify_urls(filepath, ext='asc', url_regex=None, output_ext='.urls_minified', access_token=None): """ Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested! """ access_token = access_token or secrets.bitly.access_token output_ext = output_ext or '' url_regex = regex.compile(url_regex) if isinstance(url_regex, str) else url_regex filemetas = [] for filemeta in find_files(filepath, ext=ext): filemetas += [filemeta] altered_text = '' with open(filemeta['path'], 'rt') as fin: text = fin.read() end = 0 for match in url_regex.finditer(text): url = match.group() start = match.start() altered_text += text[:start] resp = requests.get('https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'.format( access_token, url), allow_redirects=True, timeout=5) js = resp.json() short_url = js['shortUrl'] altered_text += short_url end = start + len(url) altered_text += text[end:] with open(filemeta['path'] + (output_ext or ''), 'wt') as fout: fout.write(altered_text) return altered_text
[ "def", "minify_urls", "(", "filepath", ",", "ext", "=", "'asc'", ",", "url_regex", "=", "None", ",", "output_ext", "=", "'.urls_minified'", ",", "access_token", "=", "None", ")", ":", "access_token", "=", "access_token", "or", "secrets", ".", "bitly", ".", "access_token", "output_ext", "=", "output_ext", "or", "''", "url_regex", "=", "regex", ".", "compile", "(", "url_regex", ")", "if", "isinstance", "(", "url_regex", ",", "str", ")", "else", "url_regex", "filemetas", "=", "[", "]", "for", "filemeta", "in", "find_files", "(", "filepath", ",", "ext", "=", "ext", ")", ":", "filemetas", "+=", "[", "filemeta", "]", "altered_text", "=", "''", "with", "open", "(", "filemeta", "[", "'path'", "]", ",", "'rt'", ")", "as", "fin", ":", "text", "=", "fin", ".", "read", "(", ")", "end", "=", "0", "for", "match", "in", "url_regex", ".", "finditer", "(", "text", ")", ":", "url", "=", "match", ".", "group", "(", ")", "start", "=", "match", ".", "start", "(", ")", "altered_text", "+=", "text", "[", ":", "start", "]", "resp", "=", "requests", ".", "get", "(", "'https://api-ssl.bitly.com/v3/shorten?access_token={}&longUrl={}'", ".", "format", "(", "access_token", ",", "url", ")", ",", "allow_redirects", "=", "True", ",", "timeout", "=", "5", ")", "js", "=", "resp", ".", "json", "(", ")", "short_url", "=", "js", "[", "'shortUrl'", "]", "altered_text", "+=", "short_url", "end", "=", "start", "+", "len", "(", "url", ")", "altered_text", "+=", "text", "[", "end", ":", "]", "with", "open", "(", "filemeta", "[", "'path'", "]", "+", "(", "output_ext", "or", "''", ")", ",", "'wt'", ")", "as", "fout", ":", "fout", ".", "write", "(", "altered_text", ")", "return", "altered_text" ]
Use bitly or similar minifier to shrink all URLs in text files within a folder structure. Used for the NLPIA manuscript directory for Manning Publishing bitly API: https://dev.bitly.com/links.html Args: path (str): Directory or file path ext (str): File name extension to filter text files by. default='.asc' output_ext (str): Extension to append to filenames of altered files default='' (in-place replacement of URLs) FIXME: NotImplementedError! Untested!
[ "Use", "bitly", "or", "similar", "minifier", "to", "shrink", "all", "URLs", "in", "text", "files", "within", "a", "folder", "structure", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L22-L59
231,462
totalgood/nlpia
src/nlpia/transcoders.py
delimit_slug
def delimit_slug(slug, sep=' '): """ Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA' """ hyphenated_slug = re.sub(CRE_SLUG_DELIMITTER, sep, slug) return hyphenated_slug
python
def delimit_slug(slug, sep=' '): """ Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA' """ hyphenated_slug = re.sub(CRE_SLUG_DELIMITTER, sep, slug) return hyphenated_slug
[ "def", "delimit_slug", "(", "slug", ",", "sep", "=", "' '", ")", ":", "hyphenated_slug", "=", "re", ".", "sub", "(", "CRE_SLUG_DELIMITTER", ",", "sep", ",", "slug", ")", "return", "hyphenated_slug" ]
Return a str of separated tokens found within a slugLike_This => 'slug Like This' >>> delimit_slug("slugLike_ThisW/aTLA's") 'slug Like This W a TLA s' >>> delimit_slug('slugLike_ThisW/aTLA', '|') 'slug|Like|This|W|a|TLA'
[ "Return", "a", "str", "of", "separated", "tokens", "found", "within", "a", "slugLike_This", "=", ">", "slug", "Like", "This" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L62-L71
231,463
totalgood/nlpia
src/nlpia/transcoders.py
clean_asciidoc
def clean_asciidoc(text): r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!' """ text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text) text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text) return text
python
def clean_asciidoc(text): r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!' """ text = re.sub(r'(\b|^)[\[_*]{1,2}([a-zA-Z0-9])', r'"\2', text) text = re.sub(r'([a-zA-Z0-9])[\]_*]{1,2}', r'\1"', text) return text
[ "def", "clean_asciidoc", "(", "text", ")", ":", "text", "=", "re", ".", "sub", "(", "r'(\\b|^)[\\[_*]{1,2}([a-zA-Z0-9])'", ",", "r'\"\\2'", ",", "text", ")", "text", "=", "re", ".", "sub", "(", "r'([a-zA-Z0-9])[\\]_*]{1,2}'", ",", "r'\\1\"'", ",", "text", ")", "return", "text" ]
r""" Transform asciidoc text into ASCII text that NL parsers can handle TODO: Tag lines and words with meta data like italics, underlined, bold, title, heading 1, etc >>> clean_asciidoc('**Hello** _world_!') '"Hello" "world"!'
[ "r", "Transform", "asciidoc", "text", "into", "ASCII", "text", "that", "NL", "parsers", "can", "handle" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L121-L132
231,464
totalgood/nlpia
src/nlpia/transcoders.py
split_sentences_regex
def split_sentences_regex(text): """ Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson'] """ parts = regex.split(r'([a-zA-Z0-9][.?!])[\s$]', text) sentences = [''.join(s) for s in zip(parts[0::2], parts[1::2])] return sentences + [parts[-1]] if len(parts) % 2 else sentences
python
def split_sentences_regex(text): """ Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson'] """ parts = regex.split(r'([a-zA-Z0-9][.?!])[\s$]', text) sentences = [''.join(s) for s in zip(parts[0::2], parts[1::2])] return sentences + [parts[-1]] if len(parts) % 2 else sentences
[ "def", "split_sentences_regex", "(", "text", ")", ":", "parts", "=", "regex", ".", "split", "(", "r'([a-zA-Z0-9][.?!])[\\s$]'", ",", "text", ")", "sentences", "=", "[", "''", ".", "join", "(", "s", ")", "for", "s", "in", "zip", "(", "parts", "[", "0", ":", ":", "2", "]", ",", "parts", "[", "1", ":", ":", "2", "]", ")", "]", "return", "sentences", "+", "[", "parts", "[", "-", "1", "]", "]", "if", "len", "(", "parts", ")", "%", "2", "else", "sentences" ]
Use dead-simple regex to split text into sentences. Very poor accuracy. >>> split_sentences_regex("Hello World. I'm I.B.M.'s Watson. --Watson") ['Hello World.', "I'm I.B.M.'s Watson.", '--Watson']
[ "Use", "dead", "-", "simple", "regex", "to", "split", "text", "into", "sentences", ".", "Very", "poor", "accuracy", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L157-L165
231,465
totalgood/nlpia
src/nlpia/transcoders.py
split_sentences_spacy
def split_sentences_spacy(text, language_model='en'): r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0'] """ doc = nlp(text) sentences = [] if not hasattr(doc, 'sents'): logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded") return split_sentences_nltk(text) for w, span in enumerate(doc.sents): sent = ''.join(doc[i].string for i in range(span.start, span.end)).strip() if len(sent): sentences.append(sent) return sentences
python
def split_sentences_spacy(text, language_model='en'): r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0'] """ doc = nlp(text) sentences = [] if not hasattr(doc, 'sents'): logger.warning("Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded") return split_sentences_nltk(text) for w, span in enumerate(doc.sents): sent = ''.join(doc[i].string for i in range(span.start, span.end)).strip() if len(sent): sentences.append(sent) return sentences
[ "def", "split_sentences_spacy", "(", "text", ",", "language_model", "=", "'en'", ")", ":", "doc", "=", "nlp", "(", "text", ")", "sentences", "=", "[", "]", "if", "not", "hasattr", "(", "doc", ",", "'sents'", ")", ":", "logger", ".", "warning", "(", "\"Using NLTK sentence tokenizer because SpaCy language model hasn't been loaded\"", ")", "return", "split_sentences_nltk", "(", "text", ")", "for", "w", ",", "span", "in", "enumerate", "(", "doc", ".", "sents", ")", ":", "sent", "=", "''", ".", "join", "(", "doc", "[", "i", "]", ".", "string", "for", "i", "in", "range", "(", "span", ".", "start", ",", "span", ".", "end", ")", ")", ".", "strip", "(", ")", "if", "len", "(", "sent", ")", ":", "sentences", ".", "append", "(", "sent", ")", "return", "sentences" ]
r""" You must download a spacy language model with python -m download 'en' The default English language model for spacy tends to be a lot more agressive than NLTK's punkt: >>> split_sentences_nltk("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-\nbe human @ I.B.M.", ';) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace.\nI'm a wanna-\nbe human @ I.B.M. ;) --Watson 2.0") ['Hi Ms. Lovelace.', "I'm a wanna-", 'be human @', 'I.B.M. ;) --Watson 2.0'] >>> split_sentences_spacy("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M. --Watson 2.0"] >>> split_sentences_nltk("Hi Ms. Lovelace. I'm at I.B.M. --Watson 2.0") ['Hi Ms. Lovelace.', "I'm at I.B.M.", '--Watson 2.0']
[ "r", "You", "must", "download", "a", "spacy", "language", "model", "with", "python", "-", "m", "download", "en" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L168-L192
231,466
totalgood/nlpia
src/nlpia/transcoders.py
segment_sentences
def segment_sentences(path=os.path.join(DATA_PATH, 'book'), splitter=split_sentences_nltk, **find_files_kwargs): """ Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23 """ sentences = [] if os.path.isdir(path): for filemeta in find_files(path, **find_files_kwargs): with open(filemeta['path']) as fin: i, batch = 0, [] try: for i, line in enumerate(fin): if not line.strip(): sentences.extend(splitter('\n'.join(batch))) batch = [line] # may contain all whitespace else: batch.append(line) except (UnicodeDecodeError, IOError): logger.error('UnicodeDecodeError or IOError on line {} in file {} from stat: {}'.format( i + 1, fin.name, filemeta)) raise if len(batch): # TODO: tag sentences with line + filename where they started sentences.extend(splitter('\n'.join(batch))) else: batch = [] for i, line in enumerate(iter_lines(path)): # TODO: filter out code and meta lines using asciidoc or markdown parser # split into batches based on empty lines if not line.strip(): sentences.extend(splitter('\n'.join(batch))) # first line may contain all whitespace batch = [line] else: batch.append(line) if len(batch): # TODO: tag sentences with line + filename where they started 
sentences.extend(splitter('\n'.join(batch))) return sentences
python
def segment_sentences(path=os.path.join(DATA_PATH, 'book'), splitter=split_sentences_nltk, **find_files_kwargs): """ Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23 """ sentences = [] if os.path.isdir(path): for filemeta in find_files(path, **find_files_kwargs): with open(filemeta['path']) as fin: i, batch = 0, [] try: for i, line in enumerate(fin): if not line.strip(): sentences.extend(splitter('\n'.join(batch))) batch = [line] # may contain all whitespace else: batch.append(line) except (UnicodeDecodeError, IOError): logger.error('UnicodeDecodeError or IOError on line {} in file {} from stat: {}'.format( i + 1, fin.name, filemeta)) raise if len(batch): # TODO: tag sentences with line + filename where they started sentences.extend(splitter('\n'.join(batch))) else: batch = [] for i, line in enumerate(iter_lines(path)): # TODO: filter out code and meta lines using asciidoc or markdown parser # split into batches based on empty lines if not line.strip(): sentences.extend(splitter('\n'.join(batch))) # first line may contain all whitespace batch = [line] else: batch.append(line) if len(batch): # TODO: tag sentences with line + filename where they started 
sentences.extend(splitter('\n'.join(batch))) return sentences
[ "def", "segment_sentences", "(", "path", "=", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'book'", ")", ",", "splitter", "=", "split_sentences_nltk", ",", "*", "*", "find_files_kwargs", ")", ":", "sentences", "=", "[", "]", "if", "os", ".", "path", ".", "isdir", "(", "path", ")", ":", "for", "filemeta", "in", "find_files", "(", "path", ",", "*", "*", "find_files_kwargs", ")", ":", "with", "open", "(", "filemeta", "[", "'path'", "]", ")", "as", "fin", ":", "i", ",", "batch", "=", "0", ",", "[", "]", "try", ":", "for", "i", ",", "line", "in", "enumerate", "(", "fin", ")", ":", "if", "not", "line", ".", "strip", "(", ")", ":", "sentences", ".", "extend", "(", "splitter", "(", "'\\n'", ".", "join", "(", "batch", ")", ")", ")", "batch", "=", "[", "line", "]", "# may contain all whitespace", "else", ":", "batch", ".", "append", "(", "line", ")", "except", "(", "UnicodeDecodeError", ",", "IOError", ")", ":", "logger", ".", "error", "(", "'UnicodeDecodeError or IOError on line {} in file {} from stat: {}'", ".", "format", "(", "i", "+", "1", ",", "fin", ".", "name", ",", "filemeta", ")", ")", "raise", "if", "len", "(", "batch", ")", ":", "# TODO: tag sentences with line + filename where they started", "sentences", ".", "extend", "(", "splitter", "(", "'\\n'", ".", "join", "(", "batch", ")", ")", ")", "else", ":", "batch", "=", "[", "]", "for", "i", ",", "line", "in", "enumerate", "(", "iter_lines", "(", "path", ")", ")", ":", "# TODO: filter out code and meta lines using asciidoc or markdown parser", "# split into batches based on empty lines", "if", "not", "line", ".", "strip", "(", ")", ":", "sentences", ".", "extend", "(", "splitter", "(", "'\\n'", ".", "join", "(", "batch", ")", ")", ")", "# first line may contain all whitespace", "batch", "=", "[", "line", "]", "else", ":", "batch", ".", "append", "(", "line", ")", "if", "len", "(", "batch", ")", ":", "# TODO: tag sentences with line + filename where they started", "sentences", ".", "extend", "(", 
"splitter", "(", "'\\n'", ".", "join", "(", "batch", ")", ")", ")", "return", "sentences" ]
Return a list of all sentences and empty lines. TODO: 1. process each line with an aggressive sentence segmenter, like DetectorMorse 2. process our manuscript to create a complete-sentence and heading training set normalized/simplified syntax net tree is the input feature set common words and N-grams inserted with their label as additional feature 3. process a training set with a grammar checker and syntax to bootstrap a "complete sentence" labeler. 4. process each 1-3 line window (breaking on empty lines) with syntax net to label them 5. label each 1-3-line window of lines as "complete sentence, partial sentence/phrase, or multi-sentence" >>> 10000 > len(segment_sentences(path=os.path.join(DATA_PATH, 'book'))) >= 4 True >>> len(segment_sentences(path=os.path.join(DATA_PATH, 'psychology-scripts.txt'), splitter=split_sentences_nltk)) 23
[ "Return", "a", "list", "of", "all", "sentences", "and", "empty", "lines", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L216-L267
231,467
totalgood/nlpia
src/nlpia/transcoders.py
fix_hunspell_json
def fix_hunspell_json(badjson_path='en_us.json', goodjson_path='en_us_fixed.json'): """Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON """ with open(badjson_path, 'r') as fin: with open(goodjson_path, 'w') as fout: for i, line in enumerate(fin): line2 = regex.sub(r'\[(\w)', r'["\1', line) line2 = regex.sub(r'(\w)\]', r'\1"]', line2) line2 = regex.sub(r'(\w),(\w)', r'\1","\2', line2) fout.write(line2) with open(goodjson_path, 'r') as fin: words = [] with open(goodjson_path + '.txt', 'w') as fout: hunspell = json.load(fin) for word, affixes in hunspell['words'].items(): words += [word] fout.write(word + '\n') for affix in affixes: words += [affix] fout.write(affix + '\n') return words
python
def fix_hunspell_json(badjson_path='en_us.json', goodjson_path='en_us_fixed.json'): """Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON """ with open(badjson_path, 'r') as fin: with open(goodjson_path, 'w') as fout: for i, line in enumerate(fin): line2 = regex.sub(r'\[(\w)', r'["\1', line) line2 = regex.sub(r'(\w)\]', r'\1"]', line2) line2 = regex.sub(r'(\w),(\w)', r'\1","\2', line2) fout.write(line2) with open(goodjson_path, 'r') as fin: words = [] with open(goodjson_path + '.txt', 'w') as fout: hunspell = json.load(fin) for word, affixes in hunspell['words'].items(): words += [word] fout.write(word + '\n') for affix in affixes: words += [affix] fout.write(affix + '\n') return words
[ "def", "fix_hunspell_json", "(", "badjson_path", "=", "'en_us.json'", ",", "goodjson_path", "=", "'en_us_fixed.json'", ")", ":", "with", "open", "(", "badjson_path", ",", "'r'", ")", "as", "fin", ":", "with", "open", "(", "goodjson_path", ",", "'w'", ")", "as", "fout", ":", "for", "i", ",", "line", "in", "enumerate", "(", "fin", ")", ":", "line2", "=", "regex", ".", "sub", "(", "r'\\[(\\w)'", ",", "r'[\"\\1'", ",", "line", ")", "line2", "=", "regex", ".", "sub", "(", "r'(\\w)\\]'", ",", "r'\\1\"]'", ",", "line2", ")", "line2", "=", "regex", ".", "sub", "(", "r'(\\w),(\\w)'", ",", "r'\\1\",\"\\2'", ",", "line2", ")", "fout", ".", "write", "(", "line2", ")", "with", "open", "(", "goodjson_path", ",", "'r'", ")", "as", "fin", ":", "words", "=", "[", "]", "with", "open", "(", "goodjson_path", "+", "'.txt'", ",", "'w'", ")", "as", "fout", ":", "hunspell", "=", "json", ".", "load", "(", "fin", ")", "for", "word", ",", "affixes", "in", "hunspell", "[", "'words'", "]", ".", "items", "(", ")", ":", "words", "+=", "[", "word", "]", "fout", ".", "write", "(", "word", "+", "'\\n'", ")", "for", "affix", "in", "affixes", ":", "words", "+=", "[", "affix", "]", "fout", ".", "write", "(", "affix", "+", "'\\n'", ")", "return", "words" ]
Fix the invalid hunspellToJSON.py json format by inserting double-quotes in list of affix strings Args: badjson_path (str): path to input json file that doesn't properly quote goodjson_path (str): path to output json file with properly quoted strings in list of affixes Returns: list of all words with all possible affixes in *.txt format (simplified .dic format) References: Syed Faisal Ali 's Hunspell dic parser: https://github.com/SyedFaisalAli/HunspellToJSON
[ "Fix", "the", "invalid", "hunspellToJSON", ".", "py", "json", "format", "by", "inserting", "double", "-", "quotes", "in", "list", "of", "affix", "strings" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/transcoders.py#L295-L327
231,468
totalgood/nlpia
src/nlpia/book/examples/ch12_retrieval.py
format_ubuntu_dialog
def format_ubuntu_dialog(df): """ Print statements paired with replies, formatted for easy review """ s = '' for i, record in df.iterrows(): statement = list(split_turns(record.Context))[-1] # <1> reply = list(split_turns(record.Utterance))[-1] # <2> s += 'Statement: {}\n'.format(statement) s += 'Reply: {}\n\n'.format(reply) return s
python
def format_ubuntu_dialog(df): """ Print statements paired with replies, formatted for easy review """ s = '' for i, record in df.iterrows(): statement = list(split_turns(record.Context))[-1] # <1> reply = list(split_turns(record.Utterance))[-1] # <2> s += 'Statement: {}\n'.format(statement) s += 'Reply: {}\n\n'.format(reply) return s
[ "def", "format_ubuntu_dialog", "(", "df", ")", ":", "s", "=", "''", "for", "i", ",", "record", "in", "df", ".", "iterrows", "(", ")", ":", "statement", "=", "list", "(", "split_turns", "(", "record", ".", "Context", ")", ")", "[", "-", "1", "]", "# <1>", "reply", "=", "list", "(", "split_turns", "(", "record", ".", "Utterance", ")", ")", "[", "-", "1", "]", "# <2>", "s", "+=", "'Statement: {}\\n'", ".", "format", "(", "statement", ")", "s", "+=", "'Reply: {}\\n\\n'", ".", "format", "(", "reply", ")", "return", "s" ]
Print statements paired with replies, formatted for easy review
[ "Print", "statements", "paired", "with", "replies", "formatted", "for", "easy", "review" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch12_retrieval.py#L40-L48
231,469
totalgood/nlpia
src/nlpia/regexes.py
splitext
def splitext(filepath): """ Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '') """ exts = getattr(CRE_FILENAME_EXT.search(filepath), 'group', str)() return (filepath[:(-len(exts) or None)], exts)
python
def splitext(filepath): """ Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '') """ exts = getattr(CRE_FILENAME_EXT.search(filepath), 'group', str)() return (filepath[:(-len(exts) or None)], exts)
[ "def", "splitext", "(", "filepath", ")", ":", "exts", "=", "getattr", "(", "CRE_FILENAME_EXT", ".", "search", "(", "filepath", ")", ",", "'group'", ",", "str", ")", "(", ")", "return", "(", "filepath", "[", ":", "(", "-", "len", "(", "exts", ")", "or", "None", ")", "]", ",", "exts", ")" ]
Like os.path.splitext except splits compound extensions as one long one >>> splitext('~/.bashrc.asciidoc.ext.ps4.42') ('~/.bashrc', '.asciidoc.ext.ps4.42') >>> splitext('~/.bash_profile') ('~/.bash_profile', '')
[ "Like", "os", ".", "path", ".", "splitext", "except", "splits", "compound", "extensions", "as", "one", "long", "one" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/regexes.py#L109-L118
231,470
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_scatter3d
def offline_plotly_scatter3d(df, x=0, y=1, z=-1): """ Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df) """ data = [] # clusters = [] colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)'] # df.columns = clean_columns(df.columns) x = get_array(df, x, default=0) y = get_array(df, y, default=1) z = get_array(df, z, default=-1) for i in range(len(df['name'].unique())): name = df['Name'].unique()[i] color = colors[i] x = x[pd.np.array(df['name'] == name)] y = y[pd.np.array(df['name'] == name)] z = z[pd.np.array(df['name'] == name)] trace = dict( name=name, x=x, y=y, z=z, type="scatter3d", mode='markers', marker=dict(size=3, color=color, line=dict(width=0))) data.append(trace) layout = dict( width=800, height=550, autosize=False, title='Iris dataset', scene=dict( xaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), yaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), zaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), aspectratio=dict(x=1, y=1, z=0.7), aspectmode='manual' ), ) fig = dict(data=data, layout=layout) # IPython notebook # plotly.iplot(fig, filename='pandas-3d-iris', validate=False) url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False) return url
python
def offline_plotly_scatter3d(df, x=0, y=1, z=-1): """ Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df) """ data = [] # clusters = [] colors = ['rgb(228,26,28)', 'rgb(55,126,184)', 'rgb(77,175,74)'] # df.columns = clean_columns(df.columns) x = get_array(df, x, default=0) y = get_array(df, y, default=1) z = get_array(df, z, default=-1) for i in range(len(df['name'].unique())): name = df['Name'].unique()[i] color = colors[i] x = x[pd.np.array(df['name'] == name)] y = y[pd.np.array(df['name'] == name)] z = z[pd.np.array(df['name'] == name)] trace = dict( name=name, x=x, y=y, z=z, type="scatter3d", mode='markers', marker=dict(size=3, color=color, line=dict(width=0))) data.append(trace) layout = dict( width=800, height=550, autosize=False, title='Iris dataset', scene=dict( xaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), yaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), zaxis=dict( gridcolor='rgb(255, 255, 255)', zerolinecolor='rgb(255, 255, 255)', showbackground=True, backgroundcolor='rgb(230, 230,230)' ), aspectratio=dict(x=1, y=1, z=0.7), aspectmode='manual' ), ) fig = dict(data=data, layout=layout) # IPython notebook # plotly.iplot(fig, filename='pandas-3d-iris', validate=False) url = plotly.offline.plot(fig, filename='pandas-3d-iris', validate=False) return url
[ "def", "offline_plotly_scatter3d", "(", "df", ",", "x", "=", "0", ",", "y", "=", "1", ",", "z", "=", "-", "1", ")", ":", "data", "=", "[", "]", "# clusters = []", "colors", "=", "[", "'rgb(228,26,28)'", ",", "'rgb(55,126,184)'", ",", "'rgb(77,175,74)'", "]", "# df.columns = clean_columns(df.columns)", "x", "=", "get_array", "(", "df", ",", "x", ",", "default", "=", "0", ")", "y", "=", "get_array", "(", "df", ",", "y", ",", "default", "=", "1", ")", "z", "=", "get_array", "(", "df", ",", "z", ",", "default", "=", "-", "1", ")", "for", "i", "in", "range", "(", "len", "(", "df", "[", "'name'", "]", ".", "unique", "(", ")", ")", ")", ":", "name", "=", "df", "[", "'Name'", "]", ".", "unique", "(", ")", "[", "i", "]", "color", "=", "colors", "[", "i", "]", "x", "=", "x", "[", "pd", ".", "np", ".", "array", "(", "df", "[", "'name'", "]", "==", "name", ")", "]", "y", "=", "y", "[", "pd", ".", "np", ".", "array", "(", "df", "[", "'name'", "]", "==", "name", ")", "]", "z", "=", "z", "[", "pd", ".", "np", ".", "array", "(", "df", "[", "'name'", "]", "==", "name", ")", "]", "trace", "=", "dict", "(", "name", "=", "name", ",", "x", "=", "x", ",", "y", "=", "y", ",", "z", "=", "z", ",", "type", "=", "\"scatter3d\"", ",", "mode", "=", "'markers'", ",", "marker", "=", "dict", "(", "size", "=", "3", ",", "color", "=", "color", ",", "line", "=", "dict", "(", "width", "=", "0", ")", ")", ")", "data", ".", "append", "(", "trace", ")", "layout", "=", "dict", "(", "width", "=", "800", ",", "height", "=", "550", ",", "autosize", "=", "False", ",", "title", "=", "'Iris dataset'", ",", "scene", "=", "dict", "(", "xaxis", "=", "dict", "(", "gridcolor", "=", "'rgb(255, 255, 255)'", ",", "zerolinecolor", "=", "'rgb(255, 255, 255)'", ",", "showbackground", "=", "True", ",", "backgroundcolor", "=", "'rgb(230, 230,230)'", ")", ",", "yaxis", "=", "dict", "(", "gridcolor", "=", "'rgb(255, 255, 255)'", ",", "zerolinecolor", "=", "'rgb(255, 255, 255)'", ",", "showbackground", "=", "True", ",", 
"backgroundcolor", "=", "'rgb(230, 230,230)'", ")", ",", "zaxis", "=", "dict", "(", "gridcolor", "=", "'rgb(255, 255, 255)'", ",", "zerolinecolor", "=", "'rgb(255, 255, 255)'", ",", "showbackground", "=", "True", ",", "backgroundcolor", "=", "'rgb(230, 230,230)'", ")", ",", "aspectratio", "=", "dict", "(", "x", "=", "1", ",", "y", "=", "1", ",", "z", "=", "0.7", ")", ",", "aspectmode", "=", "'manual'", ")", ",", ")", "fig", "=", "dict", "(", "data", "=", "data", ",", "layout", "=", "layout", ")", "# IPython notebook", "# plotly.iplot(fig, filename='pandas-3d-iris', validate=False)", "url", "=", "plotly", ".", "offline", ".", "plot", "(", "fig", ",", "filename", "=", "'pandas-3d-iris'", ",", "validate", "=", "False", ")", "return", "url" ]
Plot an offline scatter plot colored according to the categories in the 'name' column. >> df = pd.read_csv('https://raw.githubusercontent.com/plotly/datasets/master/iris.csv') >> offline_plotly(df)
[ "Plot", "an", "offline", "scatter", "plot", "colored", "according", "to", "the", "categories", "in", "the", "name", "column", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L107-L172
231,471
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_data
def offline_plotly_data(data, filename=None, config=None, validate=True, default_width='100%', default_height=525, global_requirejs=False): r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) if config is not None: config_default.update(config) with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f: js = f.read() html, divid, width, height = _plot_html( data, config=config_default, validate=validate, default_width=default_width, default_height=default_height, global_requirejs=global_requirejs) html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html) if filename and isinstance(filename, str): with open(filename, 'wt') as f: f.write(html) return html
python
def offline_plotly_data(data, filename=None, config=None, validate=True, default_width='100%', default_height=525, global_requirejs=False): r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) if config is not None: config_default.update(config) with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as f: js = f.read() html, divid, width, height = _plot_html( data, config=config_default, validate=validate, default_width=default_width, default_height=default_height, global_requirejs=global_requirejs) html = PLOTLY_HTML.format(plotlyjs=js, plotlyhtml=html) if filename and isinstance(filename, str): with open(filename, 'wt') as f: f.write(html) return html
[ "def", "offline_plotly_data", "(", "data", ",", "filename", "=", "None", ",", "config", "=", "None", ",", "validate", "=", "True", ",", "default_width", "=", "'100%'", ",", "default_height", "=", "525", ",", "global_requirejs", "=", "False", ")", ":", "config_default", "=", "dict", "(", "DEFAULT_PLOTLY_CONFIG", ")", "if", "config", "is", "not", "None", ":", "config_default", ".", "update", "(", "config", ")", "with", "open", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'plotly.js.min'", ")", ",", "'rt'", ")", "as", "f", ":", "js", "=", "f", ".", "read", "(", ")", "html", ",", "divid", ",", "width", ",", "height", "=", "_plot_html", "(", "data", ",", "config", "=", "config_default", ",", "validate", "=", "validate", ",", "default_width", "=", "default_width", ",", "default_height", "=", "default_height", ",", "global_requirejs", "=", "global_requirejs", ")", "html", "=", "PLOTLY_HTML", ".", "format", "(", "plotlyjs", "=", "js", ",", "plotlyhtml", "=", "html", ")", "if", "filename", "and", "isinstance", "(", "filename", ",", "str", ")", ":", "with", "open", "(", "filename", ",", "'wt'", ")", "as", "f", ":", "f", ".", "write", "(", "html", ")", "return", "html" ]
r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None)
[ "r", "Write", "a", "plotly", "scatter", "plot", "to", "HTML", "file", "that", "doesn", "t", "require", "server" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L189-L223
231,472
totalgood/nlpia
src/nlpia/plots.py
normalize_etpinard_df
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(), category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']): """Reformat a dataframe in etpinard's format for use in plot functions and sklearn models""" possible_categories = ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] if possible_categories is None else possible_categories df.columns = clean_columns(df.columns) df = pd.read_csv(df) if isinstance(df, str) else df columns = clean_columns(list(columns)) df2 = pd.DataFrame(columns=columns) df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories]) columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns]) for col, category_cols in columns: df2[col] = np.concatenate([df[label].values for label in category_cols]) return df2
python
def normalize_etpinard_df(df='https://plot.ly/~etpinard/191.csv', columns='x y size text'.split(), category_col='category', possible_categories=['Africa', 'Americas', 'Asia', 'Europe', 'Oceania']): """Reformat a dataframe in etpinard's format for use in plot functions and sklearn models""" possible_categories = ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] if possible_categories is None else possible_categories df.columns = clean_columns(df.columns) df = pd.read_csv(df) if isinstance(df, str) else df columns = clean_columns(list(columns)) df2 = pd.DataFrame(columns=columns) df2[category_col] = np.concatenate([np.array([categ] * len(df)) for categ in possible_categories]) columns = zip(columns, [[clean_columns(categ + ', ' + column) for categ in possible_categories] for column in columns]) for col, category_cols in columns: df2[col] = np.concatenate([df[label].values for label in category_cols]) return df2
[ "def", "normalize_etpinard_df", "(", "df", "=", "'https://plot.ly/~etpinard/191.csv'", ",", "columns", "=", "'x y size text'", ".", "split", "(", ")", ",", "category_col", "=", "'category'", ",", "possible_categories", "=", "[", "'Africa'", ",", "'Americas'", ",", "'Asia'", ",", "'Europe'", ",", "'Oceania'", "]", ")", ":", "possible_categories", "=", "[", "'Africa'", ",", "'Americas'", ",", "'Asia'", ",", "'Europe'", ",", "'Oceania'", "]", "if", "possible_categories", "is", "None", "else", "possible_categories", "df", ".", "columns", "=", "clean_columns", "(", "df", ".", "columns", ")", "df", "=", "pd", ".", "read_csv", "(", "df", ")", "if", "isinstance", "(", "df", ",", "str", ")", "else", "df", "columns", "=", "clean_columns", "(", "list", "(", "columns", ")", ")", "df2", "=", "pd", ".", "DataFrame", "(", "columns", "=", "columns", ")", "df2", "[", "category_col", "]", "=", "np", ".", "concatenate", "(", "[", "np", ".", "array", "(", "[", "categ", "]", "*", "len", "(", "df", ")", ")", "for", "categ", "in", "possible_categories", "]", ")", "columns", "=", "zip", "(", "columns", ",", "[", "[", "clean_columns", "(", "categ", "+", "', '", "+", "column", ")", "for", "categ", "in", "possible_categories", "]", "for", "column", "in", "columns", "]", ")", "for", "col", ",", "category_cols", "in", "columns", ":", "df2", "[", "col", "]", "=", "np", ".", "concatenate", "(", "[", "df", "[", "label", "]", ".", "values", "for", "label", "in", "category_cols", "]", ")", "return", "df2" ]
Reformat a dataframe in etpinard's format for use in plot functions and sklearn models
[ "Reformat", "a", "dataframe", "in", "etpinard", "s", "format", "for", "use", "in", "plot", "functions", "and", "sklearn", "models" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L226-L239
231,473
totalgood/nlpia
src/nlpia/plots.py
offline_plotly_scatter_bubble
def offline_plotly_scatter_bubble(df, x='x', y='y', size_col='size', text_col='text', category_col='category', possible_categories=None, filename=None, config={'displaylogo': False}, xscale=None, yscale='log', layout={'hovermode': 'closest', 'showlegend': False, 'autosize': True}, marker={'sizemode': 'area'}, min_size=10, ): r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... 
layout={}, marker={'sizeref': 3000}) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) marker_default = { 'size': size_col or min_size, 'sizemode': 'area', 'sizeref': int(df[size_col].min() * .8) if size_col else min_size} marker_default.update(marker) size_col = marker_default.pop('size') layout_default = { 'xaxis': XAxis(title=x, type=xscale), 'yaxis': YAxis(title=y, type=yscale), } layout_default.update(**layout) if config is not None: config_default.update(config) df.columns = clean_columns(df.columns) if possible_categories is None and category_col is not None: if category_col in df.columns: category_labels = df[category_col] else: category_labels = np.array(category_col) possible_categories = list(set(category_labels)) possible_categories = [None] if possible_categories is None else possible_categories if category_col and category_col in df: masks = [np.array(df[category_col] == label) for label in possible_categories] else: masks = [np.array([True] * len(df))] * len(possible_categories) data = {'data': [ Scatter(x=df[x][mask].values, y=df[y][mask].values, text=df[text_col][mask].values, marker=Marker(size=df[size_col][mask] if size_col in df.columns else size_col, **marker_default), mode='markers', name=str(category_name)) for (category_name, mask) in zip(possible_categories, masks) ], 'layout': Layout(**layout_default) } return offline_plotly_data(data, filename=filename, config=config_default)
python
def offline_plotly_scatter_bubble(df, x='x', y='y', size_col='size', text_col='text', category_col='category', possible_categories=None, filename=None, config={'displaylogo': False}, xscale=None, yscale='log', layout={'hovermode': 'closest', 'showlegend': False, 'autosize': True}, marker={'sizemode': 'area'}, min_size=10, ): r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... 
layout={}, marker={'sizeref': 3000}) """ config_default = dict(DEFAULT_PLOTLY_CONFIG) marker_default = { 'size': size_col or min_size, 'sizemode': 'area', 'sizeref': int(df[size_col].min() * .8) if size_col else min_size} marker_default.update(marker) size_col = marker_default.pop('size') layout_default = { 'xaxis': XAxis(title=x, type=xscale), 'yaxis': YAxis(title=y, type=yscale), } layout_default.update(**layout) if config is not None: config_default.update(config) df.columns = clean_columns(df.columns) if possible_categories is None and category_col is not None: if category_col in df.columns: category_labels = df[category_col] else: category_labels = np.array(category_col) possible_categories = list(set(category_labels)) possible_categories = [None] if possible_categories is None else possible_categories if category_col and category_col in df: masks = [np.array(df[category_col] == label) for label in possible_categories] else: masks = [np.array([True] * len(df))] * len(possible_categories) data = {'data': [ Scatter(x=df[x][mask].values, y=df[y][mask].values, text=df[text_col][mask].values, marker=Marker(size=df[size_col][mask] if size_col in df.columns else size_col, **marker_default), mode='markers', name=str(category_name)) for (category_name, mask) in zip(possible_categories, masks) ], 'layout': Layout(**layout_default) } return offline_plotly_data(data, filename=filename, config=config_default)
[ "def", "offline_plotly_scatter_bubble", "(", "df", ",", "x", "=", "'x'", ",", "y", "=", "'y'", ",", "size_col", "=", "'size'", ",", "text_col", "=", "'text'", ",", "category_col", "=", "'category'", ",", "possible_categories", "=", "None", ",", "filename", "=", "None", ",", "config", "=", "{", "'displaylogo'", ":", "False", "}", ",", "xscale", "=", "None", ",", "yscale", "=", "'log'", ",", "layout", "=", "{", "'hovermode'", ":", "'closest'", ",", "'showlegend'", ":", "False", ",", "'autosize'", ":", "True", "}", ",", "marker", "=", "{", "'sizemode'", ":", "'area'", "}", ",", "min_size", "=", "10", ",", ")", ":", "config_default", "=", "dict", "(", "DEFAULT_PLOTLY_CONFIG", ")", "marker_default", "=", "{", "'size'", ":", "size_col", "or", "min_size", ",", "'sizemode'", ":", "'area'", ",", "'sizeref'", ":", "int", "(", "df", "[", "size_col", "]", ".", "min", "(", ")", "*", ".8", ")", "if", "size_col", "else", "min_size", "}", "marker_default", ".", "update", "(", "marker", ")", "size_col", "=", "marker_default", ".", "pop", "(", "'size'", ")", "layout_default", "=", "{", "'xaxis'", ":", "XAxis", "(", "title", "=", "x", ",", "type", "=", "xscale", ")", ",", "'yaxis'", ":", "YAxis", "(", "title", "=", "y", ",", "type", "=", "yscale", ")", ",", "}", "layout_default", ".", "update", "(", "*", "*", "layout", ")", "if", "config", "is", "not", "None", ":", "config_default", ".", "update", "(", "config", ")", "df", ".", "columns", "=", "clean_columns", "(", "df", ".", "columns", ")", "if", "possible_categories", "is", "None", "and", "category_col", "is", "not", "None", ":", "if", "category_col", "in", "df", ".", "columns", ":", "category_labels", "=", "df", "[", "category_col", "]", "else", ":", "category_labels", "=", "np", ".", "array", "(", "category_col", ")", "possible_categories", "=", "list", "(", "set", "(", "category_labels", ")", ")", "possible_categories", "=", "[", "None", "]", "if", "possible_categories", "is", "None", "else", "possible_categories", "if", 
"category_col", "and", "category_col", "in", "df", ":", "masks", "=", "[", "np", ".", "array", "(", "df", "[", "category_col", "]", "==", "label", ")", "for", "label", "in", "possible_categories", "]", "else", ":", "masks", "=", "[", "np", ".", "array", "(", "[", "True", "]", "*", "len", "(", "df", ")", ")", "]", "*", "len", "(", "possible_categories", ")", "data", "=", "{", "'data'", ":", "[", "Scatter", "(", "x", "=", "df", "[", "x", "]", "[", "mask", "]", ".", "values", ",", "y", "=", "df", "[", "y", "]", "[", "mask", "]", ".", "values", ",", "text", "=", "df", "[", "text_col", "]", "[", "mask", "]", ".", "values", ",", "marker", "=", "Marker", "(", "size", "=", "df", "[", "size_col", "]", "[", "mask", "]", "if", "size_col", "in", "df", ".", "columns", "else", "size_col", ",", "*", "*", "marker_default", ")", ",", "mode", "=", "'markers'", ",", "name", "=", "str", "(", "category_name", ")", ")", "for", "(", "category_name", ",", "mask", ")", "in", "zip", "(", "possible_categories", ",", "masks", ")", "]", ",", "'layout'", ":", "Layout", "(", "*", "*", "layout_default", ")", "}", "return", "offline_plotly_data", "(", "data", ",", "filename", "=", "filename", ",", "config", "=", "config_default", ")" ]
r"""Interactive scatterplot of a DataFrame with the size and color of circles linke to two columns config keys: fillFrame setBackground displaylogo sendData showLink linkText staticPlot scrollZoom plot3dPixelRatio displayModeBar showTips workspace doubleClick autosizable editable layout keys: angularaxis annotations autosize bargap bargroupgap barmode barnorm boxgap boxgroupgap boxmode calendar direction dragmode font geo height hiddenlabels hiddenlabelssrc hidesources hovermode images legend mapbox margin orientation paper_bgcolor plot_bgcolor radialaxis scene separators shapes showlegend sliders smith ternary title titlefont updatemenus width xaxis yaxis marker keys: autocolorscale blend border cauto cmax cmin color colorbar colors colorscale colorsrc colorssrc line maxdisplayed opacity opacitysrc outliercolor reversescale showscale size sizemax sizemin sizemode sizeref sizesrc symbol symbolsrc marker['sizeref'] gives the denominator of the circle scaling factor. Typically it should be about a tenth of the minimum 'size' column value >>> from nlpia.data.loaders import get_data >>> df = get_data('cities_us_wordvectors_pca2_meta').iloc[:100] >>> html = offline_plotly_scatter_bubble( ... df.sort_values('population', ascending=False)[:350].copy().sort_values('population'), ... x='x', y='y', ... size_col='population', text_col='name', category_col='timezone', ... xscale=None, yscale=None, # 'log' or None ... layout={}, marker={'sizeref': 3000})
[ "r", "Interactive", "scatterplot", "of", "a", "DataFrame", "with", "the", "size", "and", "color", "of", "circles", "linke", "to", "two", "columns" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/plots.py#L242-L316
231,474
totalgood/nlpia
src/nlpia/data_utils.py
format_hex
def format_hex(i, num_bytes=4, prefix='0x'): """ Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017' """ prefix = str(prefix or '') i = int(i or 0) return prefix + '{0:0{1}x}'.format(i, num_bytes)
python
def format_hex(i, num_bytes=4, prefix='0x'): """ Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017' """ prefix = str(prefix or '') i = int(i or 0) return prefix + '{0:0{1}x}'.format(i, num_bytes)
[ "def", "format_hex", "(", "i", ",", "num_bytes", "=", "4", ",", "prefix", "=", "'0x'", ")", ":", "prefix", "=", "str", "(", "prefix", "or", "''", ")", "i", "=", "int", "(", "i", "or", "0", ")", "return", "prefix", "+", "'{0:0{1}x}'", ".", "format", "(", "i", ",", "num_bytes", ")" ]
Format hexidecimal string from decimal integer value >>> format_hex(42, num_bytes=8, prefix=None) '0000002a' >>> format_hex(23) '0x0017'
[ "Format", "hexidecimal", "string", "from", "decimal", "integer", "value" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L38-L48
231,475
totalgood/nlpia
src/nlpia/data_utils.py
is_up_url
def is_up_url(url, allow_redirects=False, timeout=5): r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False """ if not isinstance(url, basestring) or '.' not in url: return False normalized_url = prepend_http(url) session = requests.Session() session.mount(url, HTTPAdapter(max_retries=2)) try: resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout) except ConnectionError: return None except: return None if resp.status_code in (301, 302, 307) or resp.headers.get('location', None): return resp.headers.get('location', None) # return redirected URL elif 100 <= resp.status_code < 400: return normalized_url # return the original URL that was requested/visited else: return False
python
def is_up_url(url, allow_redirects=False, timeout=5): r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False """ if not isinstance(url, basestring) or '.' not in url: return False normalized_url = prepend_http(url) session = requests.Session() session.mount(url, HTTPAdapter(max_retries=2)) try: resp = session.get(normalized_url, allow_redirects=allow_redirects, timeout=timeout) except ConnectionError: return None except: return None if resp.status_code in (301, 302, 307) or resp.headers.get('location', None): return resp.headers.get('location', None) # return redirected URL elif 100 <= resp.status_code < 400: return normalized_url # return the original URL that was requested/visited else: return False
[ "def", "is_up_url", "(", "url", ",", "allow_redirects", "=", "False", ",", "timeout", "=", "5", ")", ":", "if", "not", "isinstance", "(", "url", ",", "basestring", ")", "or", "'.'", "not", "in", "url", ":", "return", "False", "normalized_url", "=", "prepend_http", "(", "url", ")", "session", "=", "requests", ".", "Session", "(", ")", "session", ".", "mount", "(", "url", ",", "HTTPAdapter", "(", "max_retries", "=", "2", ")", ")", "try", ":", "resp", "=", "session", ".", "get", "(", "normalized_url", ",", "allow_redirects", "=", "allow_redirects", ",", "timeout", "=", "timeout", ")", "except", "ConnectionError", ":", "return", "None", "except", ":", "return", "None", "if", "resp", ".", "status_code", "in", "(", "301", ",", "302", ",", "307", ")", "or", "resp", ".", "headers", ".", "get", "(", "'location'", ",", "None", ")", ":", "return", "resp", ".", "headers", ".", "get", "(", "'location'", ",", "None", ")", "# return redirected URL", "elif", "100", "<=", "resp", ".", "status_code", "<", "400", ":", "return", "normalized_url", "# return the original URL that was requested/visited", "else", ":", "return", "False" ]
r""" Check URL to see if it is a valid web page, return the redirected location if it is Returns: None if ConnectionError False if url is invalid (any HTTP error code) cleaned up URL (following redirects and possibly adding HTTP schema "http://") >>> is_up_url("duckduckgo.com") # a more private, less manipulative search engine 'https://duckduckgo.com/' >>> urlisup = is_up_url("totalgood.org") >>> not urlisup or str(urlisup).startswith('http') True >>> urlisup = is_up_url("wikipedia.org") >>> str(urlisup).startswith('http') True >>> 'wikipedia.org' in str(urlisup) True >>> bool(is_up_url('8158989668202919656')) False >>> is_up_url('invalidurlwithoutadomain') False
[ "r", "Check", "URL", "to", "see", "if", "it", "is", "a", "valid", "web", "page", "return", "the", "redirected", "location", "if", "it", "is" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L83-L122
231,476
totalgood/nlpia
src/nlpia/data_utils.py
get_markdown_levels
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))): r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) [] """ if isinstance(levels, (int, float, basestring, str, bytes)): levels = [float(levels)] levels = set([int(i) for i in levels]) if isinstance(lines, basestring): lines = lines.splitlines() level_lines = [] for line in lines: level_line = None if 0 in levels: level_line = (0, line) lstripped = line.lstrip() for i in range(6, 1, -1): if lstripped.startswith('#' * i): level_line = (i, lstripped[i:].lstrip()) break if level_line and level_line[0] in levels: level_lines.append(level_line) return level_lines
python
def get_markdown_levels(lines, levels=set((0, 1, 2, 3, 4, 5, 6))): r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) [] """ if isinstance(levels, (int, float, basestring, str, bytes)): levels = [float(levels)] levels = set([int(i) for i in levels]) if isinstance(lines, basestring): lines = lines.splitlines() level_lines = [] for line in lines: level_line = None if 0 in levels: level_line = (0, line) lstripped = line.lstrip() for i in range(6, 1, -1): if lstripped.startswith('#' * i): level_line = (i, lstripped[i:].lstrip()) break if level_line and level_line[0] in levels: level_lines.append(level_line) return level_lines
[ "def", "get_markdown_levels", "(", "lines", ",", "levels", "=", "set", "(", "(", "0", ",", "1", ",", "2", ",", "3", ",", "4", ",", "5", ",", "6", ")", ")", ")", ":", "if", "isinstance", "(", "levels", ",", "(", "int", ",", "float", ",", "basestring", ",", "str", ",", "bytes", ")", ")", ":", "levels", "=", "[", "float", "(", "levels", ")", "]", "levels", "=", "set", "(", "[", "int", "(", "i", ")", "for", "i", "in", "levels", "]", ")", "if", "isinstance", "(", "lines", ",", "basestring", ")", ":", "lines", "=", "lines", ".", "splitlines", "(", ")", "level_lines", "=", "[", "]", "for", "line", "in", "lines", ":", "level_line", "=", "None", "if", "0", "in", "levels", ":", "level_line", "=", "(", "0", ",", "line", ")", "lstripped", "=", "line", ".", "lstrip", "(", ")", "for", "i", "in", "range", "(", "6", ",", "1", ",", "-", "1", ")", ":", "if", "lstripped", ".", "startswith", "(", "'#'", "*", "i", ")", ":", "level_line", "=", "(", "i", ",", "lstripped", "[", "i", ":", "]", ".", "lstrip", "(", ")", ")", "break", "if", "level_line", "and", "level_line", "[", "0", "]", "in", "levels", ":", "level_lines", ".", "append", "(", "level_line", ")", "return", "level_lines" ]
r""" Return a list of 2-tuples with a level integer for the heading levels >>> get_markdown_levels('paragraph \n##bad\n# hello\n ### world\n') [(0, 'paragraph '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n') [(0, '- bullet '), (2, 'bad'), (0, '# hello'), (3, 'world')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 2) [(2, 'bad')] >>> get_markdown_levels('- bullet \n##bad\n# hello\n ### world\n', 1) []
[ "r", "Return", "a", "list", "of", "2", "-", "tuples", "with", "a", "level", "integer", "for", "the", "heading", "levels" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L125-L155
231,477
totalgood/nlpia
src/nlpia/data_utils.py
iter_lines
def iter_lines(url_or_text, ext=None, mode='rt'): r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True """ if url_or_text is None or not url_or_text: return [] # url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm' elif isinstance(url_or_text, (str, bytes, basestring)): if '\n' in url_or_text or '\r' in url_or_text: return StringIO(url_or_text) elif os.path.isfile(os.path.join(DATA_PATH, url_or_text)): return open(os.path.join(DATA_PATH, url_or_text), mode=mode) elif os.path.isfile(url_or_text): return open(os.path.join(url_or_text), mode=mode) if os.path.isdir(url_or_text): filepaths = [filemeta['path'] for filemeta in find_files(url_or_text, ext=ext)] return itertools.chain.from_iterable(map(open, filepaths)) url = looks_like_url(url_or_text) if url: for i in range(3): return requests.get(url, stream=True, allow_redirects=True, timeout=5) else: return StringIO(url_or_text) elif isinstance(url_or_text, (list, tuple)): # FIXME: make this lazy with chain and map so it doesn't gobble up RAM text = '' for s in url_or_text: text += '\n'.join(list(iter_lines(s, ext=ext, mode=mode))) + '\n' return iter_lines(text)
python
def iter_lines(url_or_text, ext=None, mode='rt'): r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True """ if url_or_text is None or not url_or_text: return [] # url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm' elif isinstance(url_or_text, (str, bytes, basestring)): if '\n' in url_or_text or '\r' in url_or_text: return StringIO(url_or_text) elif os.path.isfile(os.path.join(DATA_PATH, url_or_text)): return open(os.path.join(DATA_PATH, url_or_text), mode=mode) elif os.path.isfile(url_or_text): return open(os.path.join(url_or_text), mode=mode) if os.path.isdir(url_or_text): filepaths = [filemeta['path'] for filemeta in find_files(url_or_text, ext=ext)] return itertools.chain.from_iterable(map(open, filepaths)) url = looks_like_url(url_or_text) if url: for i in range(3): return requests.get(url, stream=True, allow_redirects=True, timeout=5) else: return StringIO(url_or_text) elif isinstance(url_or_text, (list, tuple)): # FIXME: make this lazy with chain and map so it doesn't gobble up RAM text = '' for s in url_or_text: text += '\n'.join(list(iter_lines(s, ext=ext, mode=mode))) + '\n' return iter_lines(text)
[ "def", "iter_lines", "(", "url_or_text", ",", "ext", "=", "None", ",", "mode", "=", "'rt'", ")", ":", "if", "url_or_text", "is", "None", "or", "not", "url_or_text", ":", "return", "[", "]", "# url_or_text = 'https://www.fileformat.info/info/charset/UTF-8/list.htm'", "elif", "isinstance", "(", "url_or_text", ",", "(", "str", ",", "bytes", ",", "basestring", ")", ")", ":", "if", "'\\n'", "in", "url_or_text", "or", "'\\r'", "in", "url_or_text", ":", "return", "StringIO", "(", "url_or_text", ")", "elif", "os", ".", "path", ".", "isfile", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "url_or_text", ")", ")", ":", "return", "open", "(", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "url_or_text", ")", ",", "mode", "=", "mode", ")", "elif", "os", ".", "path", ".", "isfile", "(", "url_or_text", ")", ":", "return", "open", "(", "os", ".", "path", ".", "join", "(", "url_or_text", ")", ",", "mode", "=", "mode", ")", "if", "os", ".", "path", ".", "isdir", "(", "url_or_text", ")", ":", "filepaths", "=", "[", "filemeta", "[", "'path'", "]", "for", "filemeta", "in", "find_files", "(", "url_or_text", ",", "ext", "=", "ext", ")", "]", "return", "itertools", ".", "chain", ".", "from_iterable", "(", "map", "(", "open", ",", "filepaths", ")", ")", "url", "=", "looks_like_url", "(", "url_or_text", ")", "if", "url", ":", "for", "i", "in", "range", "(", "3", ")", ":", "return", "requests", ".", "get", "(", "url", ",", "stream", "=", "True", ",", "allow_redirects", "=", "True", ",", "timeout", "=", "5", ")", "else", ":", "return", "StringIO", "(", "url_or_text", ")", "elif", "isinstance", "(", "url_or_text", ",", "(", "list", ",", "tuple", ")", ")", ":", "# FIXME: make this lazy with chain and map so it doesn't gobble up RAM", "text", "=", "''", "for", "s", "in", "url_or_text", ":", "text", "+=", "'\\n'", ".", "join", "(", "list", "(", "iter_lines", "(", "s", ",", "ext", "=", "ext", ",", "mode", "=", "mode", ")", ")", ")", "+", "'\\n'", "return", "iter_lines", 
"(", "text", ")" ]
r""" Return an iterator over the lines of a file or URI response. >>> len(list(iter_lines('cats_and_dogs.txt'))) 263 >>> len(list(iter_lines(list('abcdefgh')))) 8 >>> len(list(iter_lines('abc\n def\n gh\n'))) 3 >>> len(list(iter_lines('abc\n def\n gh'))) 3 >>> 20000 > len(list(iter_lines(BOOK_PATH))) > 200 True
[ "r", "Return", "an", "iterator", "over", "the", "lines", "of", "a", "file", "or", "URI", "response", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L186-L224
231,478
totalgood/nlpia
src/nlpia/data_utils.py
parse_utf_html
def parse_utf_html(url=os.path.join(DATA_PATH, 'utf8_table.html')): """ Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii` """ utf = pd.read_html(url) utf = [df for df in utf if len(df) > 1023 and len(df.columns) > 2][0] utf = utf.iloc[:1024] if len(utf) == 1025 else utf utf.columns = 'char name hex'.split() utf.name = utf.name.str.replace('<control>', 'CONTTROL CHARACTER') multiascii = [' '] * len(utf) asc = [' '] * len(utf) rows = [] for i, name in enumerate(utf.name): if i < 128 and str.isprintable(chr(i)): asc[i] = chr(i) else: asc[i] = ' ' big = re.findall(r'CAPITAL\ LETTER\ ([a-z0-9A-Z ]+$)', name) small = re.findall(r'SMALL\ LETTER\ ([a-z0-9A-Z ]+$)', name) pattern = r'(?P<description>' \ r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\s]*' \ r'(?P<case>CAPITAL|SMALL)?[\s]*' \ r'(?P<length>CHARACTER|LETTER)?[\s]*' \ r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\s]*' \ r'(?P<name>[-_><a-z0-9A-Z\s ]+)[\s]*' \ r'\(?(?P<code_point>U\+[- a-fA-F0-9]{4,8})?\)?)[\s]*' # noqa match = re.match(pattern, name) gd = match.groupdict() gd['char'] = chr(i) gd['suffix'] = None gd['wordwith'] = None withprefix = re.match(r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)' + r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withprefix: gd.update(withprefix.groupdict()) withsuffix = re.match(r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+' + r'(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withsuffix: gd.update(withsuffix.groupdict()) gd['code_point'] = gd['code_point'] or format_hex(i, num_bytes=4, prefix='U+').upper() if i < 128: gd['ascii'] = chr(i) else: multiascii = gd['name'] if gd['suffix'] and gd['wordwith']: multiascii = NAME_ACCENT.get(gd['suffix'], "'") else: if big: m = big[0] multiascii[i] = m if len(m) == 1: asc[i] = m elif small: multiascii[i] = small[0].lower() if len(multiascii[i]) == 1: asc[i] = small[0].lower() rows.append(gd) df = pd.DataFrame(rows) df.multiascii 
= df.multiascii.str.strip() df['ascii'] = df['ascii'].str.strip() df.name = df.name.str.strip() return df
python
def parse_utf_html(url=os.path.join(DATA_PATH, 'utf8_table.html')): """ Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii` """ utf = pd.read_html(url) utf = [df for df in utf if len(df) > 1023 and len(df.columns) > 2][0] utf = utf.iloc[:1024] if len(utf) == 1025 else utf utf.columns = 'char name hex'.split() utf.name = utf.name.str.replace('<control>', 'CONTTROL CHARACTER') multiascii = [' '] * len(utf) asc = [' '] * len(utf) rows = [] for i, name in enumerate(utf.name): if i < 128 and str.isprintable(chr(i)): asc[i] = chr(i) else: asc[i] = ' ' big = re.findall(r'CAPITAL\ LETTER\ ([a-z0-9A-Z ]+$)', name) small = re.findall(r'SMALL\ LETTER\ ([a-z0-9A-Z ]+$)', name) pattern = r'(?P<description>' \ r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\s]*' \ r'(?P<case>CAPITAL|SMALL)?[\s]*' \ r'(?P<length>CHARACTER|LETTER)?[\s]*' \ r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\s]*' \ r'(?P<name>[-_><a-z0-9A-Z\s ]+)[\s]*' \ r'\(?(?P<code_point>U\+[- a-fA-F0-9]{4,8})?\)?)[\s]*' # noqa match = re.match(pattern, name) gd = match.groupdict() gd['char'] = chr(i) gd['suffix'] = None gd['wordwith'] = None withprefix = re.match(r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)' + r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withprefix: gd.update(withprefix.groupdict()) withsuffix = re.match(r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\s+' + r'(?P<suffix>[-_><a-z0-9A-Z\s ]+)', gd['name']) if withsuffix: gd.update(withsuffix.groupdict()) gd['code_point'] = gd['code_point'] or format_hex(i, num_bytes=4, prefix='U+').upper() if i < 128: gd['ascii'] = chr(i) else: multiascii = gd['name'] if gd['suffix'] and gd['wordwith']: multiascii = NAME_ACCENT.get(gd['suffix'], "'") else: if big: m = big[0] multiascii[i] = m if len(m) == 1: asc[i] = m elif small: multiascii[i] = small[0].lower() if len(multiascii[i]) == 1: asc[i] = small[0].lower() rows.append(gd) df = pd.DataFrame(rows) df.multiascii 
= df.multiascii.str.strip() df['ascii'] = df['ascii'].str.strip() df.name = df.name.str.strip() return df
[ "def", "parse_utf_html", "(", "url", "=", "os", ".", "path", ".", "join", "(", "DATA_PATH", ",", "'utf8_table.html'", ")", ")", ":", "utf", "=", "pd", ".", "read_html", "(", "url", ")", "utf", "=", "[", "df", "for", "df", "in", "utf", "if", "len", "(", "df", ")", ">", "1023", "and", "len", "(", "df", ".", "columns", ")", ">", "2", "]", "[", "0", "]", "utf", "=", "utf", ".", "iloc", "[", ":", "1024", "]", "if", "len", "(", "utf", ")", "==", "1025", "else", "utf", "utf", ".", "columns", "=", "'char name hex'", ".", "split", "(", ")", "utf", ".", "name", "=", "utf", ".", "name", ".", "str", ".", "replace", "(", "'<control>'", ",", "'CONTTROL CHARACTER'", ")", "multiascii", "=", "[", "' '", "]", "*", "len", "(", "utf", ")", "asc", "=", "[", "' '", "]", "*", "len", "(", "utf", ")", "rows", "=", "[", "]", "for", "i", ",", "name", "in", "enumerate", "(", "utf", ".", "name", ")", ":", "if", "i", "<", "128", "and", "str", ".", "isprintable", "(", "chr", "(", "i", ")", ")", ":", "asc", "[", "i", "]", "=", "chr", "(", "i", ")", "else", ":", "asc", "[", "i", "]", "=", "' '", "big", "=", "re", ".", "findall", "(", "r'CAPITAL\\ LETTER\\ ([a-z0-9A-Z ]+$)'", ",", "name", ")", "small", "=", "re", ".", "findall", "(", "r'SMALL\\ LETTER\\ ([a-z0-9A-Z ]+$)'", ",", "name", ")", "pattern", "=", "r'(?P<description>'", "r'(?P<lang>LATIN|GREEK|COPTIC|CYRILLIC)?[\\s]*'", "r'(?P<case>CAPITAL|SMALL)?[\\s]*'", "r'(?P<length>CHARACTER|LETTER)?[\\s]*'", "r'(?P<ukrainian>BYELORUSSIAN-UKRAINIAN)?[\\s]*'", "r'(?P<name>[-_><a-z0-9A-Z\\s ]+)[\\s]*'", "r'\\(?(?P<code_point>U\\+[- a-fA-F0-9]{4,8})?\\)?)[\\s]*'", "# noqa", "match", "=", "re", ".", "match", "(", "pattern", ",", "name", ")", "gd", "=", "match", ".", "groupdict", "(", ")", "gd", "[", "'char'", "]", "=", "chr", "(", "i", ")", "gd", "[", "'suffix'", "]", "=", "None", "gd", "[", "'wordwith'", "]", "=", "None", "withprefix", "=", "re", ".", "match", "(", "r'(?P<prefix>DOTLESS|TURNED|SMALL)(?P<name>.*)'", "+", 
"r'(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\\s+(?P<suffix>[-_><a-z0-9A-Z\\s ]+)'", ",", "gd", "[", "'name'", "]", ")", "if", "withprefix", ":", "gd", ".", "update", "(", "withprefix", ".", "groupdict", "(", ")", ")", "withsuffix", "=", "re", ".", "match", "(", "r'(?P<name>.*)(?P<wordwith>WITH|SUPERSCRIPT|SUBSCRIPT|DIGRAPH)\\s+'", "+", "r'(?P<suffix>[-_><a-z0-9A-Z\\s ]+)'", ",", "gd", "[", "'name'", "]", ")", "if", "withsuffix", ":", "gd", ".", "update", "(", "withsuffix", ".", "groupdict", "(", ")", ")", "gd", "[", "'code_point'", "]", "=", "gd", "[", "'code_point'", "]", "or", "format_hex", "(", "i", ",", "num_bytes", "=", "4", ",", "prefix", "=", "'U+'", ")", ".", "upper", "(", ")", "if", "i", "<", "128", ":", "gd", "[", "'ascii'", "]", "=", "chr", "(", "i", ")", "else", ":", "multiascii", "=", "gd", "[", "'name'", "]", "if", "gd", "[", "'suffix'", "]", "and", "gd", "[", "'wordwith'", "]", ":", "multiascii", "=", "NAME_ACCENT", ".", "get", "(", "gd", "[", "'suffix'", "]", ",", "\"'\"", ")", "else", ":", "if", "big", ":", "m", "=", "big", "[", "0", "]", "multiascii", "[", "i", "]", "=", "m", "if", "len", "(", "m", ")", "==", "1", ":", "asc", "[", "i", "]", "=", "m", "elif", "small", ":", "multiascii", "[", "i", "]", "=", "small", "[", "0", "]", ".", "lower", "(", ")", "if", "len", "(", "multiascii", "[", "i", "]", ")", "==", "1", ":", "asc", "[", "i", "]", "=", "small", "[", "0", "]", ".", "lower", "(", ")", "rows", ".", "append", "(", "gd", ")", "df", "=", "pd", ".", "DataFrame", "(", "rows", ")", "df", ".", "multiascii", "=", "df", ".", "multiascii", ".", "str", ".", "strip", "(", ")", "df", "[", "'ascii'", "]", "=", "df", "[", "'ascii'", "]", ".", "str", ".", "strip", "(", ")", "df", ".", "name", "=", "df", ".", "name", ".", "str", ".", "strip", "(", ")", "return", "df" ]
Parse HTML table UTF8 char descriptions returning DataFrame with `ascii` and `mutliascii`
[ "Parse", "HTML", "table", "UTF8", "char", "descriptions", "returning", "DataFrame", "with", "ascii", "and", "mutliascii" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L227-L291
231,479
totalgood/nlpia
src/nlpia/data_utils.py
clean_csvs
def clean_csvs(dialogpath=None): """ Translate non-ASCII characters to spaces or equivalent ASCII characters """ dialogdir = os.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath) for filename in filenames: filepath = os.path.join(dialogdir, filename) df = clean_df(filepath) df.to_csv(filepath, header=None) return filenames
python
def clean_csvs(dialogpath=None): """ Translate non-ASCII characters to spaces or equivalent ASCII characters """ dialogdir = os.dirname(dialogpath) if os.path.isfile(dialogpath) else dialogpath filenames = [dialogpath.split(os.path.sep)[-1]] if os.path.isfile(dialogpath) else os.listdir(dialogpath) for filename in filenames: filepath = os.path.join(dialogdir, filename) df = clean_df(filepath) df.to_csv(filepath, header=None) return filenames
[ "def", "clean_csvs", "(", "dialogpath", "=", "None", ")", ":", "dialogdir", "=", "os", ".", "dirname", "(", "dialogpath", ")", "if", "os", ".", "path", ".", "isfile", "(", "dialogpath", ")", "else", "dialogpath", "filenames", "=", "[", "dialogpath", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "[", "-", "1", "]", "]", "if", "os", ".", "path", ".", "isfile", "(", "dialogpath", ")", "else", "os", ".", "listdir", "(", "dialogpath", ")", "for", "filename", "in", "filenames", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "dialogdir", ",", "filename", ")", "df", "=", "clean_df", "(", "filepath", ")", "df", ".", "to_csv", "(", "filepath", ",", "header", "=", "None", ")", "return", "filenames" ]
Translate non-ASCII characters to spaces or equivalent ASCII characters
[ "Translate", "non", "-", "ASCII", "characters", "to", "spaces", "or", "equivalent", "ASCII", "characters" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L294-L302
231,480
totalgood/nlpia
src/nlpia/data_utils.py
unicode2ascii
def unicode2ascii(text, expand=True): r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \'' """ translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII output = '' for c in text: if not c or ord(c) < 128: output += c else: output += translate[c] if c in translate else ' ' return output.strip()
python
def unicode2ascii(text, expand=True): r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \'' """ translate = UTF8_TO_ASCII if not expand else UTF8_TO_MULTIASCII output = '' for c in text: if not c or ord(c) < 128: output += c else: output += translate[c] if c in translate else ' ' return output.strip()
[ "def", "unicode2ascii", "(", "text", ",", "expand", "=", "True", ")", ":", "translate", "=", "UTF8_TO_ASCII", "if", "not", "expand", "else", "UTF8_TO_MULTIASCII", "output", "=", "''", "for", "c", "in", "text", ":", "if", "not", "c", "or", "ord", "(", "c", ")", "<", "128", ":", "output", "+=", "c", "else", ":", "output", "+=", "translate", "[", "c", "]", "if", "c", "in", "translate", "else", "' '", "return", "output", ".", "strip", "(", ")" ]
r""" Translate UTF8 characters to ASCII >> unicode2ascii("żółw") zozw utf8_letters = 'ą ę ć ź ż ó ł ń ś “ ” ’'.split() ascii_letters = 'a e c z z o l n s " " \''
[ "r", "Translate", "UTF8", "characters", "to", "ASCII" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L305-L321
231,481
totalgood/nlpia
src/nlpia/data_utils.py
clean_df
def clean_df(df, header=None, **read_csv_kwargs): """ Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV """ df = read_csv(df, header=header, **read_csv_kwargs) df = df.fillna(' ') for col in df.columns: df[col] = df[col].apply(unicode2ascii) return df
python
def clean_df(df, header=None, **read_csv_kwargs): """ Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV """ df = read_csv(df, header=header, **read_csv_kwargs) df = df.fillna(' ') for col in df.columns: df[col] = df[col].apply(unicode2ascii) return df
[ "def", "clean_df", "(", "df", ",", "header", "=", "None", ",", "*", "*", "read_csv_kwargs", ")", ":", "df", "=", "read_csv", "(", "df", ",", "header", "=", "header", ",", "*", "*", "read_csv_kwargs", ")", "df", "=", "df", ".", "fillna", "(", "' '", ")", "for", "col", "in", "df", ".", "columns", ":", "df", "[", "col", "]", "=", "df", "[", "col", "]", ".", "apply", "(", "unicode2ascii", ")", "return", "df" ]
Convert UTF8 characters in a CSV file or dataframe into ASCII Args: df (DataFrame or str): DataFrame or path or url to CSV
[ "Convert", "UTF8", "characters", "in", "a", "CSV", "file", "or", "dataframe", "into", "ASCII" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/data_utils.py#L324-L334
231,482
totalgood/nlpia
src/nlpia/book_parser.py
get_acronyms
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')): """ Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """ acronyms = [] for f, lines in get_lines(manuscript): for line in lines: matches = CRE_ACRONYM.finditer(line) if matches: for m in matches: if m.group('a2'): acronyms.append((m.group('a2'), m.group('s2'))) elif m.group('a3'): acronyms.append((m.group('a3'), m.group('s3'))) elif m.group('a4'): acronyms.append((m.group('a4'), m.group('s4'))) elif m.group('a5'): acronyms.append((m.group('a5'), m.group('s5'))) return sorted(dict(acronyms).items())
python
def get_acronyms(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript')): """ Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples """ acronyms = [] for f, lines in get_lines(manuscript): for line in lines: matches = CRE_ACRONYM.finditer(line) if matches: for m in matches: if m.group('a2'): acronyms.append((m.group('a2'), m.group('s2'))) elif m.group('a3'): acronyms.append((m.group('a3'), m.group('s3'))) elif m.group('a4'): acronyms.append((m.group('a4'), m.group('s4'))) elif m.group('a5'): acronyms.append((m.group('a5'), m.group('s5'))) return sorted(dict(acronyms).items())
[ "def", "get_acronyms", "(", "manuscript", "=", "os", ".", "path", ".", "expanduser", "(", "'~/code/nlpia/lane/manuscript'", ")", ")", ":", "acronyms", "=", "[", "]", "for", "f", ",", "lines", "in", "get_lines", "(", "manuscript", ")", ":", "for", "line", "in", "lines", ":", "matches", "=", "CRE_ACRONYM", ".", "finditer", "(", "line", ")", "if", "matches", ":", "for", "m", "in", "matches", ":", "if", "m", ".", "group", "(", "'a2'", ")", ":", "acronyms", ".", "append", "(", "(", "m", ".", "group", "(", "'a2'", ")", ",", "m", ".", "group", "(", "'s2'", ")", ")", ")", "elif", "m", ".", "group", "(", "'a3'", ")", ":", "acronyms", ".", "append", "(", "(", "m", ".", "group", "(", "'a3'", ")", ",", "m", ".", "group", "(", "'s3'", ")", ")", ")", "elif", "m", ".", "group", "(", "'a4'", ")", ":", "acronyms", ".", "append", "(", "(", "m", ".", "group", "(", "'a4'", ")", ",", "m", ".", "group", "(", "'s4'", ")", ")", ")", "elif", "m", ".", "group", "(", "'a5'", ")", ":", "acronyms", ".", "append", "(", "(", "m", ".", "group", "(", "'a5'", ")", ",", "m", ".", "group", "(", "'s5'", ")", ")", ")", "return", "sorted", "(", "dict", "(", "acronyms", ")", ".", "items", "(", ")", ")" ]
Find all the 2 and 3-letter acronyms in the manuscript and return as a sorted list of tuples
[ "Find", "all", "the", "2", "and", "3", "-", "letter", "acronyms", "in", "the", "manuscript", "and", "return", "as", "a", "sorted", "list", "of", "tuples" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L90-L107
231,483
totalgood/nlpia
src/nlpia/book_parser.py
write_glossary
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None): """ Compose an asciidoc string with acronyms culled from the manuscript """ linesep = linesep or os.linesep lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]'] acronyms = get_acronyms(manuscript) for a in acronyms: lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:])) return linesep.join(lines)
python
def write_glossary(manuscript=os.path.expanduser('~/code/nlpia/lane/manuscript'), linesep=None): """ Compose an asciidoc string with acronyms culled from the manuscript """ linesep = linesep or os.linesep lines = ['[acronyms]', '== Acronyms', '', '[acronyms,template="glossary",id="terms"]'] acronyms = get_acronyms(manuscript) for a in acronyms: lines.append('*{}*:: {} -- '.format(a[0], a[1][0].upper() + a[1][1:])) return linesep.join(lines)
[ "def", "write_glossary", "(", "manuscript", "=", "os", ".", "path", ".", "expanduser", "(", "'~/code/nlpia/lane/manuscript'", ")", ",", "linesep", "=", "None", ")", ":", "linesep", "=", "linesep", "or", "os", ".", "linesep", "lines", "=", "[", "'[acronyms]'", ",", "'== Acronyms'", ",", "''", ",", "'[acronyms,template=\"glossary\",id=\"terms\"]'", "]", "acronyms", "=", "get_acronyms", "(", "manuscript", ")", "for", "a", "in", "acronyms", ":", "lines", ".", "append", "(", "'*{}*:: {} -- '", ".", "format", "(", "a", "[", "0", "]", ",", "a", "[", "1", "]", "[", "0", "]", ".", "upper", "(", ")", "+", "a", "[", "1", "]", "[", "1", ":", "]", ")", ")", "return", "linesep", ".", "join", "(", "lines", ")" ]
Compose an asciidoc string with acronyms culled from the manuscript
[ "Compose", "an", "asciidoc", "string", "with", "acronyms", "culled", "from", "the", "manuscript" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L110-L117
231,484
totalgood/nlpia
src/nlpia/book_parser.py
infer_url_title
def infer_url_title(url): """ Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of' """ meta = get_url_filemeta(url) title = '' if meta: if meta.get('hostname', url) == 'drive.google.com': title = get_url_title(url) else: title = meta.get('filename', meta['hostname']) or meta['hostname'] title, fileext = splitext(title) else: logging.error('Unable to retrieve URL: {}'.format(url)) return None return delimit_slug(title, ' ')
python
def infer_url_title(url): """ Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of' """ meta = get_url_filemeta(url) title = '' if meta: if meta.get('hostname', url) == 'drive.google.com': title = get_url_title(url) else: title = meta.get('filename', meta['hostname']) or meta['hostname'] title, fileext = splitext(title) else: logging.error('Unable to retrieve URL: {}'.format(url)) return None return delimit_slug(title, ' ')
[ "def", "infer_url_title", "(", "url", ")", ":", "meta", "=", "get_url_filemeta", "(", "url", ")", "title", "=", "''", "if", "meta", ":", "if", "meta", ".", "get", "(", "'hostname'", ",", "url", ")", "==", "'drive.google.com'", ":", "title", "=", "get_url_title", "(", "url", ")", "else", ":", "title", "=", "meta", ".", "get", "(", "'filename'", ",", "meta", "[", "'hostname'", "]", ")", "or", "meta", "[", "'hostname'", "]", "title", ",", "fileext", "=", "splitext", "(", "title", ")", "else", ":", "logging", ".", "error", "(", "'Unable to retrieve URL: {}'", ".", "format", "(", "url", ")", ")", "return", "None", "return", "delimit_slug", "(", "title", ",", "' '", ")" ]
Guess what the page title is going to be from the path and FQDN in the URL >>> infer_url_title('https://ai.googleblog.com/2018/09/the-what-if-tool-code-free-probing-of.html') 'the what if tool code free probing of'
[ "Guess", "what", "the", "page", "title", "is", "going", "to", "be", "from", "the", "path", "and", "FQDN", "in", "the", "URL" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L245-L262
231,485
totalgood/nlpia
src/nlpia/book_parser.py
translate_book
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes), book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ if callable(translators) or not hasattr(translators, '__len__'): translators = (translators,) sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags) file_line_maps = [] for fileid, (filepath, tagged_lines) in enumerate(sections): logger.info('filepath={}'.format(filepath)) destpath = filepath if not dest: copyfile(filepath, filepath + '.' + ext.lstrip('.')) elif os.path.sep in dest: destpath = os.path.join(dest, os.path.basename(filepath)) else: destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath)) ensure_dir_exists(os.path.dirname(destpath)) with open(destpath, 'w') as fout: logger.info('destpath={}'.format(destpath)) for lineno, (tag, line) in enumerate(tagged_lines): if (include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags))): for translate in translators: new_line = translate(line) # TODO: be smarter about writing to files in-place if line != new_line: file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line)) line = new_line fout.write(line) return file_line_maps
python
def translate_book(translators=(HyperlinkStyleCorrector().translate, translate_line_footnotes), book_dir=BOOK_PATH, dest=None, include_tags=None, ext='.nlpiabak', skip_untitled=True): """ Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks')) """ if callable(translators) or not hasattr(translators, '__len__'): translators = (translators,) sections = get_tagged_sections(book_dir=book_dir, include_tags=include_tags) file_line_maps = [] for fileid, (filepath, tagged_lines) in enumerate(sections): logger.info('filepath={}'.format(filepath)) destpath = filepath if not dest: copyfile(filepath, filepath + '.' + ext.lstrip('.')) elif os.path.sep in dest: destpath = os.path.join(dest, os.path.basename(filepath)) else: destpath = os.path.join(os.path.dirname(filepath), dest, os.path.basename(filepath)) ensure_dir_exists(os.path.dirname(destpath)) with open(destpath, 'w') as fout: logger.info('destpath={}'.format(destpath)) for lineno, (tag, line) in enumerate(tagged_lines): if (include_tags is None or tag in include_tags or any((tag.startswith(t) for t in include_tags))): for translate in translators: new_line = translate(line) # TODO: be smarter about writing to files in-place if line != new_line: file_line_maps.append((fileid, lineno, filepath, destpath, line, new_line)) line = new_line fout.write(line) return file_line_maps
[ "def", "translate_book", "(", "translators", "=", "(", "HyperlinkStyleCorrector", "(", ")", ".", "translate", ",", "translate_line_footnotes", ")", ",", "book_dir", "=", "BOOK_PATH", ",", "dest", "=", "None", ",", "include_tags", "=", "None", ",", "ext", "=", "'.nlpiabak'", ",", "skip_untitled", "=", "True", ")", ":", "if", "callable", "(", "translators", ")", "or", "not", "hasattr", "(", "translators", ",", "'__len__'", ")", ":", "translators", "=", "(", "translators", ",", ")", "sections", "=", "get_tagged_sections", "(", "book_dir", "=", "book_dir", ",", "include_tags", "=", "include_tags", ")", "file_line_maps", "=", "[", "]", "for", "fileid", ",", "(", "filepath", ",", "tagged_lines", ")", "in", "enumerate", "(", "sections", ")", ":", "logger", ".", "info", "(", "'filepath={}'", ".", "format", "(", "filepath", ")", ")", "destpath", "=", "filepath", "if", "not", "dest", ":", "copyfile", "(", "filepath", ",", "filepath", "+", "'.'", "+", "ext", ".", "lstrip", "(", "'.'", ")", ")", "elif", "os", ".", "path", ".", "sep", "in", "dest", ":", "destpath", "=", "os", ".", "path", ".", "join", "(", "dest", ",", "os", ".", "path", ".", "basename", "(", "filepath", ")", ")", "else", ":", "destpath", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "filepath", ")", ",", "dest", ",", "os", ".", "path", ".", "basename", "(", "filepath", ")", ")", "ensure_dir_exists", "(", "os", ".", "path", ".", "dirname", "(", "destpath", ")", ")", "with", "open", "(", "destpath", ",", "'w'", ")", "as", "fout", ":", "logger", ".", "info", "(", "'destpath={}'", ".", "format", "(", "destpath", ")", ")", "for", "lineno", ",", "(", "tag", ",", "line", ")", "in", "enumerate", "(", "tagged_lines", ")", ":", "if", "(", "include_tags", "is", "None", "or", "tag", "in", "include_tags", "or", "any", "(", "(", "tag", ".", "startswith", "(", "t", ")", "for", "t", "in", "include_tags", ")", ")", ")", ":", "for", "translate", "in", "translators", ":", "new_line", "=", 
"translate", "(", "line", ")", "# TODO: be smarter about writing to files in-place", "if", "line", "!=", "new_line", ":", "file_line_maps", ".", "append", "(", "(", "fileid", ",", "lineno", ",", "filepath", ",", "destpath", ",", "line", ",", "new_line", ")", ")", "line", "=", "new_line", "fout", ".", "write", "(", "line", ")", "return", "file_line_maps" ]
Fix any style corrections listed in `translate` list of translation functions >>> len(translate_book(book_dir=BOOK_PATH, dest='cleaned_hyperlinks')) 3 >>> rm_rf(os.path.join(BOOK_PATH, 'cleaned_hyperlinks'))
[ "Fix", "any", "style", "corrections", "listed", "in", "translate", "list", "of", "translation", "functions" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L335-L371
231,486
totalgood/nlpia
src/nlpia/book_parser.py
filter_lines
def filter_lines(input_file, output_file, translate=lambda line: line): """ Translate all the lines of a single file """ filepath, lines = get_lines([input_file])[0] return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
python
def filter_lines(input_file, output_file, translate=lambda line: line): """ Translate all the lines of a single file """ filepath, lines = get_lines([input_file])[0] return filepath, [(tag, translate(line=line, tag=tag)) for (tag, line) in lines]
[ "def", "filter_lines", "(", "input_file", ",", "output_file", ",", "translate", "=", "lambda", "line", ":", "line", ")", ":", "filepath", ",", "lines", "=", "get_lines", "(", "[", "input_file", "]", ")", "[", "0", "]", "return", "filepath", ",", "[", "(", "tag", ",", "translate", "(", "line", "=", "line", ",", "tag", "=", "tag", ")", ")", "for", "(", "tag", ",", "line", ")", "in", "lines", "]" ]
Translate all the lines of a single file
[ "Translate", "all", "the", "lines", "of", "a", "single", "file" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L407-L410
231,487
totalgood/nlpia
src/nlpia/book_parser.py
filter_tagged_lines
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None): r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')] """ include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags for tagged_line in tagged_lines: if (include_tags is None or tagged_line[0] in include_tags or any((tagged_line[0].startswith(t) for t in include_tags))): if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)): yield tagged_line else: logger.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format( tagged_line[0], exclude_tags)) else: logger.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags))
python
def filter_tagged_lines(tagged_lines, include_tags=None, exclude_tags=None): r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')] """ include_tags = (include_tags,) if isinstance(include_tags, str) else include_tags exclude_tags = (exclude_tags,) if isinstance(exclude_tags, str) else exclude_tags for tagged_line in tagged_lines: if (include_tags is None or tagged_line[0] in include_tags or any((tagged_line[0].startswith(t) for t in include_tags))): if exclude_tags is None or not any((tagged_line[0].startswith(t) for t in exclude_tags)): yield tagged_line else: logger.debug('skipping tag {} because it starts with one of the exclude_tags={}'.format( tagged_line[0], exclude_tags)) else: logger.debug('skipping tag {} because not in {}'.format(tagged_line[0], include_tags))
[ "def", "filter_tagged_lines", "(", "tagged_lines", ",", "include_tags", "=", "None", ",", "exclude_tags", "=", "None", ")", ":", "include_tags", "=", "(", "include_tags", ",", ")", "if", "isinstance", "(", "include_tags", ",", "str", ")", "else", "include_tags", "exclude_tags", "=", "(", "exclude_tags", ",", ")", "if", "isinstance", "(", "exclude_tags", ",", "str", ")", "else", "exclude_tags", "for", "tagged_line", "in", "tagged_lines", ":", "if", "(", "include_tags", "is", "None", "or", "tagged_line", "[", "0", "]", "in", "include_tags", "or", "any", "(", "(", "tagged_line", "[", "0", "]", ".", "startswith", "(", "t", ")", "for", "t", "in", "include_tags", ")", ")", ")", ":", "if", "exclude_tags", "is", "None", "or", "not", "any", "(", "(", "tagged_line", "[", "0", "]", ".", "startswith", "(", "t", ")", "for", "t", "in", "exclude_tags", ")", ")", ":", "yield", "tagged_line", "else", ":", "logger", ".", "debug", "(", "'skipping tag {} because it starts with one of the exclude_tags={}'", ".", "format", "(", "tagged_line", "[", "0", "]", ",", "exclude_tags", ")", ")", "else", ":", "logger", ".", "debug", "(", "'skipping tag {} because not in {}'", ".", "format", "(", "tagged_line", "[", "0", "]", ",", "include_tags", ")", ")" ]
r""" Return iterable of tagged lines where the tags all start with one of the include_tags prefixes >>> filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')]) <generator object filter_tagged_lines at ...> >>> list(filter_tagged_lines([('natural', "Hello."), ('code', '[source,python]'), ('code', '>>> hello()')], ... include_tags='natural')) [('natural', 'Hello.')]
[ "r", "Return", "iterable", "of", "tagged", "lines", "where", "the", "tags", "all", "start", "with", "one", "of", "the", "include_tags", "prefixes" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book_parser.py#L413-L434
231,488
totalgood/nlpia
src/nlpia/book/examples/ch04_catdog_lsa_sorted.py
accuracy_study
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs): """ Reconstruct the term-document matrix and measure error as SVD terms are truncated """ smat = np.zeros((len(u), len(vt))) np.fill_diagonal(smat, s) smat = pd.DataFrame(smat, columns=vt.index, index=u.index) if verbosity: print() print('Sigma:') print(smat.round(2)) print() print('Sigma without zeroing any dim:') print(np.diag(smat.round(2))) tdm_prime = u.values.dot(smat.values).dot(vt.values) if verbosity: print() print('Reconstructed Term-Document Matrix') print(tdm_prime.round(2)) err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print() print('Error without reducing dimensions:') print(err[-1]) # 2.3481474529927113e-15 smat2 = smat.copy() for numdim in range(len(s) - 1, 0, -1): smat2.iloc[numdim, numdim] = 0 if verbosity: print('Sigma after zeroing out dim {}'.format(numdim)) print(np.diag(smat2.round(2))) # d0 d1 d2 d3 d4 d5 # ship 2.16 0.00 0.0 0.0 0.0 0.0 # boat 0.00 1.59 0.0 0.0 0.0 0.0 # ocean 0.00 0.00 0.0 0.0 0.0 0.0 # voyage 0.00 0.00 0.0 0.0 0.0 0.0 # trip 0.00 0.00 0.0 0.0 0.0 0.0 tdm_prime2 = u.values.dot(smat2.values).dot(vt.values) err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print('Error after zeroing out dim {}'.format(numdim)) print(err[-1]) return err
python
def accuracy_study(tdm=None, u=None, s=None, vt=None, verbosity=0, **kwargs): """ Reconstruct the term-document matrix and measure error as SVD terms are truncated """ smat = np.zeros((len(u), len(vt))) np.fill_diagonal(smat, s) smat = pd.DataFrame(smat, columns=vt.index, index=u.index) if verbosity: print() print('Sigma:') print(smat.round(2)) print() print('Sigma without zeroing any dim:') print(np.diag(smat.round(2))) tdm_prime = u.values.dot(smat.values).dot(vt.values) if verbosity: print() print('Reconstructed Term-Document Matrix') print(tdm_prime.round(2)) err = [np.sqrt(((tdm_prime - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print() print('Error without reducing dimensions:') print(err[-1]) # 2.3481474529927113e-15 smat2 = smat.copy() for numdim in range(len(s) - 1, 0, -1): smat2.iloc[numdim, numdim] = 0 if verbosity: print('Sigma after zeroing out dim {}'.format(numdim)) print(np.diag(smat2.round(2))) # d0 d1 d2 d3 d4 d5 # ship 2.16 0.00 0.0 0.0 0.0 0.0 # boat 0.00 1.59 0.0 0.0 0.0 0.0 # ocean 0.00 0.00 0.0 0.0 0.0 0.0 # voyage 0.00 0.00 0.0 0.0 0.0 0.0 # trip 0.00 0.00 0.0 0.0 0.0 0.0 tdm_prime2 = u.values.dot(smat2.values).dot(vt.values) err += [np.sqrt(((tdm_prime2 - tdm).values.flatten() ** 2).sum() / np.product(tdm.shape))] if verbosity: print('Error after zeroing out dim {}'.format(numdim)) print(err[-1]) return err
[ "def", "accuracy_study", "(", "tdm", "=", "None", ",", "u", "=", "None", ",", "s", "=", "None", ",", "vt", "=", "None", ",", "verbosity", "=", "0", ",", "*", "*", "kwargs", ")", ":", "smat", "=", "np", ".", "zeros", "(", "(", "len", "(", "u", ")", ",", "len", "(", "vt", ")", ")", ")", "np", ".", "fill_diagonal", "(", "smat", ",", "s", ")", "smat", "=", "pd", ".", "DataFrame", "(", "smat", ",", "columns", "=", "vt", ".", "index", ",", "index", "=", "u", ".", "index", ")", "if", "verbosity", ":", "print", "(", ")", "print", "(", "'Sigma:'", ")", "print", "(", "smat", ".", "round", "(", "2", ")", ")", "print", "(", ")", "print", "(", "'Sigma without zeroing any dim:'", ")", "print", "(", "np", ".", "diag", "(", "smat", ".", "round", "(", "2", ")", ")", ")", "tdm_prime", "=", "u", ".", "values", ".", "dot", "(", "smat", ".", "values", ")", ".", "dot", "(", "vt", ".", "values", ")", "if", "verbosity", ":", "print", "(", ")", "print", "(", "'Reconstructed Term-Document Matrix'", ")", "print", "(", "tdm_prime", ".", "round", "(", "2", ")", ")", "err", "=", "[", "np", ".", "sqrt", "(", "(", "(", "tdm_prime", "-", "tdm", ")", ".", "values", ".", "flatten", "(", ")", "**", "2", ")", ".", "sum", "(", ")", "/", "np", ".", "product", "(", "tdm", ".", "shape", ")", ")", "]", "if", "verbosity", ":", "print", "(", ")", "print", "(", "'Error without reducing dimensions:'", ")", "print", "(", "err", "[", "-", "1", "]", ")", "# 2.3481474529927113e-15", "smat2", "=", "smat", ".", "copy", "(", ")", "for", "numdim", "in", "range", "(", "len", "(", "s", ")", "-", "1", ",", "0", ",", "-", "1", ")", ":", "smat2", ".", "iloc", "[", "numdim", ",", "numdim", "]", "=", "0", "if", "verbosity", ":", "print", "(", "'Sigma after zeroing out dim {}'", ".", "format", "(", "numdim", ")", ")", "print", "(", "np", ".", "diag", "(", "smat2", ".", "round", "(", "2", ")", ")", ")", "# d0 d1 d2 d3 d4 d5", "# ship 2.16 0.00 0.0 0.0 0.0 0.0", "# boat 0.00 1.59 0.0 0.0 0.0 0.0", "# ocean 0.00 0.00 0.0 0.0 
0.0 0.0", "# voyage 0.00 0.00 0.0 0.0 0.0 0.0", "# trip 0.00 0.00 0.0 0.0 0.0 0.0", "tdm_prime2", "=", "u", ".", "values", ".", "dot", "(", "smat2", ".", "values", ")", ".", "dot", "(", "vt", ".", "values", ")", "err", "+=", "[", "np", ".", "sqrt", "(", "(", "(", "tdm_prime2", "-", "tdm", ")", ".", "values", ".", "flatten", "(", ")", "**", "2", ")", ".", "sum", "(", ")", "/", "np", ".", "product", "(", "tdm", ".", "shape", ")", ")", "]", "if", "verbosity", ":", "print", "(", "'Error after zeroing out dim {}'", ".", "format", "(", "numdim", ")", ")", "print", "(", "err", "[", "-", "1", "]", ")", "return", "err" ]
Reconstruct the term-document matrix and measure error as SVD terms are truncated
[ "Reconstruct", "the", "term", "-", "document", "matrix", "and", "measure", "error", "as", "SVD", "terms", "are", "truncated" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/book/examples/ch04_catdog_lsa_sorted.py#L143-L187
231,489
totalgood/nlpia
src/nlpia/anki.py
get_anki_phrases
def get_anki_phrases(lang='english', limit=None): """ Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."] """ lang = lang.strip().lower()[:3] lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang if lang[:2] == 'en': return get_anki_phrases_english(limit=limit) return sorted(get_data(lang).iloc[:, -1].str.strip().values)
python
def get_anki_phrases(lang='english', limit=None): """ Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."] """ lang = lang.strip().lower()[:3] lang = LANG2ANKI[lang[:2]] if lang not in ANKI_LANGUAGES else lang if lang[:2] == 'en': return get_anki_phrases_english(limit=limit) return sorted(get_data(lang).iloc[:, -1].str.strip().values)
[ "def", "get_anki_phrases", "(", "lang", "=", "'english'", ",", "limit", "=", "None", ")", ":", "lang", "=", "lang", ".", "strip", "(", ")", ".", "lower", "(", ")", "[", ":", "3", "]", "lang", "=", "LANG2ANKI", "[", "lang", "[", ":", "2", "]", "]", "if", "lang", "not", "in", "ANKI_LANGUAGES", "else", "lang", "if", "lang", "[", ":", "2", "]", "==", "'en'", ":", "return", "get_anki_phrases_english", "(", "limit", "=", "limit", ")", "return", "sorted", "(", "get_data", "(", "lang", ")", ".", "iloc", "[", ":", ",", "-", "1", "]", ".", "str", ".", "strip", "(", ")", ".", "values", ")" ]
Retrieve as many anki paired-statement corpora as you can for the requested language If `ankis` (requested languages) is more than one, then get the english texts associated with those languages. TODO: improve modularity: def function that takes a single language and call it recursively if necessary >>> get_anki_phrases('afr')[:2] ["'n Groen piesang is nie ryp genoeg om te eet nie.", "'n Hond het agter die kat aan gehardloop."]
[ "Retrieve", "as", "many", "anki", "paired", "-", "statement", "corpora", "as", "you", "can", "for", "the", "requested", "language" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L16-L30
231,490
totalgood/nlpia
src/nlpia/anki.py
get_anki_phrases_english
def get_anki_phrases_english(limit=None): """ Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True """ texts = set() for lang in ANKI_LANGUAGES: df = get_data(lang) phrases = df.eng.str.strip().values texts = texts.union(set(phrases)) if limit and len(texts) >= limit: break return sorted(texts)
python
def get_anki_phrases_english(limit=None): """ Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True """ texts = set() for lang in ANKI_LANGUAGES: df = get_data(lang) phrases = df.eng.str.strip().values texts = texts.union(set(phrases)) if limit and len(texts) >= limit: break return sorted(texts)
[ "def", "get_anki_phrases_english", "(", "limit", "=", "None", ")", ":", "texts", "=", "set", "(", ")", "for", "lang", "in", "ANKI_LANGUAGES", ":", "df", "=", "get_data", "(", "lang", ")", "phrases", "=", "df", ".", "eng", ".", "str", ".", "strip", "(", ")", ".", "values", "texts", "=", "texts", ".", "union", "(", "set", "(", "phrases", ")", ")", "if", "limit", "and", "len", "(", "texts", ")", ">=", "limit", ":", "break", "return", "sorted", "(", "texts", ")" ]
Return all the English phrases in the Anki translation flashcards >>> len(get_anki_phrases_english(limit=100)) > 700 True
[ "Return", "all", "the", "English", "phrases", "in", "the", "Anki", "translation", "flashcards" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L33-L46
231,491
totalgood/nlpia
src/nlpia/anki.py
get_vocab
def get_vocab(docs): """ Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0 """ if isinstance(docs, spacy.tokens.doc.Doc): return get_vocab([docs]) vocab = set() for doc in tqdm(docs): for tok in doc: vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment)) # TODO: add ent type info and other flags, e.g. like_url, like_email, etc return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split())
python
def get_vocab(docs): """ Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0 """ if isinstance(docs, spacy.tokens.doc.Doc): return get_vocab([docs]) vocab = set() for doc in tqdm(docs): for tok in doc: vocab.add((tok.text, tok.pos_, tok.tag_, tok.dep_, tok.ent_type_, tok.ent_iob_, tok.sentiment)) # TODO: add ent type info and other flags, e.g. like_url, like_email, etc return pd.DataFrame(sorted(vocab), columns='word pos tag dep ent_type ent_iob sentiment'.split())
[ "def", "get_vocab", "(", "docs", ")", ":", "if", "isinstance", "(", "docs", ",", "spacy", ".", "tokens", ".", "doc", ".", "Doc", ")", ":", "return", "get_vocab", "(", "[", "docs", "]", ")", "vocab", "=", "set", "(", ")", "for", "doc", "in", "tqdm", "(", "docs", ")", ":", "for", "tok", "in", "doc", ":", "vocab", ".", "add", "(", "(", "tok", ".", "text", ",", "tok", ".", "pos_", ",", "tok", ".", "tag_", ",", "tok", ".", "dep_", ",", "tok", ".", "ent_type_", ",", "tok", ".", "ent_iob_", ",", "tok", ".", "sentiment", ")", ")", "# TODO: add ent type info and other flags, e.g. like_url, like_email, etc", "return", "pd", ".", "DataFrame", "(", "sorted", "(", "vocab", ")", ",", "columns", "=", "'word pos tag dep ent_type ent_iob sentiment'", ".", "split", "(", ")", ")" ]
Build a DataFrame containing all the words in the docs provided along with their POS tags etc >>> doc = nlp("Hey Mr. Tangerine Man!") <BLANKLINE> ... >>> get_vocab([doc]) word pos tag dep ent_type ent_iob sentiment 0 ! PUNCT . punct O 0.0 1 Hey INTJ UH intj O 0.0 2 Man NOUN NN ROOT PERSON I 0.0 3 Mr. PROPN NNP compound O 0.0 4 Tangerine PROPN NNP compound PERSON B 0.0
[ "Build", "a", "DataFrame", "containing", "all", "the", "words", "in", "the", "docs", "provided", "along", "with", "their", "POS", "tags", "etc" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L49-L70
231,492
totalgood/nlpia
src/nlpia/anki.py
get_word_vectors
def get_word_vectors(vocab): """ Create a word2vec embedding matrix for all the words in the vocab """ wv = get_data('word2vec') vectors = np.array(len(vocab), len(wv['the'])) for i, tok in enumerate(vocab): word = tok[0] variations = (word, word.lower(), word.lower()[:-1]) for w in variations: if w in wv: vectors[i, :] = wv[w] if not np.sum(np.abs(vectors[i])): logger.warning('Unable to find {}, {}, or {} in word2vec.'.format(*variations)) return vectors
python
def get_word_vectors(vocab): """ Create a word2vec embedding matrix for all the words in the vocab """ wv = get_data('word2vec') vectors = np.array(len(vocab), len(wv['the'])) for i, tok in enumerate(vocab): word = tok[0] variations = (word, word.lower(), word.lower()[:-1]) for w in variations: if w in wv: vectors[i, :] = wv[w] if not np.sum(np.abs(vectors[i])): logger.warning('Unable to find {}, {}, or {} in word2vec.'.format(*variations)) return vectors
[ "def", "get_word_vectors", "(", "vocab", ")", ":", "wv", "=", "get_data", "(", "'word2vec'", ")", "vectors", "=", "np", ".", "array", "(", "len", "(", "vocab", ")", ",", "len", "(", "wv", "[", "'the'", "]", ")", ")", "for", "i", ",", "tok", "in", "enumerate", "(", "vocab", ")", ":", "word", "=", "tok", "[", "0", "]", "variations", "=", "(", "word", ",", "word", ".", "lower", "(", ")", ",", "word", ".", "lower", "(", ")", "[", ":", "-", "1", "]", ")", "for", "w", "in", "variations", ":", "if", "w", "in", "wv", ":", "vectors", "[", "i", ",", ":", "]", "=", "wv", "[", "w", "]", "if", "not", "np", ".", "sum", "(", "np", ".", "abs", "(", "vectors", "[", "i", "]", ")", ")", ":", "logger", ".", "warning", "(", "'Unable to find {}, {}, or {} in word2vec.'", ".", "format", "(", "*", "variations", ")", ")", "return", "vectors" ]
Create a word2vec embedding matrix for all the words in the vocab
[ "Create", "a", "word2vec", "embedding", "matrix", "for", "all", "the", "words", "in", "the", "vocab" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L73-L85
231,493
totalgood/nlpia
src/nlpia/anki.py
get_anki_vocab
def get_anki_vocab(lang=['eng'], limit=None, filename='anki_en_vocabulary.csv'): """ Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors """ texts = get_anki_phrases(lang=lang, limit=limit) docs = nlp(texts, lang=lang) vocab = get_vocab(docs) vocab['vector'] = get_word_vectors(vocab) # TODO: turn this into a KeyedVectors object if filename: vocab.to_csv(os.path.join(BIGDATA_PATH, filename)) return vocab
python
def get_anki_vocab(lang=['eng'], limit=None, filename='anki_en_vocabulary.csv'): """ Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors """ texts = get_anki_phrases(lang=lang, limit=limit) docs = nlp(texts, lang=lang) vocab = get_vocab(docs) vocab['vector'] = get_word_vectors(vocab) # TODO: turn this into a KeyedVectors object if filename: vocab.to_csv(os.path.join(BIGDATA_PATH, filename)) return vocab
[ "def", "get_anki_vocab", "(", "lang", "=", "[", "'eng'", "]", ",", "limit", "=", "None", ",", "filename", "=", "'anki_en_vocabulary.csv'", ")", ":", "texts", "=", "get_anki_phrases", "(", "lang", "=", "lang", ",", "limit", "=", "limit", ")", "docs", "=", "nlp", "(", "texts", ",", "lang", "=", "lang", ")", "vocab", "=", "get_vocab", "(", "docs", ")", "vocab", "[", "'vector'", "]", "=", "get_word_vectors", "(", "vocab", ")", "# TODO: turn this into a KeyedVectors object", "if", "filename", ":", "vocab", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "filename", ")", ")", "return", "vocab" ]
Get all the vocab words+tags+wordvectors for the tokens in the Anki translation corpus Returns a DataFrame of with columns = word, pos, tag, dep, ent, ent_iob, sentiment, vectors
[ "Get", "all", "the", "vocab", "words", "+", "tags", "+", "wordvectors", "for", "the", "tokens", "in", "the", "Anki", "translation", "corpus" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/anki.py#L88-L99
231,494
totalgood/nlpia
src/nlpia/scripts/lsa_tweets.py
lsa_twitter
def lsa_twitter(cased_tokens): """ Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens """ # Only 5 of these tokens are saved for a no_below=2 filter: # PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing if cased_tokens is None: cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' + 'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip' ).split() cased_tokens += [s + 's' for s in cased_tokens] cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \ 'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split() allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens] allcase_tokens += [s.title() for s in cased_tokens] allcase_tokens += [s.upper() for s in cased_tokens] KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens] # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl') if os.path.isfile(vocab_path): print('Loading vocab: {} ...'.format(vocab_path)) vocab = Dictionary.load(vocab_path) print(' len(vocab) loaded: {}'.format(len(vocab.dfs))) else: tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz') print('Loading tweets: {} ...'.format(tweets_path)) tweets = read_csv(tweets_path) tweets = pd.np.array(tweets.text.str.split()) with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f: for tokens in tweets: f.write((' '.join(tokens) + '\n').encode('utf-8')) # tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8')) # tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8')) # tweets.to_csv('tweets.csv.gz', compression='gzip') print('Computing vocab from {} tweets...'.format(len(tweets))) vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS)) 
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS)) print(' len(vocab) after filtering: {}'.format(len(vocab.dfs))) # no time at all, just a bookeeping step, doesn't actually compute anything tfidf = TfidfModel(id2word=vocab, dictionary=vocab) tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs)))) tweets = [vocab.doc2bow(tw) for tw in tweets] json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w')) gc.collect() # LSA is more useful name than LSA lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2) return lsa
python
def lsa_twitter(cased_tokens): """ Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens """ # Only 5 of these tokens are saved for a no_below=2 filter: # PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing if cased_tokens is None: cased_tokens = ('PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial ' + 'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip' ).split() cased_tokens += [s + 's' for s in cased_tokens] cased_tokens += 'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com ' \ 'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'.split() allcase_tokens = cased_tokens + [s.lower() for s in cased_tokens] allcase_tokens += [s.title() for s in cased_tokens] allcase_tokens += [s.upper() for s in cased_tokens] KEEP_TOKENS = allcase_tokens + ['#' + s for s in allcase_tokens] # takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs vocab_path = os.path.join(BIGDATA_PATH, 'vocab939370.pkl') if os.path.isfile(vocab_path): print('Loading vocab: {} ...'.format(vocab_path)) vocab = Dictionary.load(vocab_path) print(' len(vocab) loaded: {}'.format(len(vocab.dfs))) else: tweets_path = os.path.join(BIGDATA_PATH, 'tweets.csv.gz') print('Loading tweets: {} ...'.format(tweets_path)) tweets = read_csv(tweets_path) tweets = pd.np.array(tweets.text.str.split()) with gzip.open(os.path.join(BIGDATA_PATH, 'tweets.txt.gz'), 'w') as f: for tokens in tweets: f.write((' '.join(tokens) + '\n').encode('utf-8')) # tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8')) # tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8')) # tweets.to_csv('tweets.csv.gz', compression='gzip') print('Computing vocab from {} tweets...'.format(len(tweets))) vocab = Dictionary(tweets, no_below=NO_BELOW, no_above=NO_ABOVE, keep_tokens=set(KEEP_TOKENS)) 
vocab.filter_extremes(no_below=NO_BELOW, no_above=NO_ABOVE, keep_n=KEEP_N, keep_tokens=set(KEEP_TOKENS)) print(' len(vocab) after filtering: {}'.format(len(vocab.dfs))) # no time at all, just a bookeeping step, doesn't actually compute anything tfidf = TfidfModel(id2word=vocab, dictionary=vocab) tfidf.save(os.path.join(BIGDATA_PATH, 'tfidf{}.pkl'.format(len(vocab.dfs)))) tweets = [vocab.doc2bow(tw) for tw in tweets] json.dump(tweets, gzip.open(os.path.join(BIGDATA_PATH, 'tweet_bows.json.gz'), 'w')) gc.collect() # LSA is more useful name than LSA lsa = LsiModel(tfidf[tweets], num_topics=200, id2word=vocab, extra_samples=100, power_iters=2) return lsa
[ "def", "lsa_twitter", "(", "cased_tokens", ")", ":", "# Only 5 of these tokens are saved for a no_below=2 filter:", "# PyCons NLPS #PyCon2016 #NaturalLanguageProcessing #naturallanguageprocessing", "if", "cased_tokens", "is", "None", ":", "cased_tokens", "=", "(", "'PyConOpenSpaces PyCon PyCon2017 PyCon2018 PyCon2016 PyCon2015 OpenSpace PyconTutorial '", "+", "'NLP NaturalLanguageProcessing NLPInAction NaturalLanguageProcessingInAction NLPIA Twote Twip'", ")", ".", "split", "(", ")", "cased_tokens", "+=", "[", "s", "+", "'s'", "for", "s", "in", "cased_tokens", "]", "cased_tokens", "+=", "'TotalGood TotalGoods HobsonLane Hob Hobs TotalGood.com '", "'www.TotalGood.com http://www.TotalGood.com https://www.TotalGood.com'", ".", "split", "(", ")", "allcase_tokens", "=", "cased_tokens", "+", "[", "s", ".", "lower", "(", ")", "for", "s", "in", "cased_tokens", "]", "allcase_tokens", "+=", "[", "s", ".", "title", "(", ")", "for", "s", "in", "cased_tokens", "]", "allcase_tokens", "+=", "[", "s", ".", "upper", "(", ")", "for", "s", "in", "cased_tokens", "]", "KEEP_TOKENS", "=", "allcase_tokens", "+", "[", "'#'", "+", "s", "for", "s", "in", "allcase_tokens", "]", "# takes 15 minutes and 10GB of RAM for 500k tweets if you keep all 20M unique tokens/names URLs", "vocab_path", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'vocab939370.pkl'", ")", "if", "os", ".", "path", ".", "isfile", "(", "vocab_path", ")", ":", "print", "(", "'Loading vocab: {} ...'", ".", "format", "(", "vocab_path", ")", ")", "vocab", "=", "Dictionary", ".", "load", "(", "vocab_path", ")", "print", "(", "' len(vocab) loaded: {}'", ".", "format", "(", "len", "(", "vocab", ".", "dfs", ")", ")", ")", "else", ":", "tweets_path", "=", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'tweets.csv.gz'", ")", "print", "(", "'Loading tweets: {} ...'", ".", "format", "(", "tweets_path", ")", ")", "tweets", "=", "read_csv", "(", "tweets_path", ")", "tweets", "=", "pd", ".", "np", ".", 
"array", "(", "tweets", ".", "text", ".", "str", ".", "split", "(", ")", ")", "with", "gzip", ".", "open", "(", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'tweets.txt.gz'", ")", ",", "'w'", ")", "as", "f", ":", "for", "tokens", "in", "tweets", ":", "f", ".", "write", "(", "(", "' '", ".", "join", "(", "tokens", ")", "+", "'\\n'", ")", ".", "encode", "(", "'utf-8'", ")", ")", "# tweets['text'] = tweets.text.apply(lambda s: eval(s).decode('utf-8'))", "# tweets['user'] = tweets.user.apply(lambda s: eval(s).decode('utf-8'))", "# tweets.to_csv('tweets.csv.gz', compression='gzip')", "print", "(", "'Computing vocab from {} tweets...'", ".", "format", "(", "len", "(", "tweets", ")", ")", ")", "vocab", "=", "Dictionary", "(", "tweets", ",", "no_below", "=", "NO_BELOW", ",", "no_above", "=", "NO_ABOVE", ",", "keep_tokens", "=", "set", "(", "KEEP_TOKENS", ")", ")", "vocab", ".", "filter_extremes", "(", "no_below", "=", "NO_BELOW", ",", "no_above", "=", "NO_ABOVE", ",", "keep_n", "=", "KEEP_N", ",", "keep_tokens", "=", "set", "(", "KEEP_TOKENS", ")", ")", "print", "(", "' len(vocab) after filtering: {}'", ".", "format", "(", "len", "(", "vocab", ".", "dfs", ")", ")", ")", "# no time at all, just a bookeeping step, doesn't actually compute anything", "tfidf", "=", "TfidfModel", "(", "id2word", "=", "vocab", ",", "dictionary", "=", "vocab", ")", "tfidf", ".", "save", "(", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'tfidf{}.pkl'", ".", "format", "(", "len", "(", "vocab", ".", "dfs", ")", ")", ")", ")", "tweets", "=", "[", "vocab", ".", "doc2bow", "(", "tw", ")", "for", "tw", "in", "tweets", "]", "json", ".", "dump", "(", "tweets", ",", "gzip", ".", "open", "(", "os", ".", "path", ".", "join", "(", "BIGDATA_PATH", ",", "'tweet_bows.json.gz'", ")", ",", "'w'", ")", ")", "gc", ".", "collect", "(", ")", "# LSA is more useful name than LSA", "lsa", "=", "LsiModel", "(", "tfidf", "[", "tweets", "]", ",", "num_topics", "=", "200", ",", "id2word", "=", 
"vocab", ",", "extra_samples", "=", "100", ",", "power_iters", "=", "2", ")", "return", "lsa" ]
Latent Sentiment Analyis on random sampling of twitter search results for words listed in cased_tokens
[ "Latent", "Sentiment", "Analyis", "on", "random", "sampling", "of", "twitter", "search", "results", "for", "words", "listed", "in", "cased_tokens" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/scripts/lsa_tweets.py#L18-L72
231,495
totalgood/nlpia
src/nlpia/futil.py
wc
def wc(f, verbose=False, nrows=None): r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037 """ tqdm_prog = tqdm if verbose else no_tqdm with ensure_open(f, mode='r') as fin: for i, line in tqdm_prog(enumerate(fin)): if nrows is not None and i >= nrows - 1: break # fin.seek(0) return i + 1
python
def wc(f, verbose=False, nrows=None): r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037 """ tqdm_prog = tqdm if verbose else no_tqdm with ensure_open(f, mode='r') as fin: for i, line in tqdm_prog(enumerate(fin)): if nrows is not None and i >= nrows - 1: break # fin.seek(0) return i + 1
[ "def", "wc", "(", "f", ",", "verbose", "=", "False", ",", "nrows", "=", "None", ")", ":", "tqdm_prog", "=", "tqdm", "if", "verbose", "else", "no_tqdm", "with", "ensure_open", "(", "f", ",", "mode", "=", "'r'", ")", "as", "fin", ":", "for", "i", ",", "line", "in", "tqdm_prog", "(", "enumerate", "(", "fin", ")", ")", ":", "if", "nrows", "is", "not", "None", "and", "i", ">=", "nrows", "-", "1", ":", "break", "# fin.seek(0)", "return", "i", "+", "1" ]
r""" Count lines in a text file References: https://stackoverflow.com/q/845058/623735 >>> with open(os.path.join(DATA_PATH, 'dictionary_fda_drug_names.txt')) as fin: ... print(wc(fin) == wc(fin) == 7037 == wc(fin.name)) True >>> wc(fin.name) 7037
[ "r", "Count", "lines", "in", "a", "text", "file" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L36-L54
231,496
totalgood/nlpia
src/nlpia/futil.py
normalize_filepath
def normalize_filepath(filepath): r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz' """ filename = os.path.basename(filepath) dirpath = filepath[:-len(filename)] cre_controlspace = re.compile(r'[\t\r\n\f]+') new_filename = cre_controlspace.sub('', filename) if not new_filename == filename: logger.warning('Stripping whitespace from filename: {} => {}'.format( repr(filename), repr(new_filename))) filename = new_filename filename = filename.lower() filename = normalize_ext(filename) if dirpath: dirpath = dirpath[:-1] # get rid of the trailing os.path.sep return os.path.join(dirpath, filename) return filename
python
def normalize_filepath(filepath): r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz' """ filename = os.path.basename(filepath) dirpath = filepath[:-len(filename)] cre_controlspace = re.compile(r'[\t\r\n\f]+') new_filename = cre_controlspace.sub('', filename) if not new_filename == filename: logger.warning('Stripping whitespace from filename: {} => {}'.format( repr(filename), repr(new_filename))) filename = new_filename filename = filename.lower() filename = normalize_ext(filename) if dirpath: dirpath = dirpath[:-1] # get rid of the trailing os.path.sep return os.path.join(dirpath, filename) return filename
[ "def", "normalize_filepath", "(", "filepath", ")", ":", "filename", "=", "os", ".", "path", ".", "basename", "(", "filepath", ")", "dirpath", "=", "filepath", "[", ":", "-", "len", "(", "filename", ")", "]", "cre_controlspace", "=", "re", ".", "compile", "(", "r'[\\t\\r\\n\\f]+'", ")", "new_filename", "=", "cre_controlspace", ".", "sub", "(", "''", ",", "filename", ")", "if", "not", "new_filename", "==", "filename", ":", "logger", ".", "warning", "(", "'Stripping whitespace from filename: {} => {}'", ".", "format", "(", "repr", "(", "filename", ")", ",", "repr", "(", "new_filename", ")", ")", ")", "filename", "=", "new_filename", "filename", "=", "filename", ".", "lower", "(", ")", "filename", "=", "normalize_ext", "(", "filename", ")", "if", "dirpath", ":", "dirpath", "=", "dirpath", "[", ":", "-", "1", "]", "# get rid of the trailing os.path.sep", "return", "os", ".", "path", ".", "join", "(", "dirpath", ",", "filename", ")", "return", "filename" ]
r""" Lowercase the filename and ext, expanding extensions like .tgz to .tar.gz. >>> normalize_filepath('/Hello_World.txt\n') 'hello_world.txt' >>> normalize_filepath('NLPIA/src/nlpia/bigdata/Goog New 300Dneg\f.bIn\n.GZ') 'NLPIA/src/nlpia/bigdata/goog new 300dneg.bin.gz'
[ "r", "Lowercase", "the", "filename", "and", "ext", "expanding", "extensions", "like", ".", "tgz", "to", ".", "tar", ".", "gz", "." ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L278-L299
231,497
totalgood/nlpia
src/nlpia/futil.py
find_filepath
def find_filepath( filename, basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')): """ Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False """ if os.path.isfile(filename): return filename for basedir in basepaths: fullpath = expand_filepath(os.path.join(basedir, filename)) if os.path.isfile(fullpath): return fullpath return False
python
def find_filepath( filename, basepaths=(os.path.curdir, DATA_PATH, BIGDATA_PATH, BASE_DIR, '~', '~/Downloads', os.path.join('/', 'tmp'), '..')): """ Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False """ if os.path.isfile(filename): return filename for basedir in basepaths: fullpath = expand_filepath(os.path.join(basedir, filename)) if os.path.isfile(fullpath): return fullpath return False
[ "def", "find_filepath", "(", "filename", ",", "basepaths", "=", "(", "os", ".", "path", ".", "curdir", ",", "DATA_PATH", ",", "BIGDATA_PATH", ",", "BASE_DIR", ",", "'~'", ",", "'~/Downloads'", ",", "os", ".", "path", ".", "join", "(", "'/'", ",", "'tmp'", ")", ",", "'..'", ")", ")", ":", "if", "os", ".", "path", ".", "isfile", "(", "filename", ")", ":", "return", "filename", "for", "basedir", "in", "basepaths", ":", "fullpath", "=", "expand_filepath", "(", "os", ".", "path", ".", "join", "(", "basedir", ",", "filename", ")", ")", "if", "os", ".", "path", ".", "isfile", "(", "fullpath", ")", ":", "return", "fullpath", "return", "False" ]
Given a filename or path see if it exists in any of the common places datafiles might be >>> p = find_filepath('iq_test.csv') >>> p == expand_filepath(os.path.join(DATA_PATH, 'iq_test.csv')) True >>> p[-len('iq_test.csv'):] 'iq_test.csv' >>> find_filepath('exponentially-crazy-filename-2.718281828459045.nonexistent') False
[ "Given", "a", "filename", "or", "path", "see", "if", "it", "exists", "in", "any", "of", "the", "common", "places", "datafiles", "might", "be" ]
efa01126275e9cd3c3a5151a644f1c798a9ec53f
https://github.com/totalgood/nlpia/blob/efa01126275e9cd3c3a5151a644f1c798a9ec53f/src/nlpia/futil.py#L302-L321
231,498
neo4j/neo4j-python-driver
neo4j/__init__.py
Driver.close
def close(self): """ Shut down, closing any open connections in the pool. """ if not self._closed: self._closed = True if self._pool is not None: self._pool.close() self._pool = None
python
def close(self): """ Shut down, closing any open connections in the pool. """ if not self._closed: self._closed = True if self._pool is not None: self._pool.close() self._pool = None
[ "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_closed", ":", "self", ".", "_closed", "=", "True", "if", "self", ".", "_pool", "is", "not", "None", ":", "self", ".", "_pool", ".", "close", "(", ")", "self", ".", "_pool", "=", "None" ]
Shut down, closing any open connections in the pool.
[ "Shut", "down", "closing", "any", "open", "connections", "in", "the", "pool", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/__init__.py#L163-L170
231,499
neo4j/neo4j-python-driver
neo4j/types/spatial.py
hydrate_point
def hydrate_point(srid, *coordinates): """ Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found. """ try: point_class, dim = __srid_table[srid] except KeyError: point = Point(coordinates) point.srid = srid return point else: if len(coordinates) != dim: raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates))) return point_class(coordinates)
python
def hydrate_point(srid, *coordinates): """ Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found. """ try: point_class, dim = __srid_table[srid] except KeyError: point = Point(coordinates) point.srid = srid return point else: if len(coordinates) != dim: raise ValueError("SRID %d requires %d coordinates (%d provided)" % (srid, dim, len(coordinates))) return point_class(coordinates)
[ "def", "hydrate_point", "(", "srid", ",", "*", "coordinates", ")", ":", "try", ":", "point_class", ",", "dim", "=", "__srid_table", "[", "srid", "]", "except", "KeyError", ":", "point", "=", "Point", "(", "coordinates", ")", "point", ".", "srid", "=", "srid", "return", "point", "else", ":", "if", "len", "(", "coordinates", ")", "!=", "dim", ":", "raise", "ValueError", "(", "\"SRID %d requires %d coordinates (%d provided)\"", "%", "(", "srid", ",", "dim", ",", "len", "(", "coordinates", ")", ")", ")", "return", "point_class", "(", "coordinates", ")" ]
Create a new instance of a Point subclass from a raw set of fields. The subclass chosen is determined by the given SRID; a ValueError will be raised if no such subclass can be found.
[ "Create", "a", "new", "instance", "of", "a", "Point", "subclass", "from", "a", "raw", "set", "of", "fields", ".", "The", "subclass", "chosen", "is", "determined", "by", "the", "given", "SRID", ";", "a", "ValueError", "will", "be", "raised", "if", "no", "such", "subclass", "can", "be", "found", "." ]
0c641e826765e86ff5454dae57c99521db8ca45c
https://github.com/neo4j/neo4j-python-driver/blob/0c641e826765e86ff5454dae57c99521db8ca45c/neo4j/types/spatial.py#L104-L119