id: int32 (values 0 to 252k)
repo: string (length 7 to 55)
path: string (length 4 to 127)
func_name: string (length 1 to 88)
original_string: string (length 75 to 19.8k)
language: string (1 distinct value)
code: string (length 75 to 19.8k)
code_tokens: list
docstring: string (length 3 to 17.3k)
docstring_tokens: list
sha: string (length 40)
url: string (length 87 to 242)
7,000
limodou/uliweb
uliweb/orm/__init__.py
Model._get_data
def _get_data(self, fields=None, compare=True): """ Get the changed property, it'll be used to save the object If compare is False, then it'll include all data not only changed property """ fields = fields or [] if self._key is None or self._key == '' or self._key == 0: d = {} for k, v in self.properties.items(): #test fields if fields and k not in fields: continue # if not isinstance(v, ManyToMany): if v.property_type == 'compound': continue if v.sequence: continue if not isinstance(v, ManyToMany): x = v.get_value_for_datastore(self) if isinstance(x, Model): x = x._key elif x is None or (k==self._primary_field and not x): if isinstance(v, DateTimeProperty) and v.auto_now_add: x = v.now() elif (v.auto_add or (not v.auto and not v.auto_add)): x = v.default_value() else: x = v.get_value_for_datastore(self, cached=True) if x is not None and not x is Lazy: d[k] = x else: d = {} d[self._primary_field] = self._key for k, v in self.properties.items(): if fields and k not in fields: continue if v.property_type == 'compound': continue t = self._old_values.get(k, None) if not isinstance(v, ManyToMany): x = v.get_value_for_datastore(self) if isinstance(x, Model): x = x._key else: x = v.get_value_for_datastore(self, cached=True) if not x is Lazy: if (compare and t != self.field_str(x)) or not compare: d[k] = x return d
python
def _get_data(self, fields=None, compare=True): """ Get the changed property, it'll be used to save the object If compare is False, then it'll include all data not only changed property """ fields = fields or [] if self._key is None or self._key == '' or self._key == 0: d = {} for k, v in self.properties.items(): #test fields if fields and k not in fields: continue # if not isinstance(v, ManyToMany): if v.property_type == 'compound': continue if v.sequence: continue if not isinstance(v, ManyToMany): x = v.get_value_for_datastore(self) if isinstance(x, Model): x = x._key elif x is None or (k==self._primary_field and not x): if isinstance(v, DateTimeProperty) and v.auto_now_add: x = v.now() elif (v.auto_add or (not v.auto and not v.auto_add)): x = v.default_value() else: x = v.get_value_for_datastore(self, cached=True) if x is not None and not x is Lazy: d[k] = x else: d = {} d[self._primary_field] = self._key for k, v in self.properties.items(): if fields and k not in fields: continue if v.property_type == 'compound': continue t = self._old_values.get(k, None) if not isinstance(v, ManyToMany): x = v.get_value_for_datastore(self) if isinstance(x, Model): x = x._key else: x = v.get_value_for_datastore(self, cached=True) if not x is Lazy: if (compare and t != self.field_str(x)) or not compare: d[k] = x return d
[ "def", "_get_data", "(", "self", ",", "fields", "=", "None", ",", "compare", "=", "True", ")", ":", "fields", "=", "fields", "or", "[", "]", "if", "self", ".", "_key", "is", "None", "or", "self", ".", "_key", "==", "''", "or", "self", ".", "_key", "==", "0", ":", "d", "=", "{", "}", "for", "k", ",", "v", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "#test fields", "if", "fields", "and", "k", "not", "in", "fields", ":", "continue", "# if not isinstance(v, ManyToMany):", "if", "v", ".", "property_type", "==", "'compound'", ":", "continue", "if", "v", ".", "sequence", ":", "continue", "if", "not", "isinstance", "(", "v", ",", "ManyToMany", ")", ":", "x", "=", "v", ".", "get_value_for_datastore", "(", "self", ")", "if", "isinstance", "(", "x", ",", "Model", ")", ":", "x", "=", "x", ".", "_key", "elif", "x", "is", "None", "or", "(", "k", "==", "self", ".", "_primary_field", "and", "not", "x", ")", ":", "if", "isinstance", "(", "v", ",", "DateTimeProperty", ")", "and", "v", ".", "auto_now_add", ":", "x", "=", "v", ".", "now", "(", ")", "elif", "(", "v", ".", "auto_add", "or", "(", "not", "v", ".", "auto", "and", "not", "v", ".", "auto_add", ")", ")", ":", "x", "=", "v", ".", "default_value", "(", ")", "else", ":", "x", "=", "v", ".", "get_value_for_datastore", "(", "self", ",", "cached", "=", "True", ")", "if", "x", "is", "not", "None", "and", "not", "x", "is", "Lazy", ":", "d", "[", "k", "]", "=", "x", "else", ":", "d", "=", "{", "}", "d", "[", "self", ".", "_primary_field", "]", "=", "self", ".", "_key", "for", "k", ",", "v", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "if", "fields", "and", "k", "not", "in", "fields", ":", "continue", "if", "v", ".", "property_type", "==", "'compound'", ":", "continue", "t", "=", "self", ".", "_old_values", ".", "get", "(", "k", ",", "None", ")", "if", "not", "isinstance", "(", "v", ",", "ManyToMany", ")", ":", "x", "=", "v", ".", "get_value_for_datastore", "(", "self", ")", "if", "isinstance", "(", "x", ",", "Model", ")", ":", "x", "=", "x", ".", "_key", "else", ":", "x", "=", "v", ".", "get_value_for_datastore", "(", "self", ",", "cached", "=", "True", ")", "if", "not", "x", "is", "Lazy", ":", "if", "(", "compare", "and", "t", "!=", "self", ".", "field_str", "(", "x", ")", ")", "or", "not", "compare", ":", "d", "[", "k", "]", "=", "x", "return", "d" ]
Get the changed properties; the result is used to save the object. If compare is False, all data is included, not only the changed properties.
[ "Get", "the", "changed", "property", "it", "ll", "be", "used", "to", "save", "the", "object", "If", "compare", "is", "False", "then", "it", "ll", "include", "all", "data", "not", "only", "changed", "property" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L3967-L4016
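Model._get_data above decides what to persist: for an unsaved row it collects assigned and default values, and for an existing row it keeps only fields whose current value differs from the snapshot in _old_values (unless compare is False). A minimal sketch of that changed-field detection; the class and attribute names below are invented for illustration, not uliweb API:

class DirtyTracker:
    """Track which attributes changed since construction (illustrative sketch)."""

    def __init__(self, **values):
        self.__dict__.update(values)
        # String snapshot of the initial values, in the spirit of field_str() comparisons.
        self._old_values = {k: str(v) for k, v in values.items()}

    def changed_data(self, compare=True):
        # Keep only fields whose string form differs from the snapshot,
        # or everything when compare is False.
        current = {k: v for k, v in self.__dict__.items() if not k.startswith('_')}
        if not compare:
            return current
        return {k: v for k, v in current.items() if self._old_values.get(k) != str(v)}

row = DirtyTracker(name='uliweb', version=1)
row.version = 2
print(row.changed_data())               # {'version': 2}
print(row.changed_data(compare=False))  # {'name': 'uliweb', 'version': 2}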
7,001
limodou/uliweb
uliweb/orm/__init__.py
Model.create_sql
def create_sql(self, insert=False, version=False, version_fieldname=None, fields=None, ec=None, compare=False): """ Create sql statement, do not process manytomany """ version_fieldname = version_fieldname or 'version' #fix when d is empty, orm will not insert record bug 2013/04/07 if not self._key or insert: d = self._get_data(fields, compare=compare) if d: return rawsql(self.table.insert().values(**d), ec or self.get_engine_name()) + ';' else: d = self._get_data(fields, compare=compare) _key = d.pop(self._primary_field) if d: _cond = self.table.c[self._primary_field] == self._key if version: version_field = self.table.c.get(version_fieldname) if version_field is None: raise KindError("version_fieldname %s is not existed in Model %s" % (version_fieldname, self.__class__.__name__)) _version_value = getattr(self, version_fieldname, 0) # setattr(self, version_fieldname, _version_value+1) d[version_fieldname] = _version_value+1 _cond = (version_field == _version_value) & _cond return rawsql(self.table.update(_cond).values(**d), ec or self.get_engine_name()) + ';' return ''
python
def create_sql(self, insert=False, version=False, version_fieldname=None, fields=None, ec=None, compare=False): """ Create sql statement, do not process manytomany """ version_fieldname = version_fieldname or 'version' #fix when d is empty, orm will not insert record bug 2013/04/07 if not self._key or insert: d = self._get_data(fields, compare=compare) if d: return rawsql(self.table.insert().values(**d), ec or self.get_engine_name()) + ';' else: d = self._get_data(fields, compare=compare) _key = d.pop(self._primary_field) if d: _cond = self.table.c[self._primary_field] == self._key if version: version_field = self.table.c.get(version_fieldname) if version_field is None: raise KindError("version_fieldname %s is not existed in Model %s" % (version_fieldname, self.__class__.__name__)) _version_value = getattr(self, version_fieldname, 0) # setattr(self, version_fieldname, _version_value+1) d[version_fieldname] = _version_value+1 _cond = (version_field == _version_value) & _cond return rawsql(self.table.update(_cond).values(**d), ec or self.get_engine_name()) + ';' return ''
[ "def", "create_sql", "(", "self", ",", "insert", "=", "False", ",", "version", "=", "False", ",", "version_fieldname", "=", "None", ",", "fields", "=", "None", ",", "ec", "=", "None", ",", "compare", "=", "False", ")", ":", "version_fieldname", "=", "version_fieldname", "or", "'version'", "#fix when d is empty, orm will not insert record bug 2013/04/07", "if", "not", "self", ".", "_key", "or", "insert", ":", "d", "=", "self", ".", "_get_data", "(", "fields", ",", "compare", "=", "compare", ")", "if", "d", ":", "return", "rawsql", "(", "self", ".", "table", ".", "insert", "(", ")", ".", "values", "(", "*", "*", "d", ")", ",", "ec", "or", "self", ".", "get_engine_name", "(", ")", ")", "+", "';'", "else", ":", "d", "=", "self", ".", "_get_data", "(", "fields", ",", "compare", "=", "compare", ")", "_key", "=", "d", ".", "pop", "(", "self", ".", "_primary_field", ")", "if", "d", ":", "_cond", "=", "self", ".", "table", ".", "c", "[", "self", ".", "_primary_field", "]", "==", "self", ".", "_key", "if", "version", ":", "version_field", "=", "self", ".", "table", ".", "c", ".", "get", "(", "version_fieldname", ")", "if", "version_field", "is", "None", ":", "raise", "KindError", "(", "\"version_fieldname %s is not existed in Model %s\"", "%", "(", "version_fieldname", ",", "self", ".", "__class__", ".", "__name__", ")", ")", "_version_value", "=", "getattr", "(", "self", ",", "version_fieldname", ",", "0", ")", "# setattr(self, version_fieldname, _version_value+1)", "d", "[", "version_fieldname", "]", "=", "_version_value", "+", "1", "_cond", "=", "(", "version_field", "==", "_version_value", ")", "&", "_cond", "return", "rawsql", "(", "self", ".", "table", ".", "update", "(", "_cond", ")", ".", "values", "(", "*", "*", "d", ")", ",", "ec", "or", "self", ".", "get_engine_name", "(", ")", ")", "+", "';'", "return", "''" ]
Create the SQL statement; ManyToMany fields are not processed.
[ "Create", "sql", "statement", "do", "not", "process", "manytomany" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4202-L4231
7,002
limodou/uliweb
uliweb/orm/__init__.py
Model.get_collection_name
def get_collection_name(cls, from_class_name, collection_name=None, prefix=None): """ Get reference collection_name, if the collection_name is None then make sure the collection_name is not conflict, but if the collection_name is not None, then check if the collection_name is already exists, if existed then raise Exception. """ if not collection_name: collection_name = prefix + '_set' if hasattr(cls, collection_name): #if the xxx_set is already existed, then automatically #create unique collection_set id collection_name = prefix + '_set_' + str(cls._collection_set_id) cls._collection_set_id += 1 else: if collection_name in cls._collection_names: if cls._collection_names.get(collection_name) != from_class_name: raise DuplicatePropertyError("Model %s already has collection property %s" % (cls.__name__, collection_name)) #add property check if collection_name in cls.properties: raise DuplicatePropertyError("Model %s already has property %s" % (cls.__name__, collection_name)) return collection_name
python
def get_collection_name(cls, from_class_name, collection_name=None, prefix=None): """ Get reference collection_name, if the collection_name is None then make sure the collection_name is not conflict, but if the collection_name is not None, then check if the collection_name is already exists, if existed then raise Exception. """ if not collection_name: collection_name = prefix + '_set' if hasattr(cls, collection_name): #if the xxx_set is already existed, then automatically #create unique collection_set id collection_name = prefix + '_set_' + str(cls._collection_set_id) cls._collection_set_id += 1 else: if collection_name in cls._collection_names: if cls._collection_names.get(collection_name) != from_class_name: raise DuplicatePropertyError("Model %s already has collection property %s" % (cls.__name__, collection_name)) #add property check if collection_name in cls.properties: raise DuplicatePropertyError("Model %s already has property %s" % (cls.__name__, collection_name)) return collection_name
[ "def", "get_collection_name", "(", "cls", ",", "from_class_name", ",", "collection_name", "=", "None", ",", "prefix", "=", "None", ")", ":", "if", "not", "collection_name", ":", "collection_name", "=", "prefix", "+", "'_set'", "if", "hasattr", "(", "cls", ",", "collection_name", ")", ":", "#if the xxx_set is already existed, then automatically", "#create unique collection_set id", "collection_name", "=", "prefix", "+", "'_set_'", "+", "str", "(", "cls", ".", "_collection_set_id", ")", "cls", ".", "_collection_set_id", "+=", "1", "else", ":", "if", "collection_name", "in", "cls", ".", "_collection_names", ":", "if", "cls", ".", "_collection_names", ".", "get", "(", "collection_name", ")", "!=", "from_class_name", ":", "raise", "DuplicatePropertyError", "(", "\"Model %s already has collection property %s\"", "%", "(", "cls", ".", "__name__", ",", "collection_name", ")", ")", "#add property check", "if", "collection_name", "in", "cls", ".", "properties", ":", "raise", "DuplicatePropertyError", "(", "\"Model %s already has property %s\"", "%", "(", "cls", ".", "__name__", ",", "collection_name", ")", ")", "return", "collection_name" ]
Get the reference collection_name. If collection_name is None, make sure the generated name does not conflict; if it is given, check whether it already exists and raise an exception if it does.
[ "Get", "reference", "collection_name", "if", "the", "collection_name", "is", "None", "then", "make", "sure", "the", "collection_name", "is", "not", "conflict", "but", "if", "the", "collection_name", "is", "not", "None", "then", "check", "if", "the", "collection_name", "is", "already", "exists", "if", "existed", "then", "raise", "Exception", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4313-L4334
7,003
limodou/uliweb
uliweb/orm/__init__.py
Model._use
def _use(cls, ec): """ underly implement of use """ # class ConnectModel(cls): # pass ConnectModel = type(cls.__name__, (cls,), {}) ConnectModel.tablename = cls.tablename ConnectModel._base_class = cls if isinstance(ec, (str, unicode)): ConnectModel._engine_name = ec elif isinstance(ec, Session): ConnectModel._engine_name = ec.engine_name ConnectModel._connection = ec return ConnectModel
python
def _use(cls, ec): """ underly implement of use """ # class ConnectModel(cls): # pass ConnectModel = type(cls.__name__, (cls,), {}) ConnectModel.tablename = cls.tablename ConnectModel._base_class = cls if isinstance(ec, (str, unicode)): ConnectModel._engine_name = ec elif isinstance(ec, Session): ConnectModel._engine_name = ec.engine_name ConnectModel._connection = ec return ConnectModel
[ "def", "_use", "(", "cls", ",", "ec", ")", ":", "# class ConnectModel(cls):", "# pass", "ConnectModel", "=", "type", "(", "cls", ".", "__name__", ",", "(", "cls", ",", ")", ",", "{", "}", ")", "ConnectModel", ".", "tablename", "=", "cls", ".", "tablename", "ConnectModel", ".", "_base_class", "=", "cls", "if", "isinstance", "(", "ec", ",", "(", "str", ",", "unicode", ")", ")", ":", "ConnectModel", ".", "_engine_name", "=", "ec", "elif", "isinstance", "(", "ec", ",", "Session", ")", ":", "ConnectModel", ".", "_engine_name", "=", "ec", ".", "engine_name", "ConnectModel", ".", "_connection", "=", "ec", "return", "ConnectModel" ]
Underlying implementation of use.
[ "underly", "implement", "of", "use" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4413-L4428
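Model._use above copies the class at runtime with type(name, bases, dict) and binds the engine name or session to the copy, leaving the original class untouched. The same dynamic-subclass pattern in isolation (Base and bind are illustrative names, not uliweb API):

class Base:
    engine_name = 'default'

def bind(cls, engine_name):
    # One-off subclass so the binding does not leak back onto cls.
    bound = type(cls.__name__, (cls,), {})
    bound.engine_name = engine_name
    return bound

Replica = bind(Base, 'replica')
print(Replica.engine_name)        # replica
print(Base.engine_name)           # default -- original class unchanged
print(issubclass(Replica, Base))  # True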
7,004
limodou/uliweb
uliweb/orm/__init__.py
Model.use
def use(cls, ec): """ use will duplicate a new Model class and bind ec ec is Engine name or Sesstion object """ if isinstance(ec, (str, unicode)): m = get_model(cls._alias, ec, signal=False) else: m = cls._use(ec) return m
python
def use(cls, ec): """ use will duplicate a new Model class and bind ec ec is Engine name or Sesstion object """ if isinstance(ec, (str, unicode)): m = get_model(cls._alias, ec, signal=False) else: m = cls._use(ec) return m
[ "def", "use", "(", "cls", ",", "ec", ")", ":", "if", "isinstance", "(", "ec", ",", "(", "str", ",", "unicode", ")", ")", ":", "m", "=", "get_model", "(", "cls", ".", "_alias", ",", "ec", ",", "signal", "=", "False", ")", "else", ":", "m", "=", "cls", ".", "_use", "(", "ec", ")", "return", "m" ]
use duplicates the Model class and binds ec to it; ec is an engine name or a Session object.
[ "use", "will", "duplicate", "a", "new", "Model", "class", "and", "bind", "ec", "ec", "is", "Engine", "name", "or", "Sesstion", "object" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4431-L4442
7,005
limodou/uliweb
uliweb/orm/__init__.py
Model.get_tree
def get_tree(cls, *condition, **kwargs): """ parent is root parent value, default is None current is current value condition is extra condition for select root records mode is search method, value is 'wide' or 'deep' """ parent_field = kwargs.pop('parent_field', 'parent') parent = kwargs.pop('parent', None) parent_order_by = kwargs.pop('parent_order_by', None) current = kwargs.pop('current', None) order_by = kwargs.pop('order_by', None) id_field = kwargs.pop('id_field', 'id') mode = kwargs.pop('mode', 'wide') if mode not in ('wide', 'deep'): raise Exception("mode parameter should be 'wide' or 'deep', but '{}' found.".format(mode)) def _f(parent): query = cls.filter(cls.c[parent_field]==parent, *condition) if order_by is not None: query.order_by(order_by) for row in query: if mode == 'wide': yield row for _row in _f(getattr(row, id_field)): yield _row if mode == 'deep': yield row if current: query = cls.filter(cls.c[id_field]==current) else: if is_condition(parent): query = cls.filter(parent) else: query = cls.filter(cls.c[parent_field]==parent) if parent_order_by is not None: query.order_by(parent_order_by) for row in query: if mode == 'wide': yield row for r in _f(getattr(row, id_field)): yield r if mode == 'deep': yield row
python
def get_tree(cls, *condition, **kwargs): """ parent is root parent value, default is None current is current value condition is extra condition for select root records mode is search method, value is 'wide' or 'deep' """ parent_field = kwargs.pop('parent_field', 'parent') parent = kwargs.pop('parent', None) parent_order_by = kwargs.pop('parent_order_by', None) current = kwargs.pop('current', None) order_by = kwargs.pop('order_by', None) id_field = kwargs.pop('id_field', 'id') mode = kwargs.pop('mode', 'wide') if mode not in ('wide', 'deep'): raise Exception("mode parameter should be 'wide' or 'deep', but '{}' found.".format(mode)) def _f(parent): query = cls.filter(cls.c[parent_field]==parent, *condition) if order_by is not None: query.order_by(order_by) for row in query: if mode == 'wide': yield row for _row in _f(getattr(row, id_field)): yield _row if mode == 'deep': yield row if current: query = cls.filter(cls.c[id_field]==current) else: if is_condition(parent): query = cls.filter(parent) else: query = cls.filter(cls.c[parent_field]==parent) if parent_order_by is not None: query.order_by(parent_order_by) for row in query: if mode == 'wide': yield row for r in _f(getattr(row, id_field)): yield r if mode == 'deep': yield row
[ "def", "get_tree", "(", "cls", ",", "*", "condition", ",", "*", "*", "kwargs", ")", ":", "parent_field", "=", "kwargs", ".", "pop", "(", "'parent_field'", ",", "'parent'", ")", "parent", "=", "kwargs", ".", "pop", "(", "'parent'", ",", "None", ")", "parent_order_by", "=", "kwargs", ".", "pop", "(", "'parent_order_by'", ",", "None", ")", "current", "=", "kwargs", ".", "pop", "(", "'current'", ",", "None", ")", "order_by", "=", "kwargs", ".", "pop", "(", "'order_by'", ",", "None", ")", "id_field", "=", "kwargs", ".", "pop", "(", "'id_field'", ",", "'id'", ")", "mode", "=", "kwargs", ".", "pop", "(", "'mode'", ",", "'wide'", ")", "if", "mode", "not", "in", "(", "'wide'", ",", "'deep'", ")", ":", "raise", "Exception", "(", "\"mode parameter should be 'wide' or 'deep', but '{}' found.\"", ".", "format", "(", "mode", ")", ")", "def", "_f", "(", "parent", ")", ":", "query", "=", "cls", ".", "filter", "(", "cls", ".", "c", "[", "parent_field", "]", "==", "parent", ",", "*", "condition", ")", "if", "order_by", "is", "not", "None", ":", "query", ".", "order_by", "(", "order_by", ")", "for", "row", "in", "query", ":", "if", "mode", "==", "'wide'", ":", "yield", "row", "for", "_row", "in", "_f", "(", "getattr", "(", "row", ",", "id_field", ")", ")", ":", "yield", "_row", "if", "mode", "==", "'deep'", ":", "yield", "row", "if", "current", ":", "query", "=", "cls", ".", "filter", "(", "cls", ".", "c", "[", "id_field", "]", "==", "current", ")", "else", ":", "if", "is_condition", "(", "parent", ")", ":", "query", "=", "cls", ".", "filter", "(", "parent", ")", "else", ":", "query", "=", "cls", ".", "filter", "(", "cls", ".", "c", "[", "parent_field", "]", "==", "parent", ")", "if", "parent_order_by", "is", "not", "None", ":", "query", ".", "order_by", "(", "parent_order_by", ")", "for", "row", "in", "query", ":", "if", "mode", "==", "'wide'", ":", "yield", "row", "for", "r", "in", "_f", "(", "getattr", "(", "row", ",", "id_field", ")", ")", ":", "yield", "r", "if", "mode", "==", "'deep'", ":", "yield", "row" ]
parent is the root parent value (default None); current is the current value; condition is an extra condition for selecting root records; mode is the search method, either 'wide' or 'deep'.
[ "parent", "is", "root", "parent", "value", "default", "is", "None", "current", "is", "current", "value", "condition", "is", "extra", "condition", "for", "select", "root", "records", "mode", "is", "search", "method", "value", "is", "wide", "or", "deep" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4628-L4672
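Model.get_tree above walks a self-referencing table recursively; with mode='wide' a row is yielded before its children, with mode='deep' after them. A sketch of the same traversal over a plain in-memory parent map (the table data below is made up):

ROWS = {1: (None, 'root'), 2: (1, 'a'), 3: (1, 'b'), 4: (2, 'a1')}  # id -> (parent, name)

def get_tree(parent=None, mode='wide'):
    # Recursive generator: pre-order for 'wide', post-order for 'deep'.
    for rid, (p, name) in sorted(ROWS.items()):
        if p != parent:
            continue
        if mode == 'wide':
            yield name
        yield from get_tree(rid, mode)
        if mode == 'deep':
            yield name

print(list(get_tree()))             # ['root', 'a', 'a1', 'b']
print(list(get_tree(mode='deep')))  # ['a1', 'a', 'b', 'root']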
7,006
limodou/uliweb
uliweb/orm/__init__.py
Model.refresh
def refresh(self, fields=None, **kwargs): """ Re get the instance of current id """ cond = self.c[self._primary_field]==self._key query = self.filter(cond, **kwargs) if not fields: fields = list(self.table.c) v = query.values_one(*fields) if not v: raise NotFound('Instance <{0}:{1}> can not be found'.format(self.tablename, self._key)) d = self._data_prepare(v.items()) self.update(**d) self.set_saved()
python
def refresh(self, fields=None, **kwargs): """ Re get the instance of current id """ cond = self.c[self._primary_field]==self._key query = self.filter(cond, **kwargs) if not fields: fields = list(self.table.c) v = query.values_one(*fields) if not v: raise NotFound('Instance <{0}:{1}> can not be found'.format(self.tablename, self._key)) d = self._data_prepare(v.items()) self.update(**d) self.set_saved()
[ "def", "refresh", "(", "self", ",", "fields", "=", "None", ",", "*", "*", "kwargs", ")", ":", "cond", "=", "self", ".", "c", "[", "self", ".", "_primary_field", "]", "==", "self", ".", "_key", "query", "=", "self", ".", "filter", "(", "cond", ",", "*", "*", "kwargs", ")", "if", "not", "fields", ":", "fields", "=", "list", "(", "self", ".", "table", ".", "c", ")", "v", "=", "query", ".", "values_one", "(", "*", "fields", ")", "if", "not", "v", ":", "raise", "NotFound", "(", "'Instance <{0}:{1}> can not be found'", ".", "format", "(", "self", ".", "tablename", ",", "self", ".", "_key", ")", ")", "d", "=", "self", ".", "_data_prepare", "(", "v", ".", "items", "(", ")", ")", "self", ".", "update", "(", "*", "*", "d", ")", "self", ".", "set_saved", "(", ")" ]
Reload the instance with the current id.
[ "Re", "get", "the", "instance", "of", "current", "id" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4744-L4759
7,007
limodou/uliweb
uliweb/orm/__init__.py
Model.dump
def dump(self, fields=None, exclude=None): """ Dump current object to dict, but the value is string for manytomany fields will not automatically be dumpped, only when they are given in fields parameter """ exclude = exclude or [] d = {} if fields and self._primary_field not in fields: fields = list(fields) fields.append(self._primary_field) for k, v in self.properties.items(): if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)): if not isinstance(v, ManyToMany): t = v.get_value_for_datastore(self) if t is Lazy: self.refresh() t = v.get_value_for_datastore(self) if isinstance(t, Model): t = t._key d[k] = v.to_str(t) else: if fields: d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])]) if self._primary_field and d and self._primary_field not in d: d[self._primary_field] = str(self._key) return d
python
def dump(self, fields=None, exclude=None): """ Dump current object to dict, but the value is string for manytomany fields will not automatically be dumpped, only when they are given in fields parameter """ exclude = exclude or [] d = {} if fields and self._primary_field not in fields: fields = list(fields) fields.append(self._primary_field) for k, v in self.properties.items(): if ((not fields) or (k in fields)) and (not exclude or (k not in exclude)): if not isinstance(v, ManyToMany): t = v.get_value_for_datastore(self) if t is Lazy: self.refresh() t = v.get_value_for_datastore(self) if isinstance(t, Model): t = t._key d[k] = v.to_str(t) else: if fields: d[k] = ','.join([str(x) for x in getattr(self, v._lazy_value(), [])]) if self._primary_field and d and self._primary_field not in d: d[self._primary_field] = str(self._key) return d
[ "def", "dump", "(", "self", ",", "fields", "=", "None", ",", "exclude", "=", "None", ")", ":", "exclude", "=", "exclude", "or", "[", "]", "d", "=", "{", "}", "if", "fields", "and", "self", ".", "_primary_field", "not", "in", "fields", ":", "fields", "=", "list", "(", "fields", ")", "fields", ".", "append", "(", "self", ".", "_primary_field", ")", "for", "k", ",", "v", "in", "self", ".", "properties", ".", "items", "(", ")", ":", "if", "(", "(", "not", "fields", ")", "or", "(", "k", "in", "fields", ")", ")", "and", "(", "not", "exclude", "or", "(", "k", "not", "in", "exclude", ")", ")", ":", "if", "not", "isinstance", "(", "v", ",", "ManyToMany", ")", ":", "t", "=", "v", ".", "get_value_for_datastore", "(", "self", ")", "if", "t", "is", "Lazy", ":", "self", ".", "refresh", "(", ")", "t", "=", "v", ".", "get_value_for_datastore", "(", "self", ")", "if", "isinstance", "(", "t", ",", "Model", ")", ":", "t", "=", "t", ".", "_key", "d", "[", "k", "]", "=", "v", ".", "to_str", "(", "t", ")", "else", ":", "if", "fields", ":", "d", "[", "k", "]", "=", "','", ".", "join", "(", "[", "str", "(", "x", ")", "for", "x", "in", "getattr", "(", "self", ",", "v", ".", "_lazy_value", "(", ")", ",", "[", "]", ")", "]", ")", "if", "self", ".", "_primary_field", "and", "d", "and", "self", ".", "_primary_field", "not", "in", "d", ":", "d", "[", "self", ".", "_primary_field", "]", "=", "str", "(", "self", ".", "_key", ")", "return", "d" ]
Dump the current object to a dict whose values are strings; ManyToMany fields are not dumped automatically, only when they are given in the fields parameter.
[ "Dump", "current", "object", "to", "dict", "but", "the", "value", "is", "string", "for", "manytomany", "fields", "will", "not", "automatically", "be", "dumpped", "only", "when", "they", "are", "given", "in", "fields", "parameter" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4802-L4828
7,008
limodou/uliweb
uliweb/orm/__init__.py
Model.clear_relation
def clear_relation(cls): """ Clear relation properties for reference Model, such as OneToOne, Reference, ManyToMany """ for k, v in cls.properties.items(): if isinstance(v, ReferenceProperty): if hasattr(v, 'collection_name') and hasattr(v.reference_class, v.collection_name): delattr(v.reference_class, v.collection_name) if isinstance(v, OneToOne): #append to reference_class._onetoone del v.reference_class._onetoone[v.collection_name]
python
def clear_relation(cls): """ Clear relation properties for reference Model, such as OneToOne, Reference, ManyToMany """ for k, v in cls.properties.items(): if isinstance(v, ReferenceProperty): if hasattr(v, 'collection_name') and hasattr(v.reference_class, v.collection_name): delattr(v.reference_class, v.collection_name) if isinstance(v, OneToOne): #append to reference_class._onetoone del v.reference_class._onetoone[v.collection_name]
[ "def", "clear_relation", "(", "cls", ")", ":", "for", "k", ",", "v", "in", "cls", ".", "properties", ".", "items", "(", ")", ":", "if", "isinstance", "(", "v", ",", "ReferenceProperty", ")", ":", "if", "hasattr", "(", "v", ",", "'collection_name'", ")", "and", "hasattr", "(", "v", ".", "reference_class", ",", "v", ".", "collection_name", ")", ":", "delattr", "(", "v", ".", "reference_class", ",", "v", ".", "collection_name", ")", "if", "isinstance", "(", "v", ",", "OneToOne", ")", ":", "#append to reference_class._onetoone", "del", "v", ".", "reference_class", ".", "_onetoone", "[", "v", ".", "collection_name", "]" ]
Clear relation properties for reference Model, such as OneToOne, Reference, ManyToMany
[ "Clear", "relation", "properties", "for", "reference", "Model", "such", "as", "OneToOne", "Reference", "ManyToMany" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4840-L4852
7,009
limodou/uliweb
uliweb/orm/__init__.py
Bulk.put
def put(self, _name, **values): """ Put data to cach, if reached size value, it'll execute at once. """ try: sql = self.sqles[_name] data = sql['data'] if sql['positional']: d = [values[k] for k, v in sql['fields'].items()] else: d = {v:values[k] for k, v in sql['fields'].items()} data.append(d) if self.size and len(data) >= self.size: do_(sql['raw_sql'], args=data) sql['data'] = [] except: if self.transcation: Rollback(self.engine) raise
python
def put(self, _name, **values): """ Put data to cach, if reached size value, it'll execute at once. """ try: sql = self.sqles[_name] data = sql['data'] if sql['positional']: d = [values[k] for k, v in sql['fields'].items()] else: d = {v:values[k] for k, v in sql['fields'].items()} data.append(d) if self.size and len(data) >= self.size: do_(sql['raw_sql'], args=data) sql['data'] = [] except: if self.transcation: Rollback(self.engine) raise
[ "def", "put", "(", "self", ",", "_name", ",", "*", "*", "values", ")", ":", "try", ":", "sql", "=", "self", ".", "sqles", "[", "_name", "]", "data", "=", "sql", "[", "'data'", "]", "if", "sql", "[", "'positional'", "]", ":", "d", "=", "[", "values", "[", "k", "]", "for", "k", ",", "v", "in", "sql", "[", "'fields'", "]", ".", "items", "(", ")", "]", "else", ":", "d", "=", "{", "v", ":", "values", "[", "k", "]", "for", "k", ",", "v", "in", "sql", "[", "'fields'", "]", ".", "items", "(", ")", "}", "data", ".", "append", "(", "d", ")", "if", "self", ".", "size", "and", "len", "(", "data", ")", ">=", "self", ".", "size", ":", "do_", "(", "sql", "[", "'raw_sql'", "]", ",", "args", "=", "data", ")", "sql", "[", "'data'", "]", "=", "[", "]", "except", ":", "if", "self", ".", "transcation", ":", "Rollback", "(", "self", ".", "engine", ")", "raise" ]
Put data into the cache; once the size threshold is reached, it is executed at once.
[ "Put", "data", "to", "cach", "if", "reached", "size", "value", "it", "ll", "execute", "at", "once", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/orm/__init__.py#L4931-L4949
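Bulk.put above buffers one row per call and flushes the whole batch with a single executemany-style call once the configured size is reached. A stripped-down sketch of that buffer-and-flush behaviour with no database behind it (the executed list stands in for do_()):

class MiniBulk:
    def __init__(self, size=2):
        self.size = size
        self.data = []
        self.executed = []  # stands in for do_(raw_sql, args=batch)

    def put(self, **values):
        self.data.append(values)
        if self.size and len(self.data) >= self.size:
            self.flush()

    def flush(self):
        if self.data:
            self.executed.append(list(self.data))  # one batched execution
            self.data = []

b = MiniBulk(size=2)
for i in range(5):
    b.put(n=i)
b.flush()  # push the trailing partial batch
print(b.executed)  # [[{'n': 0}, {'n': 1}], [{'n': 2}, {'n': 3}], [{'n': 4}]]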
7,010
limodou/uliweb
uliweb/contrib/secretkey/__init__.py
get_key
def get_key(keyfile=None): """ Read the key content from secret_file """ keyfile = keyfile or application_path(settings.SECRETKEY.SECRET_FILE) with file(keyfile, 'rb') as f: return f.read()
python
def get_key(keyfile=None): """ Read the key content from secret_file """ keyfile = keyfile or application_path(settings.SECRETKEY.SECRET_FILE) with file(keyfile, 'rb') as f: return f.read()
[ "def", "get_key", "(", "keyfile", "=", "None", ")", ":", "keyfile", "=", "keyfile", "or", "application_path", "(", "settings", ".", "SECRETKEY", ".", "SECRET_FILE", ")", "with", "file", "(", "keyfile", ",", "'rb'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Read the key content from secret_file
[ "Read", "the", "key", "content", "from", "secret_file" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/secretkey/__init__.py#L34-L40
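get_key above relies on the Python 2 built-in file(), which no longer exists in Python 3. A Python 3 equivalent of the same read, with the settings lookup replaced by an explicit path argument:

def get_key(keyfile):
    # open() replaces the Python 2 file() built-in; content is returned as bytes.
    with open(keyfile, 'rb') as f:
        return f.read()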
7,011
limodou/uliweb
uliweb/contrib/secretkey/__init__.py
get_cipher_key
def get_cipher_key(keyfile=None): """ Create key which will be used in des, because des need 8bytes chars """ _key = get_key(keyfile) _k = md5(_key).hexdigest() key = xor(_k[:8], _k[8:16], _k[16:24], _k[24:]) return key
python
def get_cipher_key(keyfile=None): """ Create key which will be used in des, because des need 8bytes chars """ _key = get_key(keyfile) _k = md5(_key).hexdigest() key = xor(_k[:8], _k[8:16], _k[16:24], _k[24:]) return key
[ "def", "get_cipher_key", "(", "keyfile", "=", "None", ")", ":", "_key", "=", "get_key", "(", "keyfile", ")", "_k", "=", "md5", "(", "_key", ")", ".", "hexdigest", "(", ")", "key", "=", "xor", "(", "_k", "[", ":", "8", "]", ",", "_k", "[", "8", ":", "16", "]", ",", "_k", "[", "16", ":", "24", "]", ",", "_k", "[", "24", ":", "]", ")", "return", "key" ]
Create the key used by DES, which requires an 8-byte key.
[ "Create", "key", "which", "will", "be", "used", "in", "des", "because", "des", "need", "8bytes", "chars" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/secretkey/__init__.py#L42-L49
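get_cipher_key above folds the 32-character md5 hex digest of the secret into the 8 characters DES expects by XOR-ing four 8-character slices. The xor() helper is not part of this record, so the sketch below assumes a character-wise XOR; md5 is given bytes so it also runs on Python 3:

from hashlib import md5

def xor4(a, b, c, d):
    # Character-wise XOR of four equal-length strings (assumed behaviour of uliweb's xor()).
    return ''.join(chr(ord(w) ^ ord(x) ^ ord(y) ^ ord(z)) for w, x, y, z in zip(a, b, c, d))

def get_cipher_key(secret):
    _k = md5(secret).hexdigest()                       # 32 hex characters
    return xor4(_k[:8], _k[8:16], _k[16:24], _k[24:])  # folded down to 8 characters

print(len(get_cipher_key(b'example secret')))  # 8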
7,012
limodou/uliweb
uliweb/lib/werkzeug/contrib/cache.py
BaseCache.set_many
def set_many(self, mapping, timeout=None): """Sets multiple keys and values from a mapping. :param mapping: a mapping with the keys/values to set. :param timeout: the cache timeout for the key (if not specified, it uses the default timeout). """ for key, value in _items(mapping): self.set(key, value, timeout)
python
def set_many(self, mapping, timeout=None): """Sets multiple keys and values from a mapping. :param mapping: a mapping with the keys/values to set. :param timeout: the cache timeout for the key (if not specified, it uses the default timeout). """ for key, value in _items(mapping): self.set(key, value, timeout)
[ "def", "set_many", "(", "self", ",", "mapping", ",", "timeout", "=", "None", ")", ":", "for", "key", ",", "value", "in", "_items", "(", "mapping", ")", ":", "self", ".", "set", "(", "key", ",", "value", ",", "timeout", ")" ]
Sets multiple keys and values from a mapping. :param mapping: a mapping with the keys/values to set. :param timeout: the cache timeout for the key (if not specified, it uses the default timeout).
[ "Sets", "multiple", "keys", "and", "values", "from", "a", "mapping", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/cache.py#L166-L174
7,013
limodou/uliweb
uliweb/lib/werkzeug/contrib/cache.py
BaseCache.dec
def dec(self, key, delta=1): """Decrements the value of a key by `delta`. If the key does not yet exist it is initialized with `-delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to subtract. """ self.set(key, (self.get(key) or 0) - delta)
python
def dec(self, key, delta=1): """Decrements the value of a key by `delta`. If the key does not yet exist it is initialized with `-delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to subtract. """ self.set(key, (self.get(key) or 0) - delta)
[ "def", "dec", "(", "self", ",", "key", ",", "delta", "=", "1", ")", ":", "self", ".", "set", "(", "key", ",", "(", "self", ".", "get", "(", "key", ")", "or", "0", ")", "-", "delta", ")" ]
Decrements the value of a key by `delta`. If the key does not yet exist it is initialized with `-delta`. For supporting caches this is an atomic operation. :param key: the key to increment. :param delta: the delta to subtract.
[ "Decrements", "the", "value", "of", "a", "key", "by", "delta", ".", "If", "the", "key", "does", "not", "yet", "exist", "it", "is", "initialized", "with", "-", "delta", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/cache.py#L202-L211
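BaseCache.dec above is a plain read-modify-write: missing keys start from 0, so decrementing an absent key stores -delta, and nothing about it is atomic in this base implementation. A dict-backed stand-in just large enough to exercise it:

class SimpleCache:
    # Minimal in-memory stand-in for BaseCache (illustrative only).
    def __init__(self):
        self._store = {}

    def get(self, key):
        return self._store.get(key)

    def set(self, key, value, timeout=None):
        self._store[key] = value

    def dec(self, key, delta=1):
        self.set(key, (self.get(key) or 0) - delta)

c = SimpleCache()
c.dec('hits')         # absent key -> initialised to -1
c.set('hits', 10)
c.dec('hits', 3)
print(c.get('hits'))  # 7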
7,014
limodou/uliweb
uliweb/lib/werkzeug/contrib/cache.py
MemcachedCache.import_preferred_memcache_lib
def import_preferred_memcache_lib(self, servers): """Returns an initialized memcache client. Used by the constructor.""" try: import pylibmc except ImportError: pass else: return pylibmc.Client(servers) try: from google.appengine.api import memcache except ImportError: pass else: return memcache.Client() try: import memcache except ImportError: pass else: return memcache.Client(servers)
python
def import_preferred_memcache_lib(self, servers): """Returns an initialized memcache client. Used by the constructor.""" try: import pylibmc except ImportError: pass else: return pylibmc.Client(servers) try: from google.appengine.api import memcache except ImportError: pass else: return memcache.Client() try: import memcache except ImportError: pass else: return memcache.Client(servers)
[ "def", "import_preferred_memcache_lib", "(", "self", ",", "servers", ")", ":", "try", ":", "import", "pylibmc", "except", "ImportError", ":", "pass", "else", ":", "return", "pylibmc", ".", "Client", "(", "servers", ")", "try", ":", "from", "google", ".", "appengine", ".", "api", "import", "memcache", "except", "ImportError", ":", "pass", "else", ":", "return", "memcache", ".", "Client", "(", ")", "try", ":", "import", "memcache", "except", "ImportError", ":", "pass", "else", ":", "return", "memcache", ".", "Client", "(", "servers", ")" ]
Returns an initialized memcache client. Used by the constructor.
[ "Returns", "an", "initialized", "memcache", "client", ".", "Used", "by", "the", "constructor", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/cache.py#L420-L441
7,015
limodou/uliweb
uliweb/lib/werkzeug/contrib/cache.py
RedisCache.dump_object
def dump_object(self, value): """Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else. """ t = type(value) if t is int or t is long: return str(value) return '!' + pickle.dumps(value)
python
def dump_object(self, value): """Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else. """ t = type(value) if t is int or t is long: return str(value) return '!' + pickle.dumps(value)
[ "def", "dump_object", "(", "self", ",", "value", ")", ":", "t", "=", "type", "(", "value", ")", "if", "t", "is", "int", "or", "t", "is", "long", ":", "return", "str", "(", "value", ")", "return", "'!'", "+", "pickle", ".", "dumps", "(", "value", ")" ]
Dumps an object into a string for redis. By default it serializes integers as regular string and pickle dumps everything else.
[ "Dumps", "an", "object", "into", "a", "string", "for", "redis", ".", "By", "default", "it", "serializes", "integers", "as", "regular", "string", "and", "pickle", "dumps", "everything", "else", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/cache.py#L491-L498
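RedisCache.dump_object above stores integers as their plain decimal string and everything else as a '!'-prefixed pickle, so integer values stay usable for Redis counters. A round-trip sketch with a matching loader; the loader and the use of bytes (for Python 3) are this sketch's own additions, not the record's API:

import pickle

def dump_object(value):
    # Integers stay as plain strings; everything else becomes '!' + pickle.
    if type(value) is int:
        return str(value).encode()
    return b'!' + pickle.dumps(value)

def load_object(data):
    if data.startswith(b'!'):
        return pickle.loads(data[1:])
    return int(data)

print(load_object(dump_object(42)))             # 42
print(load_object(dump_object({'a': [1, 2]})))  # {'a': [1, 2]}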
7,016
limodou/uliweb
uliweb/utils/common.py
extract_dirs
def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True, replace=True): """ mod name path mod path dst output directory resursion True will extract all sub module of mod """ default_exclude = ['.svn', '_svn', '.git'] default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp'] exclude = exclude or [] exclude_ext = exclude_ext or [] # log = logging.getLogger('uliweb') if not os.path.exists(dst): os.makedirs(dst) if verbose: print 'Make directory %s' % dst for r in pkg.resource_listdir(mod, path): if r in exclude or r in default_exclude: continue fpath = os.path.join(path, r) if pkg.resource_isdir(mod, fpath): if recursion: extract_dirs(mod, fpath, os.path.join(dst, r), verbose, exclude, exclude_ext, recursion, replace) else: ext = os.path.splitext(fpath)[1] if ext in exclude_ext or ext in default_exclude_ext: continue extract_file(mod, fpath, dst, verbose, replace)
python
def extract_dirs(mod, path, dst, verbose=False, exclude=None, exclude_ext=None, recursion=True, replace=True): """ mod name path mod path dst output directory resursion True will extract all sub module of mod """ default_exclude = ['.svn', '_svn', '.git'] default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp'] exclude = exclude or [] exclude_ext = exclude_ext or [] # log = logging.getLogger('uliweb') if not os.path.exists(dst): os.makedirs(dst) if verbose: print 'Make directory %s' % dst for r in pkg.resource_listdir(mod, path): if r in exclude or r in default_exclude: continue fpath = os.path.join(path, r) if pkg.resource_isdir(mod, fpath): if recursion: extract_dirs(mod, fpath, os.path.join(dst, r), verbose, exclude, exclude_ext, recursion, replace) else: ext = os.path.splitext(fpath)[1] if ext in exclude_ext or ext in default_exclude_ext: continue extract_file(mod, fpath, dst, verbose, replace)
[ "def", "extract_dirs", "(", "mod", ",", "path", ",", "dst", ",", "verbose", "=", "False", ",", "exclude", "=", "None", ",", "exclude_ext", "=", "None", ",", "recursion", "=", "True", ",", "replace", "=", "True", ")", ":", "default_exclude", "=", "[", "'.svn'", ",", "'_svn'", ",", "'.git'", "]", "default_exclude_ext", "=", "[", "'.pyc'", ",", "'.pyo'", ",", "'.bak'", ",", "'.tmp'", "]", "exclude", "=", "exclude", "or", "[", "]", "exclude_ext", "=", "exclude_ext", "or", "[", "]", "# log = logging.getLogger('uliweb')", "if", "not", "os", ".", "path", ".", "exists", "(", "dst", ")", ":", "os", ".", "makedirs", "(", "dst", ")", "if", "verbose", ":", "print", "'Make directory %s'", "%", "dst", "for", "r", "in", "pkg", ".", "resource_listdir", "(", "mod", ",", "path", ")", ":", "if", "r", "in", "exclude", "or", "r", "in", "default_exclude", ":", "continue", "fpath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "r", ")", "if", "pkg", ".", "resource_isdir", "(", "mod", ",", "fpath", ")", ":", "if", "recursion", ":", "extract_dirs", "(", "mod", ",", "fpath", ",", "os", ".", "path", ".", "join", "(", "dst", ",", "r", ")", ",", "verbose", ",", "exclude", ",", "exclude_ext", ",", "recursion", ",", "replace", ")", "else", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fpath", ")", "[", "1", "]", "if", "ext", "in", "exclude_ext", "or", "ext", "in", "default_exclude_ext", ":", "continue", "extract_file", "(", "mod", ",", "fpath", ",", "dst", ",", "verbose", ",", "replace", ")" ]
mod: module name; path: path inside the module; dst: output directory; recursion: if True, extract all submodules of mod.
[ "mod", "name", "path", "mod", "path", "dst", "output", "directory", "resursion", "True", "will", "extract", "all", "sub", "module", "of", "mod" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L110-L137
7,017
limodou/uliweb
uliweb/utils/common.py
walk_dirs
def walk_dirs(path, include=None, include_ext=None, exclude=None, exclude_ext=None, recursion=True, file_only=False, use_default_pattern=True, patterns=None): """ path directory path resursion True will extract all sub module of mod """ default_exclude = ['.svn', '_svn', '.git'] default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp'] exclude = exclude or [] exclude_ext = exclude_ext or [] include_ext = include_ext or [] include = include or [] if not os.path.exists(path): raise StopIteration for r in os.listdir(path): if match(r, exclude) or (use_default_pattern and r in default_exclude): continue if include and r not in include: continue fpath = os.path.join(path, r) if os.path.isdir(fpath): if not file_only: if patterns and match(r, patterns): yield os.path.normpath(fpath).replace('\\', '/') if recursion: for f in walk_dirs(fpath, include, include_ext, exclude, exclude_ext, recursion, file_only, use_default_pattern, patterns): yield os.path.normpath(f).replace('\\', '/') else: ext = os.path.splitext(fpath)[1] if ext in exclude_ext or (use_default_pattern and ext in default_exclude_ext): continue if include_ext and ext not in include_ext: continue if patterns: if not match(r, patterns): continue yield os.path.normpath(fpath).replace('\\', '/')
python
def walk_dirs(path, include=None, include_ext=None, exclude=None, exclude_ext=None, recursion=True, file_only=False, use_default_pattern=True, patterns=None): """ path directory path resursion True will extract all sub module of mod """ default_exclude = ['.svn', '_svn', '.git'] default_exclude_ext = ['.pyc', '.pyo', '.bak', '.tmp'] exclude = exclude or [] exclude_ext = exclude_ext or [] include_ext = include_ext or [] include = include or [] if not os.path.exists(path): raise StopIteration for r in os.listdir(path): if match(r, exclude) or (use_default_pattern and r in default_exclude): continue if include and r not in include: continue fpath = os.path.join(path, r) if os.path.isdir(fpath): if not file_only: if patterns and match(r, patterns): yield os.path.normpath(fpath).replace('\\', '/') if recursion: for f in walk_dirs(fpath, include, include_ext, exclude, exclude_ext, recursion, file_only, use_default_pattern, patterns): yield os.path.normpath(f).replace('\\', '/') else: ext = os.path.splitext(fpath)[1] if ext in exclude_ext or (use_default_pattern and ext in default_exclude_ext): continue if include_ext and ext not in include_ext: continue if patterns: if not match(r, patterns): continue yield os.path.normpath(fpath).replace('\\', '/')
[ "def", "walk_dirs", "(", "path", ",", "include", "=", "None", ",", "include_ext", "=", "None", ",", "exclude", "=", "None", ",", "exclude_ext", "=", "None", ",", "recursion", "=", "True", ",", "file_only", "=", "False", ",", "use_default_pattern", "=", "True", ",", "patterns", "=", "None", ")", ":", "default_exclude", "=", "[", "'.svn'", ",", "'_svn'", ",", "'.git'", "]", "default_exclude_ext", "=", "[", "'.pyc'", ",", "'.pyo'", ",", "'.bak'", ",", "'.tmp'", "]", "exclude", "=", "exclude", "or", "[", "]", "exclude_ext", "=", "exclude_ext", "or", "[", "]", "include_ext", "=", "include_ext", "or", "[", "]", "include", "=", "include", "or", "[", "]", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "raise", "StopIteration", "for", "r", "in", "os", ".", "listdir", "(", "path", ")", ":", "if", "match", "(", "r", ",", "exclude", ")", "or", "(", "use_default_pattern", "and", "r", "in", "default_exclude", ")", ":", "continue", "if", "include", "and", "r", "not", "in", "include", ":", "continue", "fpath", "=", "os", ".", "path", ".", "join", "(", "path", ",", "r", ")", "if", "os", ".", "path", ".", "isdir", "(", "fpath", ")", ":", "if", "not", "file_only", ":", "if", "patterns", "and", "match", "(", "r", ",", "patterns", ")", ":", "yield", "os", ".", "path", ".", "normpath", "(", "fpath", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "if", "recursion", ":", "for", "f", "in", "walk_dirs", "(", "fpath", ",", "include", ",", "include_ext", ",", "exclude", ",", "exclude_ext", ",", "recursion", ",", "file_only", ",", "use_default_pattern", ",", "patterns", ")", ":", "yield", "os", ".", "path", ".", "normpath", "(", "f", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", "else", ":", "ext", "=", "os", ".", "path", ".", "splitext", "(", "fpath", ")", "[", "1", "]", "if", "ext", "in", "exclude_ext", "or", "(", "use_default_pattern", "and", "ext", "in", "default_exclude_ext", ")", ":", "continue", "if", "include_ext", "and", "ext", "not", "in", "include_ext", ":", "continue", "if", "patterns", ":", "if", "not", "match", "(", "r", ",", "patterns", ")", ":", "continue", "yield", "os", ".", "path", ".", "normpath", "(", "fpath", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")" ]
path: directory path; recursion: if True, recurse into all subdirectories of path.
[ "path", "directory", "path", "resursion", "True", "will", "extract", "all", "sub", "module", "of", "mod" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L147-L187
7,018
limodou/uliweb
uliweb/utils/common.py
camel_to_
def camel_to_(s): """ Convert CamelCase to camel_case """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
python
def camel_to_(s): """ Convert CamelCase to camel_case """ s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s) return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
[ "def", "camel_to_", "(", "s", ")", ":", "s1", "=", "re", ".", "sub", "(", "'(.)([A-Z][a-z]+)'", ",", "r'\\1_\\2'", ",", "s", ")", "return", "re", ".", "sub", "(", "'([a-z0-9])([A-Z])'", ",", "r'\\1_\\2'", ",", "s1", ")", ".", "lower", "(", ")" ]
Convert CamelCase to camel_case
[ "Convert", "CamelCase", "to", "camel_case" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L605-L610
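camel_to_ above converts CamelCase identifiers to snake_case with two regular-expression passes; the outputs below are worked out from the record's patterns:

import re

def camel_to_(s):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

print(camel_to_('CamelCase'))         # camel_case
print(camel_to_('HTTPResponseCode'))  # http_response_code
print(camel_to_('already_snake'))     # already_snake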
7,019
limodou/uliweb
uliweb/utils/common.py
application_path
def application_path(path): """ Join application project_dir and path """ from uliweb import application return os.path.join(application.project_dir, path)
python
def application_path(path): """ Join application project_dir and path """ from uliweb import application return os.path.join(application.project_dir, path)
[ "def", "application_path", "(", "path", ")", ":", "from", "uliweb", "import", "application", "return", "os", ".", "path", ".", "join", "(", "application", ".", "project_dir", ",", "path", ")" ]
Join application project_dir and path
[ "Join", "application", "project_dir", "and", "path" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L612-L617
7,020
limodou/uliweb
uliweb/utils/common.py
get_uuid
def get_uuid(type=4): """ Get uuid value """ import uuid name = 'uuid'+str(type) u = getattr(uuid, name) return u().hex
python
def get_uuid(type=4): """ Get uuid value """ import uuid name = 'uuid'+str(type) u = getattr(uuid, name) return u().hex
[ "def", "get_uuid", "(", "type", "=", "4", ")", ":", "import", "uuid", "name", "=", "'uuid'", "+", "str", "(", "type", ")", "u", "=", "getattr", "(", "uuid", ",", "name", ")", "return", "u", "(", ")", ".", "hex" ]
Get uuid value
[ "Get", "uuid", "value" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L619-L627
7,021
limodou/uliweb
uliweb/utils/common.py
request_url
def request_url(req=None): """ Get full url of a request """ from uliweb import request r = req or request if request: if r.query_string: return r.path + '?' + r.query_string else: return r.path else: return ''
python
def request_url(req=None): """ Get full url of a request """ from uliweb import request r = req or request if request: if r.query_string: return r.path + '?' + r.query_string else: return r.path else: return ''
[ "def", "request_url", "(", "req", "=", "None", ")", ":", "from", "uliweb", "import", "request", "r", "=", "req", "or", "request", "if", "request", ":", "if", "r", ".", "query_string", ":", "return", "r", ".", "path", "+", "'?'", "+", "r", ".", "query_string", "else", ":", "return", "r", ".", "path", "else", ":", "return", "''" ]
Get full url of a request
[ "Get", "full", "url", "of", "a", "request" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L658-L671
7,022
limodou/uliweb
uliweb/utils/common.py
compare_dict
def compare_dict(da, db): """ Compare differencs from two dicts """ sa = set(da.items()) sb = set(db.items()) diff = sa & sb return dict(sa - diff), dict(sb - diff)
python
def compare_dict(da, db): """ Compare differencs from two dicts """ sa = set(da.items()) sb = set(db.items()) diff = sa & sb return dict(sa - diff), dict(sb - diff)
[ "def", "compare_dict", "(", "da", ",", "db", ")", ":", "sa", "=", "set", "(", "da", ".", "items", "(", ")", ")", "sb", "=", "set", "(", "db", ".", "items", "(", ")", ")", "diff", "=", "sa", "&", "sb", "return", "dict", "(", "sa", "-", "diff", ")", ",", "dict", "(", "sb", "-", "diff", ")" ]
Compare the differences between two dicts.
[ "Compare", "differencs", "from", "two", "dicts" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L711-L719
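compare_dict above diffs two dicts through set operations on their item pairs, which only works when all values are hashable. A quick usage check (the variable name common replaces the record's slightly misleading diff):

def compare_dict(da, db):
    sa, sb = set(da.items()), set(db.items())
    common = sa & sb
    return dict(sa - common), dict(sb - common)

left, right = compare_dict({'host': 'a', 'port': 80}, {'host': 'a', 'port': 8080})
print(left)   # {'port': 80}   -- only in the first dict
print(right)  # {'port': 8080} -- only in the second dict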
7,023
limodou/uliweb
uliweb/utils/common.py
get_configrable_object
def get_configrable_object(key, section, cls=None): """ if obj is a class, then check if the class is subclass of cls or it should be object path, and it'll be imported by import_attr """ from uliweb import UliwebError, settings import inspect if inspect.isclass(key) and cls and issubclass(key, cls): return key elif isinstance(key, (str, unicode)): path = settings[section].get(key) if path: _cls = import_attr(path) return _cls else: raise UliwebError("Can't find section name %s in settings" % section) else: raise UliwebError("Key %r should be subclass of %r object or string path format!" % (key, cls))
python
def get_configrable_object(key, section, cls=None): """ if obj is a class, then check if the class is subclass of cls or it should be object path, and it'll be imported by import_attr """ from uliweb import UliwebError, settings import inspect if inspect.isclass(key) and cls and issubclass(key, cls): return key elif isinstance(key, (str, unicode)): path = settings[section].get(key) if path: _cls = import_attr(path) return _cls else: raise UliwebError("Can't find section name %s in settings" % section) else: raise UliwebError("Key %r should be subclass of %r object or string path format!" % (key, cls))
[ "def", "get_configrable_object", "(", "key", ",", "section", ",", "cls", "=", "None", ")", ":", "from", "uliweb", "import", "UliwebError", ",", "settings", "import", "inspect", "if", "inspect", ".", "isclass", "(", "key", ")", "and", "cls", "and", "issubclass", "(", "key", ",", "cls", ")", ":", "return", "key", "elif", "isinstance", "(", "key", ",", "(", "str", ",", "unicode", ")", ")", ":", "path", "=", "settings", "[", "section", "]", ".", "get", "(", "key", ")", "if", "path", ":", "_cls", "=", "import_attr", "(", "path", ")", "return", "_cls", "else", ":", "raise", "UliwebError", "(", "\"Can't find section name %s in settings\"", "%", "section", ")", "else", ":", "raise", "UliwebError", "(", "\"Key %r should be subclass of %r object or string path format!\"", "%", "(", "key", ",", "cls", ")", ")" ]
If key is a class, check that it is a subclass of cls; otherwise it should be an object path string, which will be imported via import_attr.
[ "if", "obj", "is", "a", "class", "then", "check", "if", "the", "class", "is", "subclass", "of", "cls", "or", "it", "should", "be", "object", "path", "and", "it", "ll", "be", "imported", "by", "import_attr" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L824-L842
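get_configrable_object above accepts either a class (validated against cls) or a settings key whose value is a dotted import path passed to import_attr. import_attr itself is not shown in this record; an importlib-based guess at what that path lookup does:

import importlib

def import_attr(path):
    # 'package.module.Name' -> the Name object (assumed import_attr behaviour).
    module_path, _, attr = path.rpartition('.')
    return getattr(importlib.import_module(module_path), attr)

print(import_attr('collections.OrderedDict'))  # <class 'collections.OrderedDict'>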
7,024
limodou/uliweb
uliweb/utils/common.py
convert_bytes
def convert_bytes(n): """ Convert a size number to 'K', 'M', .etc """ symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols): prefix[s] = 1 << (i + 1) * 10 for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] return '%.1f%s' % (value, s) return "%sB" % n
python
def convert_bytes(n): """ Convert a size number to 'K', 'M', .etc """ symbols = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y') prefix = {} for i, s in enumerate(symbols): prefix[s] = 1 << (i + 1) * 10 for s in reversed(symbols): if n >= prefix[s]: value = float(n) / prefix[s] return '%.1f%s' % (value, s) return "%sB" % n
[ "def", "convert_bytes", "(", "n", ")", ":", "symbols", "=", "(", "'K'", ",", "'M'", ",", "'G'", ",", "'T'", ",", "'P'", ",", "'E'", ",", "'Z'", ",", "'Y'", ")", "prefix", "=", "{", "}", "for", "i", ",", "s", "in", "enumerate", "(", "symbols", ")", ":", "prefix", "[", "s", "]", "=", "1", "<<", "(", "i", "+", "1", ")", "*", "10", "for", "s", "in", "reversed", "(", "symbols", ")", ":", "if", "n", ">=", "prefix", "[", "s", "]", ":", "value", "=", "float", "(", "n", ")", "/", "prefix", "[", "s", "]", "return", "'%.1f%s'", "%", "(", "value", ",", "s", ")", "return", "\"%sB\"", "%", "n" ]
Convert a size number to 'K', 'M', .etc
[ "Convert", "a", "size", "number", "to", "K", "M", ".", "etc" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/common.py#L868-L880
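Quick illustrative calls for convert_bytes above; the expected results follow from the 1 << 10 prefix table in the code:

    from uliweb.utils.common import convert_bytes

    convert_bytes(512)           # '512B'  -- below 1024, returned with a plain B suffix
    convert_bytes(2048)          # '2.0K'  -- 2048 / 1024
    convert_bytes(5 * 1024**2)   # '5.0M'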
7,025
limodou/uliweb
uliweb/contrib/template/__init__.py
init_static_combine
def init_static_combine(): """ Process static combine, create md5 key according each static filename """ from uliweb import settings from hashlib import md5 import os d = {} if settings.get_var('STATIC_COMBINE_CONFIG/enable', False): for k, v in settings.get('STATIC_COMBINE', {}).items(): key = '_cmb_'+md5(''.join(v)).hexdigest()+os.path.splitext(v[0])[1] d[key] = v return d
python
def init_static_combine(): """ Process static combine, create md5 key according each static filename """ from uliweb import settings from hashlib import md5 import os d = {} if settings.get_var('STATIC_COMBINE_CONFIG/enable', False): for k, v in settings.get('STATIC_COMBINE', {}).items(): key = '_cmb_'+md5(''.join(v)).hexdigest()+os.path.splitext(v[0])[1] d[key] = v return d
[ "def", "init_static_combine", "(", ")", ":", "from", "uliweb", "import", "settings", "from", "hashlib", "import", "md5", "import", "os", "d", "=", "{", "}", "if", "settings", ".", "get_var", "(", "'STATIC_COMBINE_CONFIG/enable'", ",", "False", ")", ":", "for", "k", ",", "v", "in", "settings", ".", "get", "(", "'STATIC_COMBINE'", ",", "{", "}", ")", ".", "items", "(", ")", ":", "key", "=", "'_cmb_'", "+", "md5", "(", "''", ".", "join", "(", "v", ")", ")", ".", "hexdigest", "(", ")", "+", "os", ".", "path", ".", "splitext", "(", "v", "[", "0", "]", ")", "[", "1", "]", "d", "[", "key", "]", "=", "v", "return", "d" ]
Process static combine, create md5 key according each static filename
[ "Process", "static", "combine", "create", "md5", "key", "according", "each", "static", "filename" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/template/__init__.py#L9-L23
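init_static_combine above only does work when STATIC_COMBINE_CONFIG/enable is true and reads groups of files from a STATIC_COMBINE section. The ini fragment below is a hypothetical illustration; the key derivation is copied from the code (Python 2 style md5 call, as in the source):

    # settings.ini (illustrative):
    #   [STATIC_COMBINE_CONFIG]
    #   enable = true
    #   [STATIC_COMBINE]
    #   app_js = ['js/jquery.js', 'js/app.js']

    from hashlib import md5
    import os

    files = ['js/jquery.js', 'js/app.js']
    key = '_cmb_' + md5(''.join(files)).hexdigest() + os.path.splitext(files[0])[1]
    # key looks like '_cmb_<32 hex digits>.js' and maps to the file group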
7,026
limodou/uliweb
uliweb/contrib/csrf/__init__.py
csrf_token
def csrf_token(): """ Get csrf token or create new one """ from uliweb import request, settings from uliweb.utils.common import safe_str v = {} token_name = settings.CSRF.cookie_token_name if not request.session.deleted and request.session.get(token_name): v = request.session[token_name] if time.time() >= v['created_time'] + v['expiry_time']: v = {} else: v['created_time'] = time.time() if not v: token = request.cookies.get(token_name) if not token: token = uuid.uuid4().get_hex() v = {'token':token, 'expiry_time':settings.CSRF.timeout, 'created_time':time.time()} if not request.session.deleted: request.session[token_name] = v return safe_str(v['token'])
python
def csrf_token(): """ Get csrf token or create new one """ from uliweb import request, settings from uliweb.utils.common import safe_str v = {} token_name = settings.CSRF.cookie_token_name if not request.session.deleted and request.session.get(token_name): v = request.session[token_name] if time.time() >= v['created_time'] + v['expiry_time']: v = {} else: v['created_time'] = time.time() if not v: token = request.cookies.get(token_name) if not token: token = uuid.uuid4().get_hex() v = {'token':token, 'expiry_time':settings.CSRF.timeout, 'created_time':time.time()} if not request.session.deleted: request.session[token_name] = v return safe_str(v['token'])
[ "def", "csrf_token", "(", ")", ":", "from", "uliweb", "import", "request", ",", "settings", "from", "uliweb", ".", "utils", ".", "common", "import", "safe_str", "v", "=", "{", "}", "token_name", "=", "settings", ".", "CSRF", ".", "cookie_token_name", "if", "not", "request", ".", "session", ".", "deleted", "and", "request", ".", "session", ".", "get", "(", "token_name", ")", ":", "v", "=", "request", ".", "session", "[", "token_name", "]", "if", "time", ".", "time", "(", ")", ">=", "v", "[", "'created_time'", "]", "+", "v", "[", "'expiry_time'", "]", ":", "v", "=", "{", "}", "else", ":", "v", "[", "'created_time'", "]", "=", "time", ".", "time", "(", ")", "if", "not", "v", ":", "token", "=", "request", ".", "cookies", ".", "get", "(", "token_name", ")", "if", "not", "token", ":", "token", "=", "uuid", ".", "uuid4", "(", ")", ".", "get_hex", "(", ")", "v", "=", "{", "'token'", ":", "token", ",", "'expiry_time'", ":", "settings", ".", "CSRF", ".", "timeout", ",", "'created_time'", ":", "time", ".", "time", "(", ")", "}", "if", "not", "request", ".", "session", ".", "deleted", ":", "request", ".", "session", "[", "token_name", "]", "=", "v", "return", "safe_str", "(", "v", "[", "'token'", "]", ")" ]
Get csrf token or create new one
[ "Get", "csrf", "token", "or", "create", "new", "one" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/csrf/__init__.py#L5-L29
7,027
limodou/uliweb
uliweb/lib/werkzeug/contrib/wrappers.py
JSONRequestMixin.json
def json(self): """Get the result of simplejson.loads if possible.""" if 'json' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a JSON request') try: return loads(self.data) except Exception: raise BadRequest('Unable to read JSON request')
python
def json(self): """Get the result of simplejson.loads if possible.""" if 'json' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a JSON request') try: return loads(self.data) except Exception: raise BadRequest('Unable to read JSON request')
[ "def", "json", "(", "self", ")", ":", "if", "'json'", "not", "in", "self", ".", "environ", ".", "get", "(", "'CONTENT_TYPE'", ",", "''", ")", ":", "raise", "BadRequest", "(", "'Not a JSON request'", ")", "try", ":", "return", "loads", "(", "self", ".", "data", ")", "except", "Exception", ":", "raise", "BadRequest", "(", "'Unable to read JSON request'", ")" ]
Get the result of simplejson.loads if possible.
[ "Get", "the", "result", "of", "simplejson", ".", "loads", "if", "possible", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/wrappers.py#L53-L60
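The mixin above is used by combining it with a werkzeug Request class; the module is the bundled copy under uliweb/lib/werkzeug, so the plain werkzeug import path applies. In werkzeug's contrib module json is exposed as a cached property, and environ below is assumed to be a normal WSGI environ:

    from werkzeug.wrappers import Request
    from werkzeug.contrib.wrappers import JSONRequestMixin

    class JSONRequest(JSONRequestMixin, Request):
        pass

    # req = JSONRequest(environ)
    # req.json gives the parsed body and raises BadRequest for non-JSON content types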
7,028
limodou/uliweb
uliweb/lib/werkzeug/contrib/wrappers.py
ProtobufRequestMixin.parse_protobuf
def parse_protobuf(self, proto_type): """Parse the data into an instance of proto_type.""" if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a Protobuf request') obj = proto_type() try: obj.ParseFromString(self.data) except Exception: raise BadRequest("Unable to parse Protobuf request") # Fail if not all required fields are set if self.protobuf_check_initialization and not obj.IsInitialized(): raise BadRequest("Partial Protobuf request") return obj
python
def parse_protobuf(self, proto_type): """Parse the data into an instance of proto_type.""" if 'protobuf' not in self.environ.get('CONTENT_TYPE', ''): raise BadRequest('Not a Protobuf request') obj = proto_type() try: obj.ParseFromString(self.data) except Exception: raise BadRequest("Unable to parse Protobuf request") # Fail if not all required fields are set if self.protobuf_check_initialization and not obj.IsInitialized(): raise BadRequest("Partial Protobuf request") return obj
[ "def", "parse_protobuf", "(", "self", ",", "proto_type", ")", ":", "if", "'protobuf'", "not", "in", "self", ".", "environ", ".", "get", "(", "'CONTENT_TYPE'", ",", "''", ")", ":", "raise", "BadRequest", "(", "'Not a Protobuf request'", ")", "obj", "=", "proto_type", "(", ")", "try", ":", "obj", ".", "ParseFromString", "(", "self", ".", "data", ")", "except", "Exception", ":", "raise", "BadRequest", "(", "\"Unable to parse Protobuf request\"", ")", "# Fail if not all required fields are set", "if", "self", ".", "protobuf_check_initialization", "and", "not", "obj", ".", "IsInitialized", "(", ")", ":", "raise", "BadRequest", "(", "\"Partial Protobuf request\"", ")", "return", "obj" ]
Parse the data into an instance of proto_type.
[ "Parse", "the", "data", "into", "an", "instance", "of", "proto_type", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/wrappers.py#L79-L94
7,029
limodou/uliweb
uliweb/lib/werkzeug/contrib/wrappers.py
DynamicCharsetRequestMixin.charset
def charset(self): """The charset from the content type.""" header = self.environ.get('CONTENT_TYPE') if header: ct, options = parse_options_header(header) charset = options.get('charset') if charset: if is_known_charset(charset): return charset return self.unknown_charset(charset) return self.default_charset
python
def charset(self): """The charset from the content type.""" header = self.environ.get('CONTENT_TYPE') if header: ct, options = parse_options_header(header) charset = options.get('charset') if charset: if is_known_charset(charset): return charset return self.unknown_charset(charset) return self.default_charset
[ "def", "charset", "(", "self", ")", ":", "header", "=", "self", ".", "environ", ".", "get", "(", "'CONTENT_TYPE'", ")", "if", "header", ":", "ct", ",", "options", "=", "parse_options_header", "(", "header", ")", "charset", "=", "options", ".", "get", "(", "'charset'", ")", "if", "charset", ":", "if", "is_known_charset", "(", "charset", ")", ":", "return", "charset", "return", "self", ".", "unknown_charset", "(", "charset", ")", "return", "self", ".", "default_charset" ]
The charset from the content type.
[ "The", "charset", "from", "the", "content", "type", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/wrappers.py#L222-L232
7,030
limodou/uliweb
uliweb/core/template.py
utf8
def utf8(value): """Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8. """ if isinstance(value, _UTF8_TYPES): return value elif isinstance(value, unicode_type): return value.encode("utf-8") else: return str(value)
python
def utf8(value): """Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8. """ if isinstance(value, _UTF8_TYPES): return value elif isinstance(value, unicode_type): return value.encode("utf-8") else: return str(value)
[ "def", "utf8", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "_UTF8_TYPES", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "unicode_type", ")", ":", "return", "value", ".", "encode", "(", "\"utf-8\"", ")", "else", ":", "return", "str", "(", "value", ")" ]
Converts a string argument to a byte string. If the argument is already a byte string or None, it is returned unchanged. Otherwise it must be a unicode string and is encoded as utf8.
[ "Converts", "a", "string", "argument", "to", "a", "byte", "string", ".", "If", "the", "argument", "is", "already", "a", "byte", "string", "or", "None", "it", "is", "returned", "unchanged", ".", "Otherwise", "it", "must", "be", "a", "unicode", "string", "and", "is", "encoded", "as", "utf8", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/template.py#L249-L260
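Behavior of utf8 above in doctest-like form (Python 2 semantics, matching the source); each result follows from one of the three branches in the code:

    from uliweb.core.template import utf8

    utf8(u'caf\xe9')     # -> 'caf\xc3\xa9'   (unicode encoded to UTF-8 bytes)
    utf8('plain bytes')  # returned unchanged
    utf8(42)             # -> '42'            (anything else falls through to str())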
7,031
limodou/uliweb
uliweb/core/template.py
to_basestring
def to_basestring(value): """Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode. """ if value is None: return 'None' if isinstance(value, _BASESTRING_TYPES): return value elif isinstance(value, unicode_type): return value.decode("utf-8") else: return str(value)
python
def to_basestring(value): """Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode. """ if value is None: return 'None' if isinstance(value, _BASESTRING_TYPES): return value elif isinstance(value, unicode_type): return value.decode("utf-8") else: return str(value)
[ "def", "to_basestring", "(", "value", ")", ":", "if", "value", "is", "None", ":", "return", "'None'", "if", "isinstance", "(", "value", ",", "_BASESTRING_TYPES", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "unicode_type", ")", ":", "return", "value", ".", "decode", "(", "\"utf-8\"", ")", "else", ":", "return", "str", "(", "value", ")" ]
Converts a string argument to a subclass of basestring. In python2, byte and unicode strings are mostly interchangeable, so functions that deal with a user-supplied argument in combination with ascii string constants can use either and should return the type the user supplied. In python3, the two types are not interchangeable, so this method is needed to convert byte strings to unicode.
[ "Converts", "a", "string", "argument", "to", "a", "subclass", "of", "basestring", ".", "In", "python2", "byte", "and", "unicode", "strings", "are", "mostly", "interchangeable", "so", "functions", "that", "deal", "with", "a", "user", "-", "supplied", "argument", "in", "combination", "with", "ascii", "string", "constants", "can", "use", "either", "and", "should", "return", "the", "type", "the", "user", "supplied", ".", "In", "python3", "the", "two", "types", "are", "not", "interchangeable", "so", "this", "method", "is", "needed", "to", "convert", "byte", "strings", "to", "unicode", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/template.py#L293-L309
7,032
limodou/uliweb
uliweb/core/template.py
LRUTmplatesCacheDict.clear
def clear(self): """ Clears the dict. """ self.__values.clear() self.__access_keys = [] self.__modified_times.clear()
python
def clear(self): """ Clears the dict. """ self.__values.clear() self.__access_keys = [] self.__modified_times.clear()
[ "def", "clear", "(", "self", ")", ":", "self", ".", "__values", ".", "clear", "(", ")", "self", ".", "__access_keys", "=", "[", "]", "self", ".", "__modified_times", ".", "clear", "(", ")" ]
Clears the dict.
[ "Clears", "the", "dict", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/template.py#L677-L683
7,033
limodou/uliweb
uliweb/core/template.py
LRUTmplatesCacheDict.has
def has(self, key, mtime=None): """ This method should almost NEVER be used. The reason is that between the time has_key is called, and the key is accessed, the key might vanish. """ v = self.__values.get(key, None) if not v: return False if self.check_modified_time: mtime = self._get_mtime(key, mtime) if mtime != self.__modified_times[key]: del self[key] return False return True
python
def has(self, key, mtime=None): """ This method should almost NEVER be used. The reason is that between the time has_key is called, and the key is accessed, the key might vanish. """ v = self.__values.get(key, None) if not v: return False if self.check_modified_time: mtime = self._get_mtime(key, mtime) if mtime != self.__modified_times[key]: del self[key] return False return True
[ "def", "has", "(", "self", ",", "key", ",", "mtime", "=", "None", ")", ":", "v", "=", "self", ".", "__values", ".", "get", "(", "key", ",", "None", ")", "if", "not", "v", ":", "return", "False", "if", "self", ".", "check_modified_time", ":", "mtime", "=", "self", ".", "_get_mtime", "(", "key", ",", "mtime", ")", "if", "mtime", "!=", "self", ".", "__modified_times", "[", "key", "]", ":", "del", "self", "[", "key", "]", "return", "False", "return", "True" ]
This method should almost NEVER be used. The reason is that between the time has_key is called, and the key is accessed, the key might vanish.
[ "This", "method", "should", "almost", "NEVER", "be", "used", ".", "The", "reason", "is", "that", "between", "the", "time", "has_key", "is", "called", "and", "the", "key", "is", "accessed", "the", "key", "might", "vanish", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/template.py#L694-L708
7,034
limodou/uliweb
uliweb/core/template.py
Loader.reset
def reset(self): """Resets the cache of compiled templates.""" with self.lock: if self.cache: if self.use_tmp: shutil.rmtree(self.tmp_dir, ignore_errors=True) else: self.templates = {}
python
def reset(self): """Resets the cache of compiled templates.""" with self.lock: if self.cache: if self.use_tmp: shutil.rmtree(self.tmp_dir, ignore_errors=True) else: self.templates = {}
[ "def", "reset", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "if", "self", ".", "cache", ":", "if", "self", ".", "use_tmp", ":", "shutil", ".", "rmtree", "(", "self", ".", "tmp_dir", ",", "ignore_errors", "=", "True", ")", "else", ":", "self", ".", "templates", "=", "{", "}" ]
Resets the cache of compiled templates.
[ "Resets", "the", "cache", "of", "compiled", "templates", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/template.py#L796-L803
7,035
limodou/uliweb
uliweb/contrib/rbac/rbac.py
has_role
def has_role(user, *roles, **kwargs): """ Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func. """ Role = get_model('role') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for role in roles: if isinstance(role, (str, unicode)): role = Role.get(Role.c.name==role) if not role: continue name = role.name func = __role_funcs__.get(name, None) if func: if isinstance(func, (unicode, str)): func = import_attr(func) assert callable(func) para = kwargs.copy() para['user'] = user flag = call_func(func, para) if flag: return role flag = role.users.has(user) if flag: return role flag = role.usergroups_has_user(user) if flag: return role return False
python
def has_role(user, *roles, **kwargs): """ Judge is the user belongs to the role, and if does, then return the role object if not then return False. kwargs will be passed to role_func. """ Role = get_model('role') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for role in roles: if isinstance(role, (str, unicode)): role = Role.get(Role.c.name==role) if not role: continue name = role.name func = __role_funcs__.get(name, None) if func: if isinstance(func, (unicode, str)): func = import_attr(func) assert callable(func) para = kwargs.copy() para['user'] = user flag = call_func(func, para) if flag: return role flag = role.users.has(user) if flag: return role flag = role.usergroups_has_user(user) if flag: return role return False
[ "def", "has_role", "(", "user", ",", "*", "roles", ",", "*", "*", "kwargs", ")", ":", "Role", "=", "get_model", "(", "'role'", ")", "if", "isinstance", "(", "user", ",", "(", "unicode", ",", "str", ")", ")", ":", "User", "=", "get_model", "(", "'user'", ")", "user", "=", "User", ".", "get", "(", "User", ".", "c", ".", "username", "==", "user", ")", "for", "role", "in", "roles", ":", "if", "isinstance", "(", "role", ",", "(", "str", ",", "unicode", ")", ")", ":", "role", "=", "Role", ".", "get", "(", "Role", ".", "c", ".", "name", "==", "role", ")", "if", "not", "role", ":", "continue", "name", "=", "role", ".", "name", "func", "=", "__role_funcs__", ".", "get", "(", "name", ",", "None", ")", "if", "func", ":", "if", "isinstance", "(", "func", ",", "(", "unicode", ",", "str", ")", ")", ":", "func", "=", "import_attr", "(", "func", ")", "assert", "callable", "(", "func", ")", "para", "=", "kwargs", ".", "copy", "(", ")", "para", "[", "'user'", "]", "=", "user", "flag", "=", "call_func", "(", "func", ",", "para", ")", "if", "flag", ":", "return", "role", "flag", "=", "role", ".", "users", ".", "has", "(", "user", ")", "if", "flag", ":", "return", "role", "flag", "=", "role", ".", "usergroups_has_user", "(", "user", ")", "if", "flag", ":", "return", "role", "return", "False" ]
Judge whether the user belongs to one of the given roles; if so, return the matching role object, otherwise return False. kwargs will be passed to role_func.
[ "Judge", "is", "the", "user", "belongs", "to", "the", "role", "and", "if", "does", "then", "return", "the", "role", "object", "if", "not", "then", "return", "False", ".", "kwargs", "will", "be", "passed", "to", "role_func", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/rbac/rbac.py#L42-L78
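A sketch of calling has_role above from application code. The username and the 'admin'/'manager' role names are placeholders and must exist as User/Role records in a configured uliweb application:

    from uliweb.contrib.rbac.rbac import has_role

    role = has_role('some_username', 'admin', 'manager')
    if role:
        # role is the first matching Role instance, usable like any model object
        name = role.name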
7,036
limodou/uliweb
uliweb/contrib/rbac/rbac.py
has_permission
def has_permission(user, *permissions, **role_kwargs): """ Judge if an user has permission, and if it does return role object, and if it doesn't return False. role_kwargs will be passed to role functions. With role object, you can use role.relation to get Role_Perm_Rel object. """ Role = get_model('role') Perm = get_model('permission') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for name in permissions: perm = Perm.get(Perm.c.name==name) if not perm: continue flag = has_role(user, *list(perm.perm_roles.with_relation().all()), **role_kwargs) if flag: return flag return False
python
def has_permission(user, *permissions, **role_kwargs): """ Judge if an user has permission, and if it does return role object, and if it doesn't return False. role_kwargs will be passed to role functions. With role object, you can use role.relation to get Role_Perm_Rel object. """ Role = get_model('role') Perm = get_model('permission') if isinstance(user, (unicode, str)): User = get_model('user') user = User.get(User.c.username==user) for name in permissions: perm = Perm.get(Perm.c.name==name) if not perm: continue flag = has_role(user, *list(perm.perm_roles.with_relation().all()), **role_kwargs) if flag: return flag return False
[ "def", "has_permission", "(", "user", ",", "*", "permissions", ",", "*", "*", "role_kwargs", ")", ":", "Role", "=", "get_model", "(", "'role'", ")", "Perm", "=", "get_model", "(", "'permission'", ")", "if", "isinstance", "(", "user", ",", "(", "unicode", ",", "str", ")", ")", ":", "User", "=", "get_model", "(", "'user'", ")", "user", "=", "User", ".", "get", "(", "User", ".", "c", ".", "username", "==", "user", ")", "for", "name", "in", "permissions", ":", "perm", "=", "Perm", ".", "get", "(", "Perm", ".", "c", ".", "name", "==", "name", ")", "if", "not", "perm", ":", "continue", "flag", "=", "has_role", "(", "user", ",", "*", "list", "(", "perm", ".", "perm_roles", ".", "with_relation", "(", ")", ".", "all", "(", ")", ")", ",", "*", "*", "role_kwargs", ")", "if", "flag", ":", "return", "flag", "return", "False" ]
Judge whether a user has one of the given permissions; if so, return the granting role object, otherwise return False. role_kwargs will be passed to role functions. With the role object you can use role.relation to get the Role_Perm_Rel object.
[ "Judge", "if", "an", "user", "has", "permission", "and", "if", "it", "does", "return", "role", "object", "and", "if", "it", "doesn", "t", "return", "False", ".", "role_kwargs", "will", "be", "passed", "to", "role", "functions", ".", "With", "role", "object", "you", "can", "use", "role", ".", "relation", "to", "get", "Role_Perm_Rel", "object", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/rbac/rbac.py#L80-L101
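A matching sketch for has_permission above; the username and permission name are placeholders, and the role.relation access comes from the docstring:

    from uliweb.contrib.rbac.rbac import has_permission

    role = has_permission('some_username', 'edit_article')
    if role:
        rel = role.relation   # Role_Perm_Rel record, per the docstring above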
7,037
limodou/uliweb
uliweb/lib/werkzeug/serving.py
load_ssl_context
def load_ssl_context(cert_file, pkey_file): """Loads an SSL context from a certificate and private key file.""" from OpenSSL import SSL ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_certificate_file(cert_file) ctx.use_privatekey_file(pkey_file) return ctx
python
def load_ssl_context(cert_file, pkey_file): """Loads an SSL context from a certificate and private key file.""" from OpenSSL import SSL ctx = SSL.Context(SSL.SSLv23_METHOD) ctx.use_certificate_file(cert_file) ctx.use_privatekey_file(pkey_file) return ctx
[ "def", "load_ssl_context", "(", "cert_file", ",", "pkey_file", ")", ":", "from", "OpenSSL", "import", "SSL", "ctx", "=", "SSL", ".", "Context", "(", "SSL", ".", "SSLv23_METHOD", ")", "ctx", ".", "use_certificate_file", "(", "cert_file", ")", "ctx", ".", "use_privatekey_file", "(", "pkey_file", ")", "return", "ctx" ]
Loads an SSL context from a certificate and private key file.
[ "Loads", "an", "SSL", "context", "from", "a", "certificate", "and", "private", "key", "file", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L344-L350
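Typical use of load_ssl_context above; the file names are placeholders and must point to an existing certificate/key pair, and the returned OpenSSL context is what run_simple/make_server accept as ssl_context:

    from werkzeug.serving import load_ssl_context

    ctx = load_ssl_context('ssl/server.crt', 'ssl/server.key')
    # pass ctx as ssl_context=ctx to run_simple() or make_server()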
7,038
limodou/uliweb
uliweb/lib/werkzeug/serving.py
select_ip_version
def select_ip_version(host, port): """Returns AF_INET4 or AF_INET6 depending on where to connect to.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. ##try: ## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, ## socket.SOCK_STREAM, 0, ## socket.AI_PASSIVE) ## if info: ## return info[0][0] ##except socket.gaierror: ## pass if ':' in host and hasattr(socket, 'AF_INET6'): return socket.AF_INET6 return socket.AF_INET
python
def select_ip_version(host, port): """Returns AF_INET4 or AF_INET6 depending on where to connect to.""" # disabled due to problems with current ipv6 implementations # and various operating systems. Probably this code also is # not supposed to work, but I can't come up with any other # ways to implement this. ##try: ## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC, ## socket.SOCK_STREAM, 0, ## socket.AI_PASSIVE) ## if info: ## return info[0][0] ##except socket.gaierror: ## pass if ':' in host and hasattr(socket, 'AF_INET6'): return socket.AF_INET6 return socket.AF_INET
[ "def", "select_ip_version", "(", "host", ",", "port", ")", ":", "# disabled due to problems with current ipv6 implementations", "# and various operating systems. Probably this code also is", "# not supposed to work, but I can't come up with any other", "# ways to implement this.", "##try:", "## info = socket.getaddrinfo(host, port, socket.AF_UNSPEC,", "## socket.SOCK_STREAM, 0,", "## socket.AI_PASSIVE)", "## if info:", "## return info[0][0]", "##except socket.gaierror:", "## pass", "if", "':'", "in", "host", "and", "hasattr", "(", "socket", ",", "'AF_INET6'", ")", ":", "return", "socket", ".", "AF_INET6", "return", "socket", ".", "AF_INET" ]
Returns AF_INET4 or AF_INET6 depending on where to connect to.
[ "Returns", "AF_INET4", "or", "AF_INET6", "depending", "on", "where", "to", "connect", "to", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L380-L396
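Illustrative results for select_ip_version above, following directly from the ':' check in the code:

    import socket
    from werkzeug.serving import select_ip_version

    select_ip_version('127.0.0.1', 8080)   # socket.AF_INET
    select_ip_version('::1', 8080)         # socket.AF_INET6 (when the platform defines AF_INET6)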
7,039
limodou/uliweb
uliweb/lib/werkzeug/serving.py
make_server
def make_server(host, port, app=None, threaded=False, processes=1, request_handler=None, passthrough_errors=False, ssl_context=None): """Create a new server instance that is either threaded, or forks or just processes one request after another. """ if threaded and processes > 1: raise ValueError("cannot have a multithreaded and " "multi process server.") elif threaded: return ThreadedWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context) elif processes > 1: return ForkingWSGIServer(host, port, app, processes, request_handler, passthrough_errors, ssl_context) else: return BaseWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context)
python
def make_server(host, port, app=None, threaded=False, processes=1, request_handler=None, passthrough_errors=False, ssl_context=None): """Create a new server instance that is either threaded, or forks or just processes one request after another. """ if threaded and processes > 1: raise ValueError("cannot have a multithreaded and " "multi process server.") elif threaded: return ThreadedWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context) elif processes > 1: return ForkingWSGIServer(host, port, app, processes, request_handler, passthrough_errors, ssl_context) else: return BaseWSGIServer(host, port, app, request_handler, passthrough_errors, ssl_context)
[ "def", "make_server", "(", "host", ",", "port", ",", "app", "=", "None", ",", "threaded", "=", "False", ",", "processes", "=", "1", ",", "request_handler", "=", "None", ",", "passthrough_errors", "=", "False", ",", "ssl_context", "=", "None", ")", ":", "if", "threaded", "and", "processes", ">", "1", ":", "raise", "ValueError", "(", "\"cannot have a multithreaded and \"", "\"multi process server.\"", ")", "elif", "threaded", ":", "return", "ThreadedWSGIServer", "(", "host", ",", "port", ",", "app", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")", "elif", "processes", ">", "1", ":", "return", "ForkingWSGIServer", "(", "host", ",", "port", ",", "app", ",", "processes", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")", "else", ":", "return", "BaseWSGIServer", "(", "host", ",", "port", ",", "app", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")" ]
Create a new server instance that is either threaded, or forks or just processes one request after another.
[ "Create", "a", "new", "server", "instance", "that", "is", "either", "threaded", "or", "forks", "or", "just", "processes", "one", "request", "after", "another", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L469-L486
7,040
limodou/uliweb
uliweb/lib/werkzeug/serving.py
_reloader_stat_loop
def _reloader_stat_loop(extra_files=None, interval=1): """When this function is run from the main thread, it will force other threads to exit when any modules currently loaded change. Copyright notice. This function is based on the autoreload.py from the CherryPy trac which originated from WSGIKit which is now dead. :param extra_files: a list of additional files it should watch. """ from itertools import chain mtimes = {} while 1: for filename in chain(_iter_module_files(), extra_files or ()): try: mtime = os.stat(filename).st_mtime except OSError: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: _log('info', ' * Detected change in %r, reloading' % filename) sys.exit(3) time.sleep(interval)
python
def _reloader_stat_loop(extra_files=None, interval=1): """When this function is run from the main thread, it will force other threads to exit when any modules currently loaded change. Copyright notice. This function is based on the autoreload.py from the CherryPy trac which originated from WSGIKit which is now dead. :param extra_files: a list of additional files it should watch. """ from itertools import chain mtimes = {} while 1: for filename in chain(_iter_module_files(), extra_files or ()): try: mtime = os.stat(filename).st_mtime except OSError: continue old_time = mtimes.get(filename) if old_time is None: mtimes[filename] = mtime continue elif mtime > old_time: _log('info', ' * Detected change in %r, reloading' % filename) sys.exit(3) time.sleep(interval)
[ "def", "_reloader_stat_loop", "(", "extra_files", "=", "None", ",", "interval", "=", "1", ")", ":", "from", "itertools", "import", "chain", "mtimes", "=", "{", "}", "while", "1", ":", "for", "filename", "in", "chain", "(", "_iter_module_files", "(", ")", ",", "extra_files", "or", "(", ")", ")", ":", "try", ":", "mtime", "=", "os", ".", "stat", "(", "filename", ")", ".", "st_mtime", "except", "OSError", ":", "continue", "old_time", "=", "mtimes", ".", "get", "(", "filename", ")", "if", "old_time", "is", "None", ":", "mtimes", "[", "filename", "]", "=", "mtime", "continue", "elif", "mtime", ">", "old_time", ":", "_log", "(", "'info'", ",", "' * Detected change in %r, reloading'", "%", "filename", ")", "sys", ".", "exit", "(", "3", ")", "time", ".", "sleep", "(", "interval", ")" ]
When this function is run from the main thread, it will force other threads to exit when any modules currently loaded change. Copyright notice. This function is based on the autoreload.py from the CherryPy trac which originated from WSGIKit which is now dead. :param extra_files: a list of additional files it should watch.
[ "When", "this", "function", "is", "run", "from", "the", "main", "thread", "it", "will", "force", "other", "threads", "to", "exit", "when", "any", "modules", "currently", "loaded", "change", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L507-L532
7,041
limodou/uliweb
uliweb/lib/werkzeug/serving.py
run_simple
def run_simple(hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, threaded=False, processes=1, request_handler=None, static_files=None, passthrough_errors=False, ssl_context=None): """Start an application using wsgiref and with an optional reloader. This wraps `wsgiref` to fix the wrong default reporting of the multithreaded WSGI variable and adds optional multithreading and fork support. This function has a command-line interface too:: python -m werkzeug.serving --help .. versionadded:: 0.5 `static_files` was added to simplify serving of static files as well as `passthrough_errors`. .. versionadded:: 0.6 support for SSL was added. .. versionadded:: 0.8 Added support for automatically loading a SSL context from certificate file and private key. .. versionadded:: 0.9 Added command-line interface. :param hostname: The host for the application. eg: ``'localhost'`` :param port: The port for the server. eg: ``8080`` :param application: the WSGI application to execute :param use_reloader: should the server automatically restart the python process if modules were changed? :param use_debugger: should the werkzeug debugging system be used? :param use_evalex: should the exception evaluation feature be enabled? :param extra_files: a list of files the reloader should watch additionally to the modules. For example configuration files. :param reloader_interval: the interval for the reloader in seconds. :param threaded: should the process handle each request in a separate thread? :param processes: if greater than 1 then handle each request in a new process up to this maximum number of concurrent processes. :param request_handler: optional parameter that can be used to replace the default one. You can use this to replace it with a different :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass. :param static_files: a dict of paths for static files. This works exactly like :class:`SharedDataMiddleware`, it's actually just wrapping the application in that middleware before serving. :param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an OpenSSL context, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or `None` to disable SSL (which is the default). """ if use_debugger: from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, use_evalex) if static_files: from werkzeug.wsgi import SharedDataMiddleware application = SharedDataMiddleware(application, static_files) def inner(): make_server(hostname, port, application, threaded, processes, request_handler, passthrough_errors, ssl_context).serve_forever() if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': display_hostname = hostname != '*' and hostname or 'localhost' if ':' in display_hostname: display_hostname = '[%s]' % display_hostname _log('info', ' * Running on %s://%s:%d/', ssl_context is None and 'http' or 'https', display_hostname, port) if use_reloader: # Create and destroy a socket so that any exceptions are raised before # we spawn a separate Python interpreter and lose this ability. 
address_family = select_ip_version(hostname, port) test_socket = socket.socket(address_family, socket.SOCK_STREAM) test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) test_socket.bind((hostname, port)) test_socket.close() run_with_reloader(inner, extra_files, reloader_interval) else: inner()
python
def run_simple(hostname, port, application, use_reloader=False, use_debugger=False, use_evalex=True, extra_files=None, reloader_interval=1, threaded=False, processes=1, request_handler=None, static_files=None, passthrough_errors=False, ssl_context=None): """Start an application using wsgiref and with an optional reloader. This wraps `wsgiref` to fix the wrong default reporting of the multithreaded WSGI variable and adds optional multithreading and fork support. This function has a command-line interface too:: python -m werkzeug.serving --help .. versionadded:: 0.5 `static_files` was added to simplify serving of static files as well as `passthrough_errors`. .. versionadded:: 0.6 support for SSL was added. .. versionadded:: 0.8 Added support for automatically loading a SSL context from certificate file and private key. .. versionadded:: 0.9 Added command-line interface. :param hostname: The host for the application. eg: ``'localhost'`` :param port: The port for the server. eg: ``8080`` :param application: the WSGI application to execute :param use_reloader: should the server automatically restart the python process if modules were changed? :param use_debugger: should the werkzeug debugging system be used? :param use_evalex: should the exception evaluation feature be enabled? :param extra_files: a list of files the reloader should watch additionally to the modules. For example configuration files. :param reloader_interval: the interval for the reloader in seconds. :param threaded: should the process handle each request in a separate thread? :param processes: if greater than 1 then handle each request in a new process up to this maximum number of concurrent processes. :param request_handler: optional parameter that can be used to replace the default one. You can use this to replace it with a different :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass. :param static_files: a dict of paths for static files. This works exactly like :class:`SharedDataMiddleware`, it's actually just wrapping the application in that middleware before serving. :param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an OpenSSL context, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or `None` to disable SSL (which is the default). """ if use_debugger: from werkzeug.debug import DebuggedApplication application = DebuggedApplication(application, use_evalex) if static_files: from werkzeug.wsgi import SharedDataMiddleware application = SharedDataMiddleware(application, static_files) def inner(): make_server(hostname, port, application, threaded, processes, request_handler, passthrough_errors, ssl_context).serve_forever() if os.environ.get('WERKZEUG_RUN_MAIN') != 'true': display_hostname = hostname != '*' and hostname or 'localhost' if ':' in display_hostname: display_hostname = '[%s]' % display_hostname _log('info', ' * Running on %s://%s:%d/', ssl_context is None and 'http' or 'https', display_hostname, port) if use_reloader: # Create and destroy a socket so that any exceptions are raised before # we spawn a separate Python interpreter and lose this ability. 
address_family = select_ip_version(hostname, port) test_socket = socket.socket(address_family, socket.SOCK_STREAM) test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) test_socket.bind((hostname, port)) test_socket.close() run_with_reloader(inner, extra_files, reloader_interval) else: inner()
[ "def", "run_simple", "(", "hostname", ",", "port", ",", "application", ",", "use_reloader", "=", "False", ",", "use_debugger", "=", "False", ",", "use_evalex", "=", "True", ",", "extra_files", "=", "None", ",", "reloader_interval", "=", "1", ",", "threaded", "=", "False", ",", "processes", "=", "1", ",", "request_handler", "=", "None", ",", "static_files", "=", "None", ",", "passthrough_errors", "=", "False", ",", "ssl_context", "=", "None", ")", ":", "if", "use_debugger", ":", "from", "werkzeug", ".", "debug", "import", "DebuggedApplication", "application", "=", "DebuggedApplication", "(", "application", ",", "use_evalex", ")", "if", "static_files", ":", "from", "werkzeug", ".", "wsgi", "import", "SharedDataMiddleware", "application", "=", "SharedDataMiddleware", "(", "application", ",", "static_files", ")", "def", "inner", "(", ")", ":", "make_server", "(", "hostname", ",", "port", ",", "application", ",", "threaded", ",", "processes", ",", "request_handler", ",", "passthrough_errors", ",", "ssl_context", ")", ".", "serve_forever", "(", ")", "if", "os", ".", "environ", ".", "get", "(", "'WERKZEUG_RUN_MAIN'", ")", "!=", "'true'", ":", "display_hostname", "=", "hostname", "!=", "'*'", "and", "hostname", "or", "'localhost'", "if", "':'", "in", "display_hostname", ":", "display_hostname", "=", "'[%s]'", "%", "display_hostname", "_log", "(", "'info'", ",", "' * Running on %s://%s:%d/'", ",", "ssl_context", "is", "None", "and", "'http'", "or", "'https'", ",", "display_hostname", ",", "port", ")", "if", "use_reloader", ":", "# Create and destroy a socket so that any exceptions are raised before", "# we spawn a separate Python interpreter and lose this ability.", "address_family", "=", "select_ip_version", "(", "hostname", ",", "port", ")", "test_socket", "=", "socket", ".", "socket", "(", "address_family", ",", "socket", ".", "SOCK_STREAM", ")", "test_socket", ".", "setsockopt", "(", "socket", ".", "SOL_SOCKET", ",", "socket", ".", "SO_REUSEADDR", ",", "1", ")", "test_socket", ".", "bind", "(", "(", "hostname", ",", "port", ")", ")", "test_socket", ".", "close", "(", ")", "run_with_reloader", "(", "inner", ",", "extra_files", ",", "reloader_interval", ")", "else", ":", "inner", "(", ")" ]
Start an application using wsgiref and with an optional reloader. This wraps `wsgiref` to fix the wrong default reporting of the multithreaded WSGI variable and adds optional multithreading and fork support. This function has a command-line interface too:: python -m werkzeug.serving --help .. versionadded:: 0.5 `static_files` was added to simplify serving of static files as well as `passthrough_errors`. .. versionadded:: 0.6 support for SSL was added. .. versionadded:: 0.8 Added support for automatically loading a SSL context from certificate file and private key. .. versionadded:: 0.9 Added command-line interface. :param hostname: The host for the application. eg: ``'localhost'`` :param port: The port for the server. eg: ``8080`` :param application: the WSGI application to execute :param use_reloader: should the server automatically restart the python process if modules were changed? :param use_debugger: should the werkzeug debugging system be used? :param use_evalex: should the exception evaluation feature be enabled? :param extra_files: a list of files the reloader should watch additionally to the modules. For example configuration files. :param reloader_interval: the interval for the reloader in seconds. :param threaded: should the process handle each request in a separate thread? :param processes: if greater than 1 then handle each request in a new process up to this maximum number of concurrent processes. :param request_handler: optional parameter that can be used to replace the default one. You can use this to replace it with a different :class:`~BaseHTTPServer.BaseHTTPRequestHandler` subclass. :param static_files: a dict of paths for static files. This works exactly like :class:`SharedDataMiddleware`, it's actually just wrapping the application in that middleware before serving. :param passthrough_errors: set this to `True` to disable the error catching. This means that the server will die on errors but it can be useful to hook debuggers in (pdb etc.) :param ssl_context: an SSL context for the connection. Either an OpenSSL context, a tuple in the form ``(cert_file, pkey_file)``, the string ``'adhoc'`` if the server should automatically create one, or `None` to disable SSL (which is the default).
[ "Start", "an", "application", "using", "wsgiref", "and", "with", "an", "optional", "reloader", ".", "This", "wraps", "wsgiref", "to", "fix", "the", "wrong", "default", "reporting", "of", "the", "multithreaded", "WSGI", "variable", "and", "adds", "optional", "multithreading", "and", "fork", "support", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/serving.py#L626-L714
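A self-contained hello-world server using run_simple above, in the usual werkzeug style; the host and port values are arbitrary:

    from werkzeug.wrappers import Request, Response
    from werkzeug.serving import run_simple

    @Request.application
    def application(request):
        return Response('Hello World!')

    if __name__ == '__main__':
        run_simple('localhost', 4000, application,
                   use_reloader=True, use_debugger=True)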
7,042
limodou/uliweb
uliweb/core/html.py
to_attrs
def to_attrs(args, nocreate_if_none=['id', 'for', 'class']): """ Make python dict to k="v" format """ if not args: return '' s = [''] for k, v in sorted(args.items()): k = u_str(k) v = u_str(v) if k.startswith('_'): k = k[1:] if v is None: if k not in nocreate_if_none: s.append(k) else: if k.lower() in __noescape_attrs__: t = u_str(v) else: t = cgi.escape(u_str(v)) t = '"%s"' % t.replace('"', '&quot;') s.append('%s=%s' % (k, t)) return ' '.join(s)
python
def to_attrs(args, nocreate_if_none=['id', 'for', 'class']): """ Make python dict to k="v" format """ if not args: return '' s = [''] for k, v in sorted(args.items()): k = u_str(k) v = u_str(v) if k.startswith('_'): k = k[1:] if v is None: if k not in nocreate_if_none: s.append(k) else: if k.lower() in __noescape_attrs__: t = u_str(v) else: t = cgi.escape(u_str(v)) t = '"%s"' % t.replace('"', '&quot;') s.append('%s=%s' % (k, t)) return ' '.join(s)
[ "def", "to_attrs", "(", "args", ",", "nocreate_if_none", "=", "[", "'id'", ",", "'for'", ",", "'class'", "]", ")", ":", "if", "not", "args", ":", "return", "''", "s", "=", "[", "''", "]", "for", "k", ",", "v", "in", "sorted", "(", "args", ".", "items", "(", ")", ")", ":", "k", "=", "u_str", "(", "k", ")", "v", "=", "u_str", "(", "v", ")", "if", "k", ".", "startswith", "(", "'_'", ")", ":", "k", "=", "k", "[", "1", ":", "]", "if", "v", "is", "None", ":", "if", "k", "not", "in", "nocreate_if_none", ":", "s", ".", "append", "(", "k", ")", "else", ":", "if", "k", ".", "lower", "(", ")", "in", "__noescape_attrs__", ":", "t", "=", "u_str", "(", "v", ")", "else", ":", "t", "=", "cgi", ".", "escape", "(", "u_str", "(", "v", ")", ")", "t", "=", "'\"%s\"'", "%", "t", ".", "replace", "(", "'\"'", ",", "'&quot;'", ")", "s", ".", "append", "(", "'%s=%s'", "%", "(", "k", ",", "t", ")", ")", "return", "' '", ".", "join", "(", "s", ")" ]
Make python dict to k="v" format
[ "Make", "python", "dict", "to", "k", "=", "v", "format" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/html.py#L24-L46
7,043
limodou/uliweb
uliweb/i18n/pygettext.py
_get_modpkg_path
def _get_modpkg_path(dotted_name, pathlist=None): """Get the filesystem path for a module or a package. Return the file system path to a file for a module, and to a directory for a package. Return None if the name is not found, or is a builtin or extension module. """ # split off top-most name parts = dotted_name.split('.', 1) if len(parts) > 1: # we have a dotted path, import top-level package try: file, pathname, description = imp.find_module(parts[0], pathlist) if file: file.close() except ImportError: return None # check if it's indeed a package if description[2] == imp.PKG_DIRECTORY: # recursively handle the remaining name parts pathname = _get_modpkg_path(parts[1], [pathname]) else: pathname = None else: # plain name try: file, pathname, description = imp.find_module( dotted_name, pathlist) if file: file.close() if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]: pathname = None except ImportError: pathname = None return pathname
python
def _get_modpkg_path(dotted_name, pathlist=None): """Get the filesystem path for a module or a package. Return the file system path to a file for a module, and to a directory for a package. Return None if the name is not found, or is a builtin or extension module. """ # split off top-most name parts = dotted_name.split('.', 1) if len(parts) > 1: # we have a dotted path, import top-level package try: file, pathname, description = imp.find_module(parts[0], pathlist) if file: file.close() except ImportError: return None # check if it's indeed a package if description[2] == imp.PKG_DIRECTORY: # recursively handle the remaining name parts pathname = _get_modpkg_path(parts[1], [pathname]) else: pathname = None else: # plain name try: file, pathname, description = imp.find_module( dotted_name, pathlist) if file: file.close() if description[2] not in [imp.PY_SOURCE, imp.PKG_DIRECTORY]: pathname = None except ImportError: pathname = None return pathname
[ "def", "_get_modpkg_path", "(", "dotted_name", ",", "pathlist", "=", "None", ")", ":", "# split off top-most name", "parts", "=", "dotted_name", ".", "split", "(", "'.'", ",", "1", ")", "if", "len", "(", "parts", ")", ">", "1", ":", "# we have a dotted path, import top-level package", "try", ":", "file", ",", "pathname", ",", "description", "=", "imp", ".", "find_module", "(", "parts", "[", "0", "]", ",", "pathlist", ")", "if", "file", ":", "file", ".", "close", "(", ")", "except", "ImportError", ":", "return", "None", "# check if it's indeed a package", "if", "description", "[", "2", "]", "==", "imp", ".", "PKG_DIRECTORY", ":", "# recursively handle the remaining name parts", "pathname", "=", "_get_modpkg_path", "(", "parts", "[", "1", "]", ",", "[", "pathname", "]", ")", "else", ":", "pathname", "=", "None", "else", ":", "# plain name", "try", ":", "file", ",", "pathname", ",", "description", "=", "imp", ".", "find_module", "(", "dotted_name", ",", "pathlist", ")", "if", "file", ":", "file", ".", "close", "(", ")", "if", "description", "[", "2", "]", "not", "in", "[", "imp", ".", "PY_SOURCE", ",", "imp", ".", "PKG_DIRECTORY", "]", ":", "pathname", "=", "None", "except", "ImportError", ":", "pathname", "=", "None", "return", "pathname" ]
Get the filesystem path for a module or a package. Return the file system path to a file for a module, and to a directory for a package. Return None if the name is not found, or is a builtin or extension module.
[ "Get", "the", "filesystem", "path", "for", "a", "module", "or", "a", "package", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/i18n/pygettext.py#L314-L350
7,044
limodou/uliweb
uliweb/i18n/pygettext.py
getFilesForName
def getFilesForName(name): """Get a list of module files for a filename, a module or package name, or a directory. """ if not os.path.exists(name): # check for glob chars if containsAny(name, "*?[]"): files = glob.glob(name) list = [] for file in files: list.extend(getFilesForName(file)) return list # try to find module or package name = _get_modpkg_path(name) if not name: return [] if os.path.isdir(name): # find all python files in directory list = [] os.path.walk(name, _visit_pyfiles, list) return list elif os.path.exists(name): # a single file return [name] return []
python
def getFilesForName(name): """Get a list of module files for a filename, a module or package name, or a directory. """ if not os.path.exists(name): # check for glob chars if containsAny(name, "*?[]"): files = glob.glob(name) list = [] for file in files: list.extend(getFilesForName(file)) return list # try to find module or package name = _get_modpkg_path(name) if not name: return [] if os.path.isdir(name): # find all python files in directory list = [] os.path.walk(name, _visit_pyfiles, list) return list elif os.path.exists(name): # a single file return [name] return []
[ "def", "getFilesForName", "(", "name", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "# check for glob chars", "if", "containsAny", "(", "name", ",", "\"*?[]\"", ")", ":", "files", "=", "glob", ".", "glob", "(", "name", ")", "list", "=", "[", "]", "for", "file", "in", "files", ":", "list", ".", "extend", "(", "getFilesForName", "(", "file", ")", ")", "return", "list", "# try to find module or package", "name", "=", "_get_modpkg_path", "(", "name", ")", "if", "not", "name", ":", "return", "[", "]", "if", "os", ".", "path", ".", "isdir", "(", "name", ")", ":", "# find all python files in directory", "list", "=", "[", "]", "os", ".", "path", ".", "walk", "(", "name", ",", "_visit_pyfiles", ",", "list", ")", "return", "list", "elif", "os", ".", "path", ".", "exists", "(", "name", ")", ":", "# a single file", "return", "[", "name", "]", "return", "[", "]" ]
Get a list of module files for a filename, a module or package name, or a directory.
[ "Get", "a", "list", "of", "module", "files", "for", "a", "filename", "a", "module", "or", "package", "name", "or", "a", "directory", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/i18n/pygettext.py#L353-L380
7,045
limodou/uliweb
uliweb/lib/werkzeug/utils.py
unescape
def unescape(s): """The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape. """ def handle_match(m): name = m.group(1) if name in HTMLBuilder._entities: return unichr(HTMLBuilder._entities[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, s)
python
def unescape(s): """The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape. """ def handle_match(m): name = m.group(1) if name in HTMLBuilder._entities: return unichr(HTMLBuilder._entities[name]) try: if name[:2] in ('#x', '#X'): return unichr(int(name[2:], 16)) elif name.startswith('#'): return unichr(int(name[1:])) except ValueError: pass return u'' return _entity_re.sub(handle_match, s)
[ "def", "unescape", "(", "s", ")", ":", "def", "handle_match", "(", "m", ")", ":", "name", "=", "m", ".", "group", "(", "1", ")", "if", "name", "in", "HTMLBuilder", ".", "_entities", ":", "return", "unichr", "(", "HTMLBuilder", ".", "_entities", "[", "name", "]", ")", "try", ":", "if", "name", "[", ":", "2", "]", "in", "(", "'#x'", ",", "'#X'", ")", ":", "return", "unichr", "(", "int", "(", "name", "[", "2", ":", "]", ",", "16", ")", ")", "elif", "name", ".", "startswith", "(", "'#'", ")", ":", "return", "unichr", "(", "int", "(", "name", "[", "1", ":", "]", ")", ")", "except", "ValueError", ":", "pass", "return", "u''", "return", "_entity_re", ".", "sub", "(", "handle_match", ",", "s", ")" ]
The reverse function of `escape`. This unescapes all the HTML entities, not only the XML entities inserted by `escape`. :param s: the string to unescape.
[ "The", "reverse", "function", "of", "escape", ".", "This", "unescapes", "all", "the", "HTML", "entities", "not", "only", "the", "XML", "entities", "inserted", "by", "escape", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/utils.py#L317-L335
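Illustrative calls for unescape above; it resolves named entities (via HTMLBuilder._entities) as well as decimal and hex references, and drops unknown entities:

    from werkzeug.utils import unescape

    unescape(u'&lt;b&gt;caf&eacute;&lt;/b&gt;')   # -> u'<b>caf\xe9</b>'
    unescape(u'&#72;&#x69;')                      # -> u'Hi'
    unescape(u'&bogus;')                          # -> u'' (unknown entity removed)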
7,046
limodou/uliweb
uliweb/lib/werkzeug/utils.py
append_slash_redirect
def append_slash_redirect(environ, code=301): """Redirect to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect. """ new_path = environ['PATH_INFO'].strip('/') + '/' query_string = environ.get('QUERY_STRING') if query_string: new_path += '?' + query_string return redirect(new_path, code)
python
def append_slash_redirect(environ, code=301): """Redirect to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect. """ new_path = environ['PATH_INFO'].strip('/') + '/' query_string = environ.get('QUERY_STRING') if query_string: new_path += '?' + query_string return redirect(new_path, code)
[ "def", "append_slash_redirect", "(", "environ", ",", "code", "=", "301", ")", ":", "new_path", "=", "environ", "[", "'PATH_INFO'", "]", ".", "strip", "(", "'/'", ")", "+", "'/'", "query_string", "=", "environ", ".", "get", "(", "'QUERY_STRING'", ")", "if", "query_string", ":", "new_path", "+=", "'?'", "+", "query_string", "return", "redirect", "(", "new_path", ",", "code", ")" ]
Redirect to the same URL but with a slash appended. The behavior of this function is undefined if the path ends with a slash already. :param environ: the WSGI environment for the request that triggers the redirect. :param code: the status code for the redirect.
[ "Redirect", "to", "the", "same", "URL", "but", "with", "a", "slash", "appended", ".", "The", "behavior", "of", "this", "function", "is", "undefined", "if", "the", "path", "ends", "with", "a", "slash", "already", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/utils.py#L368-L380
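A hedged sketch of how this helper might be wired into a bare WSGI callable; everything outside the redirect branch is illustrative:

    from werkzeug.utils import append_slash_redirect

    def application(environ, start_response):
        if not environ['PATH_INFO'].endswith('/'):
            # returns a redirect Response object; invoke it like any WSGI app
            return append_slash_redirect(environ, code=301)(environ, start_response)
        start_response('200 OK', [('Content-Type', 'text/plain')])
        return ['ok']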
7,047
limodou/uliweb
uliweb/i18n/po_merge.py
parse_translation
def parse_translation(f, lineno): """Read a single translation entry from the file F and return a tuple with the comments, msgid and msgstr. The comments is returned as a list of lines which do not end in new-lines. The msgid and msgstr are strings, possibly with embedded newlines""" line = f.readline() def get_line(f, line, need_keys, lineno, default='""'): line = line.rstrip() if not line: return lineno, need_keys[0], default, line key, value = line.split(' ', 1) # Parse msgid if key not in need_keys: print 'Error Line, need %r: %d, line=' % (need_keys, lineno, line) raise RuntimeError("parse error") v = value while 1: line = f.readline() line = line.rstrip() lineno += 1 if not line or line[0] != '"': break v += '\n' + line[:] return lineno, key, v, line # Parse comments comments = [] while 1: if not line: return lineno, None, None, None if line.strip() == '': return lineno, comments, None, None elif line[0] == '#': comments.append(line[:-1]) else: break line = f.readline() lineno += 1 lineno, key, msgid, line = get_line(f, line, ['msgid'], lineno) lineno, key, value, line = get_line(f, line, ['msgid_plural', 'msgstr'], lineno) if key == 'msgid_plural': msgid = (msgid, value) lineno, key, v1, line = get_line(f, line, ['msgstr[0]'], lineno) lineno, key, v2, line = get_line(f, line, ['msgstr[1]'], lineno) msgstr = (v1, v2) else: msgstr = value if line != '': print 'File: %s Error Line: %s' % (f.name, line) raise RuntimeError("parse error") return lineno, comments, msgid, msgstr
python
def parse_translation(f, lineno): """Read a single translation entry from the file F and return a tuple with the comments, msgid and msgstr. The comments is returned as a list of lines which do not end in new-lines. The msgid and msgstr are strings, possibly with embedded newlines""" line = f.readline() def get_line(f, line, need_keys, lineno, default='""'): line = line.rstrip() if not line: return lineno, need_keys[0], default, line key, value = line.split(' ', 1) # Parse msgid if key not in need_keys: print 'Error Line, need %r: %d, line=' % (need_keys, lineno, line) raise RuntimeError("parse error") v = value while 1: line = f.readline() line = line.rstrip() lineno += 1 if not line or line[0] != '"': break v += '\n' + line[:] return lineno, key, v, line # Parse comments comments = [] while 1: if not line: return lineno, None, None, None if line.strip() == '': return lineno, comments, None, None elif line[0] == '#': comments.append(line[:-1]) else: break line = f.readline() lineno += 1 lineno, key, msgid, line = get_line(f, line, ['msgid'], lineno) lineno, key, value, line = get_line(f, line, ['msgid_plural', 'msgstr'], lineno) if key == 'msgid_plural': msgid = (msgid, value) lineno, key, v1, line = get_line(f, line, ['msgstr[0]'], lineno) lineno, key, v2, line = get_line(f, line, ['msgstr[1]'], lineno) msgstr = (v1, v2) else: msgstr = value if line != '': print 'File: %s Error Line: %s' % (f.name, line) raise RuntimeError("parse error") return lineno, comments, msgid, msgstr
[ "def", "parse_translation", "(", "f", ",", "lineno", ")", ":", "line", "=", "f", ".", "readline", "(", ")", "def", "get_line", "(", "f", ",", "line", ",", "need_keys", ",", "lineno", ",", "default", "=", "'\"\"'", ")", ":", "line", "=", "line", ".", "rstrip", "(", ")", "if", "not", "line", ":", "return", "lineno", ",", "need_keys", "[", "0", "]", ",", "default", ",", "line", "key", ",", "value", "=", "line", ".", "split", "(", "' '", ",", "1", ")", "# Parse msgid\r", "if", "key", "not", "in", "need_keys", ":", "print", "'Error Line, need %r: %d, line='", "%", "(", "need_keys", ",", "lineno", ",", "line", ")", "raise", "RuntimeError", "(", "\"parse error\"", ")", "v", "=", "value", "while", "1", ":", "line", "=", "f", ".", "readline", "(", ")", "line", "=", "line", ".", "rstrip", "(", ")", "lineno", "+=", "1", "if", "not", "line", "or", "line", "[", "0", "]", "!=", "'\"'", ":", "break", "v", "+=", "'\\n'", "+", "line", "[", ":", "]", "return", "lineno", ",", "key", ",", "v", ",", "line", "# Parse comments\r", "comments", "=", "[", "]", "while", "1", ":", "if", "not", "line", ":", "return", "lineno", ",", "None", ",", "None", ",", "None", "if", "line", ".", "strip", "(", ")", "==", "''", ":", "return", "lineno", ",", "comments", ",", "None", ",", "None", "elif", "line", "[", "0", "]", "==", "'#'", ":", "comments", ".", "append", "(", "line", "[", ":", "-", "1", "]", ")", "else", ":", "break", "line", "=", "f", ".", "readline", "(", ")", "lineno", "+=", "1", "lineno", ",", "key", ",", "msgid", ",", "line", "=", "get_line", "(", "f", ",", "line", ",", "[", "'msgid'", "]", ",", "lineno", ")", "lineno", ",", "key", ",", "value", ",", "line", "=", "get_line", "(", "f", ",", "line", ",", "[", "'msgid_plural'", ",", "'msgstr'", "]", ",", "lineno", ")", "if", "key", "==", "'msgid_plural'", ":", "msgid", "=", "(", "msgid", ",", "value", ")", "lineno", ",", "key", ",", "v1", ",", "line", "=", "get_line", "(", "f", ",", "line", ",", "[", "'msgstr[0]'", "]", ",", "lineno", ")", "lineno", ",", "key", ",", "v2", ",", "line", "=", "get_line", "(", "f", ",", "line", ",", "[", "'msgstr[1]'", "]", ",", "lineno", ")", "msgstr", "=", "(", "v1", ",", "v2", ")", "else", ":", "msgstr", "=", "value", "if", "line", "!=", "''", ":", "print", "'File: %s Error Line: %s'", "%", "(", "f", ".", "name", ",", "line", ")", "raise", "RuntimeError", "(", "\"parse error\"", ")", "return", "lineno", ",", "comments", ",", "msgid", ",", "msgstr" ]
Read a single translation entry from the file F and return a tuple with the comments, msgid and msgstr. The comments are returned as a list of lines which do not end in newlines. The msgid and msgstr are strings, possibly with embedded newlines
[ "Read", "a", "single", "translation", "entry", "from", "the", "file", "F", "and", "return", "a", "tuple", "with", "the", "comments", "msgid", "and", "msgstr", ".", "The", "comments", "is", "returned", "as", "a", "list", "of", "lines", "which", "do", "not", "end", "in", "new", "-", "lines", ".", "The", "msgid", "and", "msgstr", "are", "strings", "possibly", "with", "embedded", "newlines" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/i18n/po_merge.py#L5-L59
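A usage sketch for parse_translation, assuming a messages.po file is available; it reads one entry per call and advances the caller's line counter:

    from uliweb.i18n.po_merge import parse_translation

    f = open('messages.po')          # assumed to exist
    lineno = 0
    lineno, comments, msgid, msgstr = parse_translation(f, lineno)
    # comments: list of '#...' header lines; msgid/msgstr: quoted strings,
    # or (singular, plural) / (msgstr[0], msgstr[1]) tuples for plural entries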
7,048
limodou/uliweb
uliweb/contrib/form/__init__.py
get_form
def get_form(formcls): """ get form class according form class path or form class object """ from uliweb.form import Form import inspect if inspect.isclass(formcls) and issubclass(formcls, Form): return formcls elif isinstance(formcls, (str, unicode)): path = settings.FORMS.get(formcls) if path: _cls = import_attr(path) return _cls else: raise UliwebError("Can't find formcls name %s in settings.FORMS" % formcls) else: raise UliwebError("formcls should be Form class object or string path format, but %r found!" % formcls)
python
def get_form(formcls): """ get form class according form class path or form class object """ from uliweb.form import Form import inspect if inspect.isclass(formcls) and issubclass(formcls, Form): return formcls elif isinstance(formcls, (str, unicode)): path = settings.FORMS.get(formcls) if path: _cls = import_attr(path) return _cls else: raise UliwebError("Can't find formcls name %s in settings.FORMS" % formcls) else: raise UliwebError("formcls should be Form class object or string path format, but %r found!" % formcls)
[ "def", "get_form", "(", "formcls", ")", ":", "from", "uliweb", ".", "form", "import", "Form", "import", "inspect", "if", "inspect", ".", "isclass", "(", "formcls", ")", "and", "issubclass", "(", "formcls", ",", "Form", ")", ":", "return", "formcls", "elif", "isinstance", "(", "formcls", ",", "(", "str", ",", "unicode", ")", ")", ":", "path", "=", "settings", ".", "FORMS", ".", "get", "(", "formcls", ")", "if", "path", ":", "_cls", "=", "import_attr", "(", "path", ")", "return", "_cls", "else", ":", "raise", "UliwebError", "(", "\"Can't find formcls name %s in settings.FORMS\"", "%", "formcls", ")", "else", ":", "raise", "UliwebError", "(", "\"formcls should be Form class object or string path format, but %r found!\"", "%", "formcls", ")" ]
Get the form class according to a form class path or a form class object
[ "get", "form", "class", "according", "form", "class", "path", "or", "form", "class", "object" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/form/__init__.py#L6-L23
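A sketch of the two accepted argument forms; the 'login' key and the myapp.forms.LoginForm path are placeholders for whatever settings.FORMS actually contains:

    from uliweb.contrib.form import get_form
    from uliweb.form import Form

    class LoginForm(Form):
        pass

    get_form(LoginForm)     # a Form subclass is returned unchanged
    get_form('login')       # resolved via settings.FORMS, e.g. login = 'myapp.forms.LoginForm'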
7,049
limodou/uliweb
uliweb/lib/werkzeug/script.py
run
def run(namespace=None, action_prefix='action_', args=None): """Run the script. Participating actions are looked up in the caller's namespace if no namespace is given, otherwise in the dict provided. Only items that start with action_prefix are processed as actions. If you want to use all items in the namespace provided as actions set action_prefix to an empty string. :param namespace: An optional dict where the functions are looked up in. By default the local namespace of the caller is used. :param action_prefix: The prefix for the functions. Everything else is ignored. :param args: the arguments for the function. If not specified :data:`sys.argv` without the first argument is used. """ if namespace is None: namespace = sys._getframe(1).f_locals actions = find_actions(namespace, action_prefix) if args is None: args = sys.argv[1:] if not args or args[0] in ('-h', '--help'): return print_usage(actions) elif args[0] not in actions: fail('Unknown action \'%s\'' % args[0]) arguments = {} types = {} key_to_arg = {} long_options = [] formatstring = '' func, doc, arg_def = actions[args.pop(0)] for idx, (arg, shortcut, default, option_type) in enumerate(arg_def): real_arg = arg.replace('-', '_') if shortcut: formatstring += shortcut if not isinstance(default, bool): formatstring += ':' key_to_arg['-' + shortcut] = real_arg long_options.append(isinstance(default, bool) and arg or arg + '=') key_to_arg['--' + arg] = real_arg key_to_arg[idx] = real_arg types[real_arg] = option_type arguments[real_arg] = default try: optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options) except getopt.GetoptError as e: fail(str(e)) specified_arguments = set() for key, value in enumerate(posargs): try: arg = key_to_arg[key] except IndexError: fail('Too many parameters') specified_arguments.add(arg) try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for argument %s (%s): %s' % (key, arg, value)) for key, value in optlist: arg = key_to_arg[key] if arg in specified_arguments: fail('Argument \'%s\' is specified twice' % arg) if types[arg] == 'boolean': if arg.startswith('no_'): value = 'no' else: value = 'yes' try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for \'%s\': %s' % (key, value)) newargs = {} for k, v in iteritems(arguments): newargs[k.startswith('no_') and k[3:] or k] = v arguments = newargs return func(**arguments)
python
def run(namespace=None, action_prefix='action_', args=None): """Run the script. Participating actions are looked up in the caller's namespace if no namespace is given, otherwise in the dict provided. Only items that start with action_prefix are processed as actions. If you want to use all items in the namespace provided as actions set action_prefix to an empty string. :param namespace: An optional dict where the functions are looked up in. By default the local namespace of the caller is used. :param action_prefix: The prefix for the functions. Everything else is ignored. :param args: the arguments for the function. If not specified :data:`sys.argv` without the first argument is used. """ if namespace is None: namespace = sys._getframe(1).f_locals actions = find_actions(namespace, action_prefix) if args is None: args = sys.argv[1:] if not args or args[0] in ('-h', '--help'): return print_usage(actions) elif args[0] not in actions: fail('Unknown action \'%s\'' % args[0]) arguments = {} types = {} key_to_arg = {} long_options = [] formatstring = '' func, doc, arg_def = actions[args.pop(0)] for idx, (arg, shortcut, default, option_type) in enumerate(arg_def): real_arg = arg.replace('-', '_') if shortcut: formatstring += shortcut if not isinstance(default, bool): formatstring += ':' key_to_arg['-' + shortcut] = real_arg long_options.append(isinstance(default, bool) and arg or arg + '=') key_to_arg['--' + arg] = real_arg key_to_arg[idx] = real_arg types[real_arg] = option_type arguments[real_arg] = default try: optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options) except getopt.GetoptError as e: fail(str(e)) specified_arguments = set() for key, value in enumerate(posargs): try: arg = key_to_arg[key] except IndexError: fail('Too many parameters') specified_arguments.add(arg) try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for argument %s (%s): %s' % (key, arg, value)) for key, value in optlist: arg = key_to_arg[key] if arg in specified_arguments: fail('Argument \'%s\' is specified twice' % arg) if types[arg] == 'boolean': if arg.startswith('no_'): value = 'no' else: value = 'yes' try: arguments[arg] = converters[types[arg]](value) except ValueError: fail('Invalid value for \'%s\': %s' % (key, value)) newargs = {} for k, v in iteritems(arguments): newargs[k.startswith('no_') and k[3:] or k] = v arguments = newargs return func(**arguments)
[ "def", "run", "(", "namespace", "=", "None", ",", "action_prefix", "=", "'action_'", ",", "args", "=", "None", ")", ":", "if", "namespace", "is", "None", ":", "namespace", "=", "sys", ".", "_getframe", "(", "1", ")", ".", "f_locals", "actions", "=", "find_actions", "(", "namespace", ",", "action_prefix", ")", "if", "args", "is", "None", ":", "args", "=", "sys", ".", "argv", "[", "1", ":", "]", "if", "not", "args", "or", "args", "[", "0", "]", "in", "(", "'-h'", ",", "'--help'", ")", ":", "return", "print_usage", "(", "actions", ")", "elif", "args", "[", "0", "]", "not", "in", "actions", ":", "fail", "(", "'Unknown action \\'%s\\''", "%", "args", "[", "0", "]", ")", "arguments", "=", "{", "}", "types", "=", "{", "}", "key_to_arg", "=", "{", "}", "long_options", "=", "[", "]", "formatstring", "=", "''", "func", ",", "doc", ",", "arg_def", "=", "actions", "[", "args", ".", "pop", "(", "0", ")", "]", "for", "idx", ",", "(", "arg", ",", "shortcut", ",", "default", ",", "option_type", ")", "in", "enumerate", "(", "arg_def", ")", ":", "real_arg", "=", "arg", ".", "replace", "(", "'-'", ",", "'_'", ")", "if", "shortcut", ":", "formatstring", "+=", "shortcut", "if", "not", "isinstance", "(", "default", ",", "bool", ")", ":", "formatstring", "+=", "':'", "key_to_arg", "[", "'-'", "+", "shortcut", "]", "=", "real_arg", "long_options", ".", "append", "(", "isinstance", "(", "default", ",", "bool", ")", "and", "arg", "or", "arg", "+", "'='", ")", "key_to_arg", "[", "'--'", "+", "arg", "]", "=", "real_arg", "key_to_arg", "[", "idx", "]", "=", "real_arg", "types", "[", "real_arg", "]", "=", "option_type", "arguments", "[", "real_arg", "]", "=", "default", "try", ":", "optlist", ",", "posargs", "=", "getopt", ".", "gnu_getopt", "(", "args", ",", "formatstring", ",", "long_options", ")", "except", "getopt", ".", "GetoptError", "as", "e", ":", "fail", "(", "str", "(", "e", ")", ")", "specified_arguments", "=", "set", "(", ")", "for", "key", ",", "value", "in", "enumerate", "(", "posargs", ")", ":", "try", ":", "arg", "=", "key_to_arg", "[", "key", "]", "except", "IndexError", ":", "fail", "(", "'Too many parameters'", ")", "specified_arguments", ".", "add", "(", "arg", ")", "try", ":", "arguments", "[", "arg", "]", "=", "converters", "[", "types", "[", "arg", "]", "]", "(", "value", ")", "except", "ValueError", ":", "fail", "(", "'Invalid value for argument %s (%s): %s'", "%", "(", "key", ",", "arg", ",", "value", ")", ")", "for", "key", ",", "value", "in", "optlist", ":", "arg", "=", "key_to_arg", "[", "key", "]", "if", "arg", "in", "specified_arguments", ":", "fail", "(", "'Argument \\'%s\\' is specified twice'", "%", "arg", ")", "if", "types", "[", "arg", "]", "==", "'boolean'", ":", "if", "arg", ".", "startswith", "(", "'no_'", ")", ":", "value", "=", "'no'", "else", ":", "value", "=", "'yes'", "try", ":", "arguments", "[", "arg", "]", "=", "converters", "[", "types", "[", "arg", "]", "]", "(", "value", ")", "except", "ValueError", ":", "fail", "(", "'Invalid value for \\'%s\\': %s'", "%", "(", "key", ",", "value", ")", ")", "newargs", "=", "{", "}", "for", "k", ",", "v", "in", "iteritems", "(", "arguments", ")", ":", "newargs", "[", "k", ".", "startswith", "(", "'no_'", ")", "and", "k", "[", "3", ":", "]", "or", "k", "]", "=", "v", "arguments", "=", "newargs", "return", "func", "(", "*", "*", "arguments", ")" ]
Run the script. Participating actions are looked up in the caller's namespace if no namespace is given, otherwise in the dict provided. Only items that start with action_prefix are processed as actions. If you want to use all items in the namespace provided as actions set action_prefix to an empty string. :param namespace: An optional dict where the functions are looked up in. By default the local namespace of the caller is used. :param action_prefix: The prefix for the functions. Everything else is ignored. :param args: the arguments for the function. If not specified :data:`sys.argv` without the first argument is used.
[ "Run", "the", "script", ".", "Participating", "actions", "are", "looked", "up", "in", "the", "caller", "s", "namespace", "if", "no", "namespace", "is", "given", "otherwise", "in", "the", "dict", "provided", ".", "Only", "items", "that", "start", "with", "action_prefix", "are", "processed", "as", "actions", ".", "If", "you", "want", "to", "use", "all", "items", "in", "the", "namespace", "provided", "as", "actions", "set", "action_prefix", "to", "an", "empty", "string", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L98-L177
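A minimal manage-script sketch built on run(); the action name and arguments are invented for illustration:

    from werkzeug import script

    def action_hello(name=('n', 'world'), shout=False):
        """Print a greeting."""
        msg = 'Hello %s!' % name
        print shout and msg.upper() or msg

    if __name__ == '__main__':
        script.run()    # e.g. "python manage.py hello -n you --shout"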
7,050
limodou/uliweb
uliweb/lib/werkzeug/script.py
fail
def fail(message, code=-1): """Fail with an error.""" print('Error: %s' % message, file=sys.stderr) sys.exit(code)
python
def fail(message, code=-1): """Fail with an error.""" print('Error: %s' % message, file=sys.stderr) sys.exit(code)
[ "def", "fail", "(", "message", ",", "code", "=", "-", "1", ")", ":", "print", "(", "'Error: %s'", "%", "message", ",", "file", "=", "sys", ".", "stderr", ")", "sys", ".", "exit", "(", "code", ")" ]
Fail with an error.
[ "Fail", "with", "an", "error", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L180-L183
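Usage is as simple as it looks; a one-line sketch:

    from werkzeug.script import fail
    fail('unknown action', code=2)   # writes "Error: unknown action" to stderr, then sys.exit(2)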
7,051
limodou/uliweb
uliweb/lib/werkzeug/script.py
find_actions
def find_actions(namespace, action_prefix): """Find all the actions in the namespace.""" actions = {} for key, value in iteritems(namespace): if key.startswith(action_prefix): actions[key[len(action_prefix):]] = analyse_action(value) return actions
python
def find_actions(namespace, action_prefix): """Find all the actions in the namespace.""" actions = {} for key, value in iteritems(namespace): if key.startswith(action_prefix): actions[key[len(action_prefix):]] = analyse_action(value) return actions
[ "def", "find_actions", "(", "namespace", ",", "action_prefix", ")", ":", "actions", "=", "{", "}", "for", "key", ",", "value", "in", "iteritems", "(", "namespace", ")", ":", "if", "key", ".", "startswith", "(", "action_prefix", ")", ":", "actions", "[", "key", "[", "len", "(", "action_prefix", ")", ":", "]", "]", "=", "analyse_action", "(", "value", ")", "return", "actions" ]
Find all the actions in the namespace.
[ "Find", "all", "the", "actions", "in", "the", "namespace", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L186-L192
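A small sketch of the mapping it builds; the exact tuple layout comes from analyse_action, shown in the next record:

    from werkzeug import script

    def action_build(target=('t', 'all')):
        """Build something."""

    print script.find_actions(globals(), 'action_')
    # roughly: {'build': (action_build, 'Build something.', [('target', 't', 'all', 'string')])}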
7,052
limodou/uliweb
uliweb/lib/werkzeug/script.py
analyse_action
def analyse_action(func): """Analyse a function.""" description = inspect.getdoc(func) or 'undocumented action' arguments = [] args, varargs, kwargs, defaults = inspect.getargspec(func) if varargs or kwargs: raise TypeError('variable length arguments for action not allowed.') if len(args) != len(defaults or ()): raise TypeError('not all arguments have proper definitions') for idx, (arg, definition) in enumerate(zip(args, defaults or ())): if arg.startswith('_'): raise TypeError('arguments may not start with an underscore') if not isinstance(definition, tuple): shortcut = None default = definition else: shortcut, default = definition argument_type = argument_types[type(default)] if isinstance(default, bool) and default is True: arg = 'no-' + arg arguments.append((arg.replace('_', '-'), shortcut, default, argument_type)) return func, description, arguments
python
def analyse_action(func): """Analyse a function.""" description = inspect.getdoc(func) or 'undocumented action' arguments = [] args, varargs, kwargs, defaults = inspect.getargspec(func) if varargs or kwargs: raise TypeError('variable length arguments for action not allowed.') if len(args) != len(defaults or ()): raise TypeError('not all arguments have proper definitions') for idx, (arg, definition) in enumerate(zip(args, defaults or ())): if arg.startswith('_'): raise TypeError('arguments may not start with an underscore') if not isinstance(definition, tuple): shortcut = None default = definition else: shortcut, default = definition argument_type = argument_types[type(default)] if isinstance(default, bool) and default is True: arg = 'no-' + arg arguments.append((arg.replace('_', '-'), shortcut, default, argument_type)) return func, description, arguments
[ "def", "analyse_action", "(", "func", ")", ":", "description", "=", "inspect", ".", "getdoc", "(", "func", ")", "or", "'undocumented action'", "arguments", "=", "[", "]", "args", ",", "varargs", ",", "kwargs", ",", "defaults", "=", "inspect", ".", "getargspec", "(", "func", ")", "if", "varargs", "or", "kwargs", ":", "raise", "TypeError", "(", "'variable length arguments for action not allowed.'", ")", "if", "len", "(", "args", ")", "!=", "len", "(", "defaults", "or", "(", ")", ")", ":", "raise", "TypeError", "(", "'not all arguments have proper definitions'", ")", "for", "idx", ",", "(", "arg", ",", "definition", ")", "in", "enumerate", "(", "zip", "(", "args", ",", "defaults", "or", "(", ")", ")", ")", ":", "if", "arg", ".", "startswith", "(", "'_'", ")", ":", "raise", "TypeError", "(", "'arguments may not start with an underscore'", ")", "if", "not", "isinstance", "(", "definition", ",", "tuple", ")", ":", "shortcut", "=", "None", "default", "=", "definition", "else", ":", "shortcut", ",", "default", "=", "definition", "argument_type", "=", "argument_types", "[", "type", "(", "default", ")", "]", "if", "isinstance", "(", "default", ",", "bool", ")", "and", "default", "is", "True", ":", "arg", "=", "'no-'", "+", "arg", "arguments", ".", "append", "(", "(", "arg", ".", "replace", "(", "'_'", ",", "'-'", ")", ",", "shortcut", ",", "default", ",", "argument_type", ")", ")", "return", "func", ",", "description", ",", "arguments" ]
Analyse a function.
[ "Analyse", "a", "function", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L222-L245
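A sketch of how defaults in the signature translate into argument definitions; note the True boolean default becoming a 'no-' flag:

    from werkzeug import script

    def action_serve(port=('p', 5000), reloader=True):
        """Serve the app."""

    func, doc, args = script.analyse_action(action_serve)
    # doc  -> 'Serve the app.'
    # args -> [('port', 'p', 5000, 'integer'), ('no-reloader', None, True, 'boolean')]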
7,053
limodou/uliweb
uliweb/lib/werkzeug/script.py
make_shell
def make_shell(init_func=None, banner=None, use_ipython=True): """Returns an action callback that spawns a new interactive python shell. :param init_func: an optional initialization function that is called before the shell is started. The return value of this function is the initial namespace. :param banner: the banner that is displayed before the shell. If not specified a generic banner is used instead. :param use_ipython: if set to `True` ipython is used if available. """ if banner is None: banner = 'Interactive Werkzeug Shell' if init_func is None: init_func = dict def action(ipython=use_ipython): """Start a new interactive python session.""" namespace = init_func() if ipython: try: try: from IPython.frontend.terminal.embed import InteractiveShellEmbed sh = InteractiveShellEmbed(banner1=banner) except ImportError: from IPython.Shell import IPShellEmbed sh = IPShellEmbed(banner=banner) except ImportError: pass else: sh(global_ns={}, local_ns=namespace) return from code import interact interact(banner, local=namespace) return action
python
def make_shell(init_func=None, banner=None, use_ipython=True): """Returns an action callback that spawns a new interactive python shell. :param init_func: an optional initialization function that is called before the shell is started. The return value of this function is the initial namespace. :param banner: the banner that is displayed before the shell. If not specified a generic banner is used instead. :param use_ipython: if set to `True` ipython is used if available. """ if banner is None: banner = 'Interactive Werkzeug Shell' if init_func is None: init_func = dict def action(ipython=use_ipython): """Start a new interactive python session.""" namespace = init_func() if ipython: try: try: from IPython.frontend.terminal.embed import InteractiveShellEmbed sh = InteractiveShellEmbed(banner1=banner) except ImportError: from IPython.Shell import IPShellEmbed sh = IPShellEmbed(banner=banner) except ImportError: pass else: sh(global_ns={}, local_ns=namespace) return from code import interact interact(banner, local=namespace) return action
[ "def", "make_shell", "(", "init_func", "=", "None", ",", "banner", "=", "None", ",", "use_ipython", "=", "True", ")", ":", "if", "banner", "is", "None", ":", "banner", "=", "'Interactive Werkzeug Shell'", "if", "init_func", "is", "None", ":", "init_func", "=", "dict", "def", "action", "(", "ipython", "=", "use_ipython", ")", ":", "\"\"\"Start a new interactive python session.\"\"\"", "namespace", "=", "init_func", "(", ")", "if", "ipython", ":", "try", ":", "try", ":", "from", "IPython", ".", "frontend", ".", "terminal", ".", "embed", "import", "InteractiveShellEmbed", "sh", "=", "InteractiveShellEmbed", "(", "banner1", "=", "banner", ")", "except", "ImportError", ":", "from", "IPython", ".", "Shell", "import", "IPShellEmbed", "sh", "=", "IPShellEmbed", "(", "banner", "=", "banner", ")", "except", "ImportError", ":", "pass", "else", ":", "sh", "(", "global_ns", "=", "{", "}", ",", "local_ns", "=", "namespace", ")", "return", "from", "code", "import", "interact", "interact", "(", "banner", ",", "local", "=", "namespace", ")", "return", "action" ]
Returns an action callback that spawns a new interactive python shell. :param init_func: an optional initialization function that is called before the shell is started. The return value of this function is the initial namespace. :param banner: the banner that is displayed before the shell. If not specified a generic banner is used instead. :param use_ipython: if set to `True` ipython is used if available.
[ "Returns", "an", "action", "callback", "that", "spawns", "a", "new", "interactive", "python", "shell", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L248-L281
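A manage-script sketch; myapp and the objects it exposes are placeholders:

    from werkzeug import script

    def init():
        from myapp import app, db      # hypothetical objects to expose in the shell
        return dict(app=app, db=db)

    action_shell = script.make_shell(init, banner='myapp shell')

    if __name__ == '__main__':
        script.run()                   # "python manage.py shell" drops into IPython if installed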
7,054
limodou/uliweb
uliweb/lib/werkzeug/script.py
make_runserver
def make_runserver(app_factory, hostname='localhost', port=5000, use_reloader=False, use_debugger=False, use_evalex=True, threaded=False, processes=1, static_files=None, extra_files=None, ssl_context=None): """Returns an action callback that spawns a new development server. .. versionadded:: 0.5 `static_files` and `extra_files` was added. ..versionadded:: 0.6.1 `ssl_context` was added. :param app_factory: a function that returns a new WSGI application. :param hostname: the default hostname the server should listen on. :param port: the default port of the server. :param use_reloader: the default setting for the reloader. :param use_evalex: the default setting for the evalex flag of the debugger. :param threaded: the default threading setting. :param processes: the default number of processes to start. :param static_files: optional dict of static files. :param extra_files: optional list of extra files to track for reloading. :param ssl_context: optional SSL context for running server in HTTPS mode. """ def action(hostname=('h', hostname), port=('p', port), reloader=use_reloader, debugger=use_debugger, evalex=use_evalex, threaded=threaded, processes=processes): """Start a new development server.""" from werkzeug.serving import run_simple app = app_factory() run_simple(hostname, port, app, reloader, debugger, evalex, extra_files, 1, threaded, processes, static_files=static_files, ssl_context=ssl_context) return action
python
def make_runserver(app_factory, hostname='localhost', port=5000, use_reloader=False, use_debugger=False, use_evalex=True, threaded=False, processes=1, static_files=None, extra_files=None, ssl_context=None): """Returns an action callback that spawns a new development server. .. versionadded:: 0.5 `static_files` and `extra_files` was added. ..versionadded:: 0.6.1 `ssl_context` was added. :param app_factory: a function that returns a new WSGI application. :param hostname: the default hostname the server should listen on. :param port: the default port of the server. :param use_reloader: the default setting for the reloader. :param use_evalex: the default setting for the evalex flag of the debugger. :param threaded: the default threading setting. :param processes: the default number of processes to start. :param static_files: optional dict of static files. :param extra_files: optional list of extra files to track for reloading. :param ssl_context: optional SSL context for running server in HTTPS mode. """ def action(hostname=('h', hostname), port=('p', port), reloader=use_reloader, debugger=use_debugger, evalex=use_evalex, threaded=threaded, processes=processes): """Start a new development server.""" from werkzeug.serving import run_simple app = app_factory() run_simple(hostname, port, app, reloader, debugger, evalex, extra_files, 1, threaded, processes, static_files=static_files, ssl_context=ssl_context) return action
[ "def", "make_runserver", "(", "app_factory", ",", "hostname", "=", "'localhost'", ",", "port", "=", "5000", ",", "use_reloader", "=", "False", ",", "use_debugger", "=", "False", ",", "use_evalex", "=", "True", ",", "threaded", "=", "False", ",", "processes", "=", "1", ",", "static_files", "=", "None", ",", "extra_files", "=", "None", ",", "ssl_context", "=", "None", ")", ":", "def", "action", "(", "hostname", "=", "(", "'h'", ",", "hostname", ")", ",", "port", "=", "(", "'p'", ",", "port", ")", ",", "reloader", "=", "use_reloader", ",", "debugger", "=", "use_debugger", ",", "evalex", "=", "use_evalex", ",", "threaded", "=", "threaded", ",", "processes", "=", "processes", ")", ":", "\"\"\"Start a new development server.\"\"\"", "from", "werkzeug", ".", "serving", "import", "run_simple", "app", "=", "app_factory", "(", ")", "run_simple", "(", "hostname", ",", "port", ",", "app", ",", "reloader", ",", "debugger", ",", "evalex", ",", "extra_files", ",", "1", ",", "threaded", ",", "processes", ",", "static_files", "=", "static_files", ",", "ssl_context", "=", "ssl_context", ")", "return", "action" ]
Returns an action callback that spawns a new development server. .. versionadded:: 0.5 `static_files` and `extra_files` was added. ..versionadded:: 0.6.1 `ssl_context` was added. :param app_factory: a function that returns a new WSGI application. :param hostname: the default hostname the server should listen on. :param port: the default port of the server. :param use_reloader: the default setting for the reloader. :param use_evalex: the default setting for the evalex flag of the debugger. :param threaded: the default threading setting. :param processes: the default number of processes to start. :param static_files: optional dict of static files. :param extra_files: optional list of extra files to track for reloading. :param ssl_context: optional SSL context for running server in HTTPS mode.
[ "Returns", "an", "action", "callback", "that", "spawns", "a", "new", "development", "server", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/script.py#L284-L316
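The companion sketch for the development server action; make_app and myapp are placeholders:

    from werkzeug import script

    def make_app():
        from myapp import application  # hypothetical WSGI application factory
        return application

    action_runserver = script.make_runserver(make_app, port=8000, use_reloader=True)

    if __name__ == '__main__':
        script.run()                   # e.g. "python manage.py runserver -p 8080"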
7,055
limodou/uliweb
uliweb/core/dispatch.py
unbind
def unbind(topic, func): """ Remove receiver function """ if topic in _receivers: receivers = _receivers[topic] for i in range(len(receivers)-1, -1, -1): nice, f = receivers[i] if (callable(func) and f['func'] == func) or (f['func_name'] == func): del receivers[i] return
python
def unbind(topic, func): """ Remove receiver function """ if topic in _receivers: receivers = _receivers[topic] for i in range(len(receivers)-1, -1, -1): nice, f = receivers[i] if (callable(func) and f['func'] == func) or (f['func_name'] == func): del receivers[i] return
[ "def", "unbind", "(", "topic", ",", "func", ")", ":", "if", "topic", "in", "_receivers", ":", "receivers", "=", "_receivers", "[", "topic", "]", "for", "i", "in", "range", "(", "len", "(", "receivers", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "nice", ",", "f", "=", "receivers", "[", "i", "]", "if", "(", "callable", "(", "func", ")", "and", "f", "[", "'func'", "]", "==", "func", ")", "or", "(", "f", "[", "'func_name'", "]", "==", "func", ")", ":", "del", "receivers", "[", "i", "]", "return" ]
Remove receiver function
[ "Remove", "receiver", "function" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/dispatch.py#L54-L64
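A sketch assuming the receiver was registered with dispatch.bind, which is how uliweb normally populates _receivers; the topic name and dotted path are only illustrations:

    from uliweb.core import dispatch

    @dispatch.bind('post_save')
    def audit(sender, **kwargs):
        print 'saved', sender

    dispatch.unbind('post_save', audit)                 # remove by callable
    dispatch.unbind('post_save', 'myapp.hooks.audit')   # or by dotted function name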
7,056
limodou/uliweb
uliweb/core/dispatch.py
call
def call(sender, topic, *args, **kwargs): """ Invoke receiver functions according topic, it'll invoke receiver functions one by one, and it'll not return anything, so if you want to return a value, you should use get function. """ if not topic in _receivers: return items = _receivers[topic] def _cmp(x, y): return cmp(x[0], y[0]) items.sort(_cmp) i = 0 while i<len(items): nice, f = items[i] i = i + 1 _f = f['func'] if not _f: try: _f = import_attr(f['func_name']) except (ImportError, AttributeError) as e: logging.error("Can't import function %s" % f['func_name']) raise f['func'] = _f if callable(_f): kw = kwargs.copy() if not _test(kw, f): continue try: _f(sender, *args, **kw) except: func = _f.__module__ + '.' + _f.__name__ logging.exception('Calling dispatch point [%s] %s(%r, %r) error!' % (topic, func, args, kw)) raise else: raise Exception, "Dispatch point [%s] %r can't been invoked" % (topic, _f)
python
def call(sender, topic, *args, **kwargs): """ Invoke receiver functions according topic, it'll invoke receiver functions one by one, and it'll not return anything, so if you want to return a value, you should use get function. """ if not topic in _receivers: return items = _receivers[topic] def _cmp(x, y): return cmp(x[0], y[0]) items.sort(_cmp) i = 0 while i<len(items): nice, f = items[i] i = i + 1 _f = f['func'] if not _f: try: _f = import_attr(f['func_name']) except (ImportError, AttributeError) as e: logging.error("Can't import function %s" % f['func_name']) raise f['func'] = _f if callable(_f): kw = kwargs.copy() if not _test(kw, f): continue try: _f(sender, *args, **kw) except: func = _f.__module__ + '.' + _f.__name__ logging.exception('Calling dispatch point [%s] %s(%r, %r) error!' % (topic, func, args, kw)) raise else: raise Exception, "Dispatch point [%s] %r can't been invoked" % (topic, _f)
[ "def", "call", "(", "sender", ",", "topic", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "topic", "in", "_receivers", ":", "return", "items", "=", "_receivers", "[", "topic", "]", "def", "_cmp", "(", "x", ",", "y", ")", ":", "return", "cmp", "(", "x", "[", "0", "]", ",", "y", "[", "0", "]", ")", "items", ".", "sort", "(", "_cmp", ")", "i", "=", "0", "while", "i", "<", "len", "(", "items", ")", ":", "nice", ",", "f", "=", "items", "[", "i", "]", "i", "=", "i", "+", "1", "_f", "=", "f", "[", "'func'", "]", "if", "not", "_f", ":", "try", ":", "_f", "=", "import_attr", "(", "f", "[", "'func_name'", "]", ")", "except", "(", "ImportError", ",", "AttributeError", ")", "as", "e", ":", "logging", ".", "error", "(", "\"Can't import function %s\"", "%", "f", "[", "'func_name'", "]", ")", "raise", "f", "[", "'func'", "]", "=", "_f", "if", "callable", "(", "_f", ")", ":", "kw", "=", "kwargs", ".", "copy", "(", ")", "if", "not", "_test", "(", "kw", ",", "f", ")", ":", "continue", "try", ":", "_f", "(", "sender", ",", "*", "args", ",", "*", "*", "kw", ")", "except", ":", "func", "=", "_f", ".", "__module__", "+", "'.'", "+", "_f", ".", "__name__", "logging", ".", "exception", "(", "'Calling dispatch point [%s] %s(%r, %r) error!'", "%", "(", "topic", ",", "func", ",", "args", ",", "kw", ")", ")", "raise", "else", ":", "raise", "Exception", ",", "\"Dispatch point [%s] %r can't been invoked\"", "%", "(", "topic", ",", "_f", ")" ]
Invoke receiver functions according to the topic. It invokes the receiver functions one by one and does not return anything, so if you want a return value, use the get function instead.
[ "Invoke", "receiver", "functions", "according", "topic", "it", "ll", "invoke", "receiver", "functions", "one", "by", "one", "and", "it", "ll", "not", "return", "anything", "so", "if", "you", "want", "to", "return", "a", "value", "you", "should", "use", "get", "function", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/dispatch.py#L82-L118
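A dispatch sketch; the topic and receiver are invented, and the receiver signature just has to accept the sender plus whatever keywords the caller passes:

    from uliweb.core import dispatch

    @dispatch.bind('request_finished')
    def log_request(sender, path):
        print 'finished', path

    dispatch.call(None, 'request_finished', path='/index')   # all receivers run, nothing is returned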
7,057
limodou/uliweb
uliweb/core/dispatch.py
get
def get(sender, topic, *args, **kwargs): """ Invoke receiver functions according topic, it'll invoke receiver functions one by one, and if one receiver function return non-None value, it'll return it and break the loop. """ if not topic in _receivers: return items = _receivers[topic] def _cmp(x, y): return cmp(x[0], y[0]) items.sort(_cmp) for i in range(len(items)): nice, f = items[i] _f = f['func'] if not _f: try: _f = import_attr(f['func_name']) except ImportError: logging.error("Can't import function %s" % f['func_name']) raise f['func'] = _f if callable(_f): if not _test(kwargs, f): continue try: v = _f(sender, *args, **kwargs) except: func = _f.__module__ + '.' + _f.__name__ logging.exception('Calling dispatch point [%s] %s(%r,%r) error!' % (topic, func, args, kwargs)) raise if v is not None: return v else: raise "Dispatch point [%s] %r can't been invoked" % (topic, _f)
python
def get(sender, topic, *args, **kwargs): """ Invoke receiver functions according topic, it'll invoke receiver functions one by one, and if one receiver function return non-None value, it'll return it and break the loop. """ if not topic in _receivers: return items = _receivers[topic] def _cmp(x, y): return cmp(x[0], y[0]) items.sort(_cmp) for i in range(len(items)): nice, f = items[i] _f = f['func'] if not _f: try: _f = import_attr(f['func_name']) except ImportError: logging.error("Can't import function %s" % f['func_name']) raise f['func'] = _f if callable(_f): if not _test(kwargs, f): continue try: v = _f(sender, *args, **kwargs) except: func = _f.__module__ + '.' + _f.__name__ logging.exception('Calling dispatch point [%s] %s(%r,%r) error!' % (topic, func, args, kwargs)) raise if v is not None: return v else: raise "Dispatch point [%s] %r can't been invoked" % (topic, _f)
[ "def", "get", "(", "sender", ",", "topic", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "topic", "in", "_receivers", ":", "return", "items", "=", "_receivers", "[", "topic", "]", "def", "_cmp", "(", "x", ",", "y", ")", ":", "return", "cmp", "(", "x", "[", "0", "]", ",", "y", "[", "0", "]", ")", "items", ".", "sort", "(", "_cmp", ")", "for", "i", "in", "range", "(", "len", "(", "items", ")", ")", ":", "nice", ",", "f", "=", "items", "[", "i", "]", "_f", "=", "f", "[", "'func'", "]", "if", "not", "_f", ":", "try", ":", "_f", "=", "import_attr", "(", "f", "[", "'func_name'", "]", ")", "except", "ImportError", ":", "logging", ".", "error", "(", "\"Can't import function %s\"", "%", "f", "[", "'func_name'", "]", ")", "raise", "f", "[", "'func'", "]", "=", "_f", "if", "callable", "(", "_f", ")", ":", "if", "not", "_test", "(", "kwargs", ",", "f", ")", ":", "continue", "try", ":", "v", "=", "_f", "(", "sender", ",", "*", "args", ",", "*", "*", "kwargs", ")", "except", ":", "func", "=", "_f", ".", "__module__", "+", "'.'", "+", "_f", ".", "__name__", "logging", ".", "exception", "(", "'Calling dispatch point [%s] %s(%r,%r) error!'", "%", "(", "topic", ",", "func", ",", "args", ",", "kwargs", ")", ")", "raise", "if", "v", "is", "not", "None", ":", "return", "v", "else", ":", "raise", "\"Dispatch point [%s] %r can't been invoked\"", "%", "(", "topic", ",", "_f", ")" ]
Invoke receiver functions according to the topic. It invokes the receiver functions one by one; as soon as one receiver returns a non-None value, that value is returned and the loop stops.
[ "Invoke", "receiver", "functions", "according", "topic", "it", "ll", "invoke", "receiver", "functions", "one", "by", "one", "and", "if", "one", "receiver", "function", "return", "non", "-", "None", "value", "it", "ll", "return", "it", "and", "break", "the", "loop", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/core/dispatch.py#L128-L163
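The same wiring as call(), but the first non-None return value is propagated back; names here are illustrative:

    from uliweb.core import dispatch

    @dispatch.bind('get_current_user')
    def fetch_user(sender, user_id):
        if user_id == 1:
            return 'admin'

    print dispatch.get(None, 'get_current_user', user_id=1)   # -> 'admin'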
7,058
limodou/uliweb
uliweb/utils/pyini.py
Ini.__read_line
def __read_line(self, f): """ Get logic line according the syntax not the physical line It'll return the line text and if there is identifier existed return line, bool """ g = tokenize.generate_tokens(f.readline) buf = [] time = 0 iden_existed = False while 1: v = g.next() tokentype, t, start, end, line = v if tokentype == 54: continue if tokentype in (token.INDENT, token.DEDENT, tokenize.COMMENT): continue if tokentype == token.NAME: iden_existed = True if tokentype == token.NEWLINE: return ''.join(buf), iden_existed else: if t == '=' and time == 0: time += 1 continue buf.append(t)
python
def __read_line(self, f): """ Get logic line according the syntax not the physical line It'll return the line text and if there is identifier existed return line, bool """ g = tokenize.generate_tokens(f.readline) buf = [] time = 0 iden_existed = False while 1: v = g.next() tokentype, t, start, end, line = v if tokentype == 54: continue if tokentype in (token.INDENT, token.DEDENT, tokenize.COMMENT): continue if tokentype == token.NAME: iden_existed = True if tokentype == token.NEWLINE: return ''.join(buf), iden_existed else: if t == '=' and time == 0: time += 1 continue buf.append(t)
[ "def", "__read_line", "(", "self", ",", "f", ")", ":", "g", "=", "tokenize", ".", "generate_tokens", "(", "f", ".", "readline", ")", "buf", "=", "[", "]", "time", "=", "0", "iden_existed", "=", "False", "while", "1", ":", "v", "=", "g", ".", "next", "(", ")", "tokentype", ",", "t", ",", "start", ",", "end", ",", "line", "=", "v", "if", "tokentype", "==", "54", ":", "continue", "if", "tokentype", "in", "(", "token", ".", "INDENT", ",", "token", ".", "DEDENT", ",", "tokenize", ".", "COMMENT", ")", ":", "continue", "if", "tokentype", "==", "token", ".", "NAME", ":", "iden_existed", "=", "True", "if", "tokentype", "==", "token", ".", "NEWLINE", ":", "return", "''", ".", "join", "(", "buf", ")", ",", "iden_existed", "else", ":", "if", "t", "==", "'='", "and", "time", "==", "0", ":", "time", "+=", "1", "continue", "buf", ".", "append", "(", "t", ")" ]
Get a logical line according to the syntax rather than the physical line. It returns the line text and whether an identifier was present: return line, bool
[ "Get", "logic", "line", "according", "the", "syntax", "not", "the", "physical", "line", "It", "ll", "return", "the", "line", "text", "and", "if", "there", "is", "identifier", "existed", "return", "line", "bool" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/pyini.py#L611-L638
7,059
limodou/uliweb
uliweb/utils/pyini.py
Ini.freeze
def freeze(self): """ Process all EvalValue to real value """ self._lazy = False for k, v in self.items(): if k in self._env: continue for _k, _v in v.items(): if isinstance(_v, Lazy): if self.writable: _v.get() else: try: v.__setitem__(_k, _v.get(), replace=True) except: print "Error ini key:", _k raise del _v self._globals = SortedDict()
python
def freeze(self): """ Process all EvalValue to real value """ self._lazy = False for k, v in self.items(): if k in self._env: continue for _k, _v in v.items(): if isinstance(_v, Lazy): if self.writable: _v.get() else: try: v.__setitem__(_k, _v.get(), replace=True) except: print "Error ini key:", _k raise del _v self._globals = SortedDict()
[ "def", "freeze", "(", "self", ")", ":", "self", ".", "_lazy", "=", "False", "for", "k", ",", "v", "in", "self", ".", "items", "(", ")", ":", "if", "k", "in", "self", ".", "_env", ":", "continue", "for", "_k", ",", "_v", "in", "v", ".", "items", "(", ")", ":", "if", "isinstance", "(", "_v", ",", "Lazy", ")", ":", "if", "self", ".", "writable", ":", "_v", ".", "get", "(", ")", "else", ":", "try", ":", "v", ".", "__setitem__", "(", "_k", ",", "_v", ".", "get", "(", ")", ",", "replace", "=", "True", ")", "except", ":", "print", "\"Error ini key:\"", ",", "_k", "raise", "del", "_v", "self", ".", "_globals", "=", "SortedDict", "(", ")" ]
Process all EvalValue instances into real values
[ "Process", "all", "EvalValue", "to", "real", "value" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/pyini.py#L708-L727
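A sketch of the lazy-then-freeze cycle uliweb itself uses for settings, assuming Ini accepts the lazy keyword and read() as used by the settings loader; the filename is a placeholder:

    from uliweb.utils.pyini import Ini

    x = Ini(lazy=True)          # lazy mode leaves Lazy/EvalValue placeholders unresolved
    x.read('settings.ini')      # assumed to exist
    x.freeze()                  # resolve every Lazy value in place and drop the globals cache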
7,060
limodou/uliweb
uliweb/lib/werkzeug/contrib/securecookie.py
SecureCookie.serialize
def serialize(self, expires=None): """Serialize the secure cookie into a string. If expires is provided, the session will be automatically invalidated after expiration when you unseralize it. This provides better protection against session cookie theft. :param expires: an optional expiration date for the cookie (a :class:`datetime.datetime` object) """ if self.secret_key is None: raise RuntimeError('no secret key defined') if expires: self['_expires'] = _date_to_unix(expires) result = [] mac = hmac(self.secret_key, None, self.hash_method) for key, value in sorted(self.items()): result.append(('%s=%s' % ( url_quote_plus(key), self.quote(value).decode('ascii') )).encode('ascii')) mac.update(b'|' + result[-1]) return b'?'.join([ base64.b64encode(mac.digest()).strip(), b'&'.join(result) ])
python
def serialize(self, expires=None): """Serialize the secure cookie into a string. If expires is provided, the session will be automatically invalidated after expiration when you unseralize it. This provides better protection against session cookie theft. :param expires: an optional expiration date for the cookie (a :class:`datetime.datetime` object) """ if self.secret_key is None: raise RuntimeError('no secret key defined') if expires: self['_expires'] = _date_to_unix(expires) result = [] mac = hmac(self.secret_key, None, self.hash_method) for key, value in sorted(self.items()): result.append(('%s=%s' % ( url_quote_plus(key), self.quote(value).decode('ascii') )).encode('ascii')) mac.update(b'|' + result[-1]) return b'?'.join([ base64.b64encode(mac.digest()).strip(), b'&'.join(result) ])
[ "def", "serialize", "(", "self", ",", "expires", "=", "None", ")", ":", "if", "self", ".", "secret_key", "is", "None", ":", "raise", "RuntimeError", "(", "'no secret key defined'", ")", "if", "expires", ":", "self", "[", "'_expires'", "]", "=", "_date_to_unix", "(", "expires", ")", "result", "=", "[", "]", "mac", "=", "hmac", "(", "self", ".", "secret_key", ",", "None", ",", "self", ".", "hash_method", ")", "for", "key", ",", "value", "in", "sorted", "(", "self", ".", "items", "(", ")", ")", ":", "result", ".", "append", "(", "(", "'%s=%s'", "%", "(", "url_quote_plus", "(", "key", ")", ",", "self", ".", "quote", "(", "value", ")", ".", "decode", "(", "'ascii'", ")", ")", ")", ".", "encode", "(", "'ascii'", ")", ")", "mac", ".", "update", "(", "b'|'", "+", "result", "[", "-", "1", "]", ")", "return", "b'?'", ".", "join", "(", "[", "base64", ".", "b64encode", "(", "mac", ".", "digest", "(", ")", ")", ".", "strip", "(", ")", ",", "b'&'", ".", "join", "(", "result", ")", "]", ")" ]
Serialize the secure cookie into a string. If expires is provided, the session will be automatically invalidated after expiration when you unserialize it. This provides better protection against session cookie theft. :param expires: an optional expiration date for the cookie (a :class:`datetime.datetime` object)
[ "Serialize", "the", "secure", "cookie", "into", "a", "string", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/securecookie.py#L203-L228
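A round-trip sketch; the secret is a placeholder and must obviously stay private in real use:

    from datetime import datetime, timedelta
    from werkzeug.contrib.securecookie import SecureCookie

    c = SecureCookie({'user_id': 42}, secret_key='not-a-real-secret')
    blob = c.serialize(expires=datetime.utcnow() + timedelta(hours=1))
    # blob is bytes: the base64 HMAC, a '?', then the url-quoted key/value pairs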
7,061
limodou/uliweb
uliweb/lib/werkzeug/contrib/securecookie.py
SecureCookie.unserialize
def unserialize(cls, string, secret_key): """Load the secure cookie from a serialized string. :param string: the cookie value to unserialize. :param secret_key: the secret key used to serialize the cookie. :return: a new :class:`SecureCookie`. """ if isinstance(string, text_type): string = string.encode('utf-8', 'replace') if isinstance(secret_key, text_type): secret_key = secret_key.encode('utf-8', 'replace') try: base64_hash, data = string.split(b'?', 1) except (ValueError, IndexError): items = () else: items = {} mac = hmac(secret_key, None, cls.hash_method) for item in data.split(b'&'): mac.update(b'|' + item) if not b'=' in item: items = None break key, value = item.split(b'=', 1) # try to make the key a string key = url_unquote_plus(key.decode('ascii')) try: key = to_native(key) except UnicodeError: pass items[key] = value # no parsing error and the mac looks okay, we can now # sercurely unpickle our cookie. try: client_hash = base64.b64decode(base64_hash) except TypeError: items = client_hash = None if items is not None and safe_str_cmp(client_hash, mac.digest()): try: for key, value in iteritems(items): items[key] = cls.unquote(value) except UnquoteError: items = () else: if '_expires' in items: if time() > items['_expires']: items = () else: del items['_expires'] else: items = () return cls(items, secret_key, False)
python
def unserialize(cls, string, secret_key): """Load the secure cookie from a serialized string. :param string: the cookie value to unserialize. :param secret_key: the secret key used to serialize the cookie. :return: a new :class:`SecureCookie`. """ if isinstance(string, text_type): string = string.encode('utf-8', 'replace') if isinstance(secret_key, text_type): secret_key = secret_key.encode('utf-8', 'replace') try: base64_hash, data = string.split(b'?', 1) except (ValueError, IndexError): items = () else: items = {} mac = hmac(secret_key, None, cls.hash_method) for item in data.split(b'&'): mac.update(b'|' + item) if not b'=' in item: items = None break key, value = item.split(b'=', 1) # try to make the key a string key = url_unquote_plus(key.decode('ascii')) try: key = to_native(key) except UnicodeError: pass items[key] = value # no parsing error and the mac looks okay, we can now # sercurely unpickle our cookie. try: client_hash = base64.b64decode(base64_hash) except TypeError: items = client_hash = None if items is not None and safe_str_cmp(client_hash, mac.digest()): try: for key, value in iteritems(items): items[key] = cls.unquote(value) except UnquoteError: items = () else: if '_expires' in items: if time() > items['_expires']: items = () else: del items['_expires'] else: items = () return cls(items, secret_key, False)
[ "def", "unserialize", "(", "cls", ",", "string", ",", "secret_key", ")", ":", "if", "isinstance", "(", "string", ",", "text_type", ")", ":", "string", "=", "string", ".", "encode", "(", "'utf-8'", ",", "'replace'", ")", "if", "isinstance", "(", "secret_key", ",", "text_type", ")", ":", "secret_key", "=", "secret_key", ".", "encode", "(", "'utf-8'", ",", "'replace'", ")", "try", ":", "base64_hash", ",", "data", "=", "string", ".", "split", "(", "b'?'", ",", "1", ")", "except", "(", "ValueError", ",", "IndexError", ")", ":", "items", "=", "(", ")", "else", ":", "items", "=", "{", "}", "mac", "=", "hmac", "(", "secret_key", ",", "None", ",", "cls", ".", "hash_method", ")", "for", "item", "in", "data", ".", "split", "(", "b'&'", ")", ":", "mac", ".", "update", "(", "b'|'", "+", "item", ")", "if", "not", "b'='", "in", "item", ":", "items", "=", "None", "break", "key", ",", "value", "=", "item", ".", "split", "(", "b'='", ",", "1", ")", "# try to make the key a string", "key", "=", "url_unquote_plus", "(", "key", ".", "decode", "(", "'ascii'", ")", ")", "try", ":", "key", "=", "to_native", "(", "key", ")", "except", "UnicodeError", ":", "pass", "items", "[", "key", "]", "=", "value", "# no parsing error and the mac looks okay, we can now", "# sercurely unpickle our cookie.", "try", ":", "client_hash", "=", "base64", ".", "b64decode", "(", "base64_hash", ")", "except", "TypeError", ":", "items", "=", "client_hash", "=", "None", "if", "items", "is", "not", "None", "and", "safe_str_cmp", "(", "client_hash", ",", "mac", ".", "digest", "(", ")", ")", ":", "try", ":", "for", "key", ",", "value", "in", "iteritems", "(", "items", ")", ":", "items", "[", "key", "]", "=", "cls", ".", "unquote", "(", "value", ")", "except", "UnquoteError", ":", "items", "=", "(", ")", "else", ":", "if", "'_expires'", "in", "items", ":", "if", "time", "(", ")", ">", "items", "[", "'_expires'", "]", ":", "items", "=", "(", ")", "else", ":", "del", "items", "[", "'_expires'", "]", "else", ":", "items", "=", "(", ")", "return", "cls", "(", "items", ",", "secret_key", ",", "False", ")" ]
Load the secure cookie from a serialized string. :param string: the cookie value to unserialize. :param secret_key: the secret key used to serialize the cookie. :return: a new :class:`SecureCookie`.
[ "Load", "the", "secure", "cookie", "from", "a", "serialized", "string", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/contrib/securecookie.py#L231-L283
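Continuing the sketch above: loading the value back, with the failure modes the code handles silently:

    data = SecureCookie.unserialize(blob, 'not-a-real-secret')
    print data['user_id']                                # -> 42 while the MAC matches and it has not expired
    empty = SecureCookie.unserialize(blob, 'wrong-key')  # tampering or a bad key yields an empty cookie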
7,062
limodou/uliweb
uliweb/contrib/model_config/__init__.py
find_model
def find_model(sender, model_name): """ Register new model to ORM """ MC = get_mc() model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!='')) if model: model_inst = model.get_instance() orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='') return orm.__models__.get(model_name)
python
def find_model(sender, model_name): """ Register new model to ORM """ MC = get_mc() model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!='')) if model: model_inst = model.get_instance() orm.set_model(model_name, model_inst.table_name, appname=__name__, model_path='') return orm.__models__.get(model_name)
[ "def", "find_model", "(", "sender", ",", "model_name", ")", ":", "MC", "=", "get_mc", "(", ")", "model", "=", "MC", ".", "get", "(", "(", "MC", ".", "c", ".", "model_name", "==", "model_name", ")", "&", "(", "MC", ".", "c", ".", "uuid", "!=", "''", ")", ")", "if", "model", ":", "model_inst", "=", "model", ".", "get_instance", "(", ")", "orm", ".", "set_model", "(", "model_name", ",", "model_inst", ".", "table_name", ",", "appname", "=", "__name__", ",", "model_path", "=", "''", ")", "return", "orm", ".", "__models__", ".", "get", "(", "model_name", ")" ]
Register new model to ORM
[ "Register", "new", "model", "to", "ORM" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/model_config/__init__.py#L20-L29
7,063
limodou/uliweb
uliweb/contrib/model_config/__init__.py
get_model_fields
def get_model_fields(model, add_reserver_flag=True): """ Creating fields suit for model_config , id will be skipped. """ import uliweb.orm as orm fields = [] m = {'type':'type_name', 'hint':'hint', 'default':'default', 'required':'required'} m1 = {'index':'index', 'unique':'unique'} for name, prop in model.properties.items(): if name == 'id': continue d = {} for k, v in m.items(): d[k] = getattr(prop, v) for k, v in m1.items(): d[k] = bool(prop.kwargs.get(v)) d['name'] = prop.fieldname or name d['verbose_name'] = unicode(prop.verbose_name) d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__)) if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'): d['max_length'] = prop.max_length if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'): d['reference_class'] = prop.reference_class #collection_name will be _collection_name, it the original value d['collection_name'] = prop._collection_name d['server_default'] = prop.kwargs.get('server_default') d['_reserved'] = True fields.append(d) return fields
python
def get_model_fields(model, add_reserver_flag=True): """ Creating fields suit for model_config , id will be skipped. """ import uliweb.orm as orm fields = [] m = {'type':'type_name', 'hint':'hint', 'default':'default', 'required':'required'} m1 = {'index':'index', 'unique':'unique'} for name, prop in model.properties.items(): if name == 'id': continue d = {} for k, v in m.items(): d[k] = getattr(prop, v) for k, v in m1.items(): d[k] = bool(prop.kwargs.get(v)) d['name'] = prop.fieldname or name d['verbose_name'] = unicode(prop.verbose_name) d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__)) if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'): d['max_length'] = prop.max_length if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'): d['reference_class'] = prop.reference_class #collection_name will be _collection_name, it the original value d['collection_name'] = prop._collection_name d['server_default'] = prop.kwargs.get('server_default') d['_reserved'] = True fields.append(d) return fields
[ "def", "get_model_fields", "(", "model", ",", "add_reserver_flag", "=", "True", ")", ":", "import", "uliweb", ".", "orm", "as", "orm", "fields", "=", "[", "]", "m", "=", "{", "'type'", ":", "'type_name'", ",", "'hint'", ":", "'hint'", ",", "'default'", ":", "'default'", ",", "'required'", ":", "'required'", "}", "m1", "=", "{", "'index'", ":", "'index'", ",", "'unique'", ":", "'unique'", "}", "for", "name", ",", "prop", "in", "model", ".", "properties", ".", "items", "(", ")", ":", "if", "name", "==", "'id'", ":", "continue", "d", "=", "{", "}", "for", "k", ",", "v", "in", "m", ".", "items", "(", ")", ":", "d", "[", "k", "]", "=", "getattr", "(", "prop", ",", "v", ")", "for", "k", ",", "v", "in", "m1", ".", "items", "(", ")", ":", "d", "[", "k", "]", "=", "bool", "(", "prop", ".", "kwargs", ".", "get", "(", "v", ")", ")", "d", "[", "'name'", "]", "=", "prop", ".", "fieldname", "or", "name", "d", "[", "'verbose_name'", "]", "=", "unicode", "(", "prop", ".", "verbose_name", ")", "d", "[", "'nullable'", "]", "=", "bool", "(", "prop", ".", "kwargs", ".", "get", "(", "'nullable'", ",", "orm", ".", "__nullable__", ")", ")", "if", "d", "[", "'type'", "]", "in", "(", "'VARCHAR'", ",", "'CHAR'", ",", "'BINARY'", ",", "'VARBINARY'", ")", ":", "d", "[", "'max_length'", "]", "=", "prop", ".", "max_length", "if", "d", "[", "'type'", "]", "in", "(", "'Reference'", ",", "'OneToOne'", ",", "'ManyToMany'", ")", ":", "d", "[", "'reference_class'", "]", "=", "prop", ".", "reference_class", "#collection_name will be _collection_name, it the original value", "d", "[", "'collection_name'", "]", "=", "prop", ".", "_collection_name", "d", "[", "'server_default'", "]", "=", "prop", ".", "kwargs", ".", "get", "(", "'server_default'", ")", "d", "[", "'_reserved'", "]", "=", "True", "fields", ".", "append", "(", "d", ")", "return", "fields" ]
Create fields suited for model_config; the id field will be skipped.
[ "Creating", "fields", "suit", "for", "model_config", "id", "will", "be", "skipped", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/model_config/__init__.py#L69-L100
7,064
limodou/uliweb
uliweb/contrib/model_config/__init__.py
get_model_indexes
def get_model_indexes(model, add_reserver_flag=True): """ Creating indexes suit for model_config. """ import uliweb.orm as orm from sqlalchemy.engine.reflection import Inspector indexes = [] engine = model.get_engine().engine insp = Inspector.from_engine(engine) for index in insp.get_indexes(model.tablename): d = {} d['name'] = index['name'] d['unique'] = index['unique'] d['fields'] = index['column_names'] if add_reserver_flag: d['_reserved'] = True indexes.append(d) return indexes
python
def get_model_indexes(model, add_reserver_flag=True): """ Creating indexes suit for model_config. """ import uliweb.orm as orm from sqlalchemy.engine.reflection import Inspector indexes = [] engine = model.get_engine().engine insp = Inspector.from_engine(engine) for index in insp.get_indexes(model.tablename): d = {} d['name'] = index['name'] d['unique'] = index['unique'] d['fields'] = index['column_names'] if add_reserver_flag: d['_reserved'] = True indexes.append(d) return indexes
[ "def", "get_model_indexes", "(", "model", ",", "add_reserver_flag", "=", "True", ")", ":", "import", "uliweb", ".", "orm", "as", "orm", "from", "sqlalchemy", ".", "engine", ".", "reflection", "import", "Inspector", "indexes", "=", "[", "]", "engine", "=", "model", ".", "get_engine", "(", ")", ".", "engine", "insp", "=", "Inspector", ".", "from_engine", "(", "engine", ")", "for", "index", "in", "insp", ".", "get_indexes", "(", "model", ".", "tablename", ")", ":", "d", "=", "{", "}", "d", "[", "'name'", "]", "=", "index", "[", "'name'", "]", "d", "[", "'unique'", "]", "=", "index", "[", "'unique'", "]", "d", "[", "'fields'", "]", "=", "index", "[", "'column_names'", "]", "if", "add_reserver_flag", ":", "d", "[", "'_reserved'", "]", "=", "True", "indexes", ".", "append", "(", "d", ")", "return", "indexes" ]
Create indexes suited for model_config.
[ "Creating", "indexes", "suit", "for", "model_config", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/model_config/__init__.py#L102-L121
7,065
limodou/uliweb
uliweb/utils/timeit.py
timeit
def timeit(output): """ If output is string, then print the string and also time used """ b = time.time() yield print output, 'time used: %.3fs' % (time.time()-b)
python
def timeit(output): """ If output is string, then print the string and also time used """ b = time.time() yield print output, 'time used: %.3fs' % (time.time()-b)
[ "def", "timeit", "(", "output", ")", ":", "b", "=", "time", ".", "time", "(", ")", "yield", "print", "output", ",", "'time used: %.3fs'", "%", "(", "time", ".", "time", "(", ")", "-", "b", ")" ]
If output is a string, then print the string along with the time used
[ "If", "output", "is", "string", "then", "print", "the", "string", "and", "also", "time", "used" ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/utils/timeit.py#L5-L11
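A minimal usage sketch for the timeit helper above. It re-creates the body locally because the record starts at the function definition: the real helper is presumably wrapped with contextlib.contextmanager (the yield-based body suggests it), and the Python 3 print call replaces the original print statement.

from contextlib import contextmanager
import time

@contextmanager
def timeit_sketch(output):
    # same structure as the helper above: note the start time, yield to the
    # wrapped block, then report the elapsed time on exit
    b = time.time()
    yield
    print('%s time used: %.3fs' % (output, time.time() - b))

with timeit_sketch('building index'):
    total = sum(i * i for i in range(100000))
# prints something like: building index time used: 0.012s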
7,066
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
host_is_trusted
def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. """ if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ':' in hostname: hostname = hostname.rsplit(':', 1)[0] return _encode_idna(hostname) hostname = _normalize(hostname) for ref in trusted_list: if ref.startswith('.'): ref = ref[1:] suffix_match = True else: suffix_match = False ref = _normalize(ref) if ref == hostname: return True if suffix_match and hostname.endswith('.' + ref): return True return False
python
def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. """ if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ':' in hostname: hostname = hostname.rsplit(':', 1)[0] return _encode_idna(hostname) hostname = _normalize(hostname) for ref in trusted_list: if ref.startswith('.'): ref = ref[1:] suffix_match = True else: suffix_match = False ref = _normalize(ref) if ref == hostname: return True if suffix_match and hostname.endswith('.' + ref): return True return False
[ "def", "host_is_trusted", "(", "hostname", ",", "trusted_list", ")", ":", "if", "not", "hostname", ":", "return", "False", "if", "isinstance", "(", "trusted_list", ",", "string_types", ")", ":", "trusted_list", "=", "[", "trusted_list", "]", "def", "_normalize", "(", "hostname", ")", ":", "if", "':'", "in", "hostname", ":", "hostname", "=", "hostname", ".", "rsplit", "(", "':'", ",", "1", ")", "[", "0", "]", "return", "_encode_idna", "(", "hostname", ")", "hostname", "=", "_normalize", "(", "hostname", ")", "for", "ref", "in", "trusted_list", ":", "if", "ref", ".", "startswith", "(", "'.'", ")", ":", "ref", "=", "ref", "[", "1", ":", "]", "suffix_match", "=", "True", "else", ":", "suffix_match", "=", "False", "ref", "=", "_normalize", "(", "ref", ")", "if", "ref", "==", "hostname", ":", "return", "True", "if", "suffix_match", "and", "hostname", ".", "endswith", "(", "'.'", "+", "ref", ")", ":", "return", "True", "return", "False" ]
Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well.
[ "Checks", "if", "a", "host", "is", "trusted", "against", "a", "list", ".", "This", "also", "takes", "care", "of", "port", "normalization", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L85-L119
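A hedged usage sketch for host_is_trusted: entries that start with a dot match the domain and all of its subdomains, and any port is stripped before comparison. The import path matches the bundled 0.9-era copy above; newer Werkzeug releases may expose the function elsewhere.

from werkzeug.wsgi import host_is_trusted  # path of the bundled copy shown above

trusted = ['.example.com', 'localhost']
host_is_trusted('api.example.com:8443', trusted)  # True: subdomain match, port ignored
host_is_trusted('example.com', trusted)           # True: a dot-prefixed entry also matches the bare domain
host_is_trusted('evil.com', trusted)              # False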
7,067
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
get_content_length
def get_content_length(environ): """Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from. """ content_length = environ.get('CONTENT_LENGTH') if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass
python
def get_content_length(environ): """Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from. """ content_length = environ.get('CONTENT_LENGTH') if content_length is not None: try: return max(0, int(content_length)) except (ValueError, TypeError): pass
[ "def", "get_content_length", "(", "environ", ")", ":", "content_length", "=", "environ", ".", "get", "(", "'CONTENT_LENGTH'", ")", "if", "content_length", "is", "not", "None", ":", "try", ":", "return", "max", "(", "0", ",", "int", "(", "content_length", ")", ")", "except", "(", "ValueError", ",", "TypeError", ")", ":", "pass" ]
Returns the content length from the WSGI environment as integer. If it's not available `None` is returned. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the content length from.
[ "Returns", "the", "content", "length", "from", "the", "WSGI", "environment", "as", "integer", ".", "If", "it", "s", "not", "available", "None", "is", "returned", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L148-L161
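A few illustrative calls for get_content_length, assuming the bundled copy above is importable as werkzeug.wsgi.get_content_length; malformed or missing headers yield None, and negative values are clamped to 0.

from werkzeug.wsgi import get_content_length

get_content_length({'CONTENT_LENGTH': '42'})    # 42
get_content_length({'CONTENT_LENGTH': 'oops'})  # None: int() raises ValueError, which is swallowed
get_content_length({})                          # None: header absent
get_content_length({'CONTENT_LENGTH': '-5'})    # 0: clamped by max(0, ...)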
7,068
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
get_input_stream
def get_input_stream(environ, safe_fallback=True): """Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe: indicates weather the function should use an empty stream as safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty string in those cases. """ stream = environ['wsgi.input'] content_length = get_content_length(environ) # A wsgi extension that tells us if the input is terminated. In # that case we return the stream unchanged as we know we can savely # read it until the end. if environ.get('wsgi.input_terminated'): return stream # If we don't have a content length we fall back to an empty stream # in case of a safe fallback, otherwise we return the stream unchanged. # The non-safe fallback is not recommended but might be useful in # some situations. if content_length is None: return safe_fallback and _empty_stream or stream # Otherwise limit the stream to the content length return LimitedStream(stream, content_length)
python
def get_input_stream(environ, safe_fallback=True): """Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe: indicates weather the function should use an empty stream as safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty string in those cases. """ stream = environ['wsgi.input'] content_length = get_content_length(environ) # A wsgi extension that tells us if the input is terminated. In # that case we return the stream unchanged as we know we can savely # read it until the end. if environ.get('wsgi.input_terminated'): return stream # If we don't have a content length we fall back to an empty stream # in case of a safe fallback, otherwise we return the stream unchanged. # The non-safe fallback is not recommended but might be useful in # some situations. if content_length is None: return safe_fallback and _empty_stream or stream # Otherwise limit the stream to the content length return LimitedStream(stream, content_length)
[ "def", "get_input_stream", "(", "environ", ",", "safe_fallback", "=", "True", ")", ":", "stream", "=", "environ", "[", "'wsgi.input'", "]", "content_length", "=", "get_content_length", "(", "environ", ")", "# A wsgi extension that tells us if the input is terminated. In", "# that case we return the stream unchanged as we know we can savely", "# read it until the end.", "if", "environ", ".", "get", "(", "'wsgi.input_terminated'", ")", ":", "return", "stream", "# If we don't have a content length we fall back to an empty stream", "# in case of a safe fallback, otherwise we return the stream unchanged.", "# The non-safe fallback is not recommended but might be useful in", "# some situations.", "if", "content_length", "is", "None", ":", "return", "safe_fallback", "and", "_empty_stream", "or", "stream", "# Otherwise limit the stream to the content length", "return", "LimitedStream", "(", "stream", ",", "content_length", ")" ]
Returns the input stream from the WSGI environment and wraps it in the most sensible way possible. The stream returned is not the raw WSGI stream in most cases but one that is safe to read from without taking into account the content length. .. versionadded:: 0.9 :param environ: the WSGI environ to fetch the stream from. :param safe_fallback: indicates whether the function should use an empty stream as a safe fallback or just return the original WSGI input stream if it can't wrap it safely. The default is to return an empty stream in those cases.
[ "Returns", "the", "input", "stream", "from", "the", "WSGI", "environment", "and", "wraps", "it", "in", "the", "most", "sensible", "way", "possible", ".", "The", "stream", "returned", "is", "not", "the", "raw", "WSGI", "stream", "in", "most", "cases", "but", "one", "that", "is", "safe", "to", "read", "from", "without", "taking", "into", "account", "the", "content", "length", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L164-L195
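A hedged sketch of the wrapping behaviour: with a declared content length the raw wsgi.input is wrapped in a LimitedStream, so a plain read() stops at the announced body size instead of running past it. The import path and the LimitedStream read semantics are taken from the same bundled module.

from io import BytesIO
from werkzeug.wsgi import get_input_stream  # bundled copy shown above

environ = {
    'wsgi.input': BytesIO(b'hello world'),
    'CONTENT_LENGTH': '5',
}
stream = get_input_stream(environ)
stream.read()  # b'hello' -- the LimitedStream stops at CONTENT_LENGTH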
7,069
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
get_path_info
def get_path_info(environ, charset='utf-8', errors='replace'): """Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('PATH_INFO', '')) return to_unicode(path, charset, errors, allow_none_charset=True)
python
def get_path_info(environ, charset='utf-8', errors='replace'): """Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care about the WSGI decoding dance on Python 3 environments. if the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling. """ path = wsgi_get_bytes(environ.get('PATH_INFO', '')) return to_unicode(path, charset, errors, allow_none_charset=True)
[ "def", "get_path_info", "(", "environ", ",", "charset", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", ":", "path", "=", "wsgi_get_bytes", "(", "environ", ".", "get", "(", "'PATH_INFO'", ",", "''", ")", ")", "return", "to_unicode", "(", "path", ",", "charset", ",", "errors", ",", "allow_none_charset", "=", "True", ")" ]
Returns the `PATH_INFO` from the WSGI environment and properly decodes it. This also takes care of the WSGI decoding dance on Python 3 environments. If the `charset` is set to `None` a bytestring is returned. .. versionadded:: 0.9 :param environ: the WSGI environment object to get the path from. :param charset: the charset for the path info, or `None` if no decoding should be performed. :param errors: the decoding error handling.
[ "Returns", "the", "PATH_INFO", "from", "the", "WSGI", "environment", "and", "properly", "decodes", "it", ".", "This", "also", "takes", "care", "about", "the", "WSGI", "decoding", "dance", "on", "Python", "3", "environments", ".", "if", "the", "charset", "is", "set", "to", "None", "a", "bytestring", "is", "returned", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L215-L229
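A short sketch of the decoding behaviour described above; passing charset=None skips decoding and returns the raw WSGI bytes. The import path is the bundled copy shown in this record.

from werkzeug.wsgi import get_path_info

environ = {'PATH_INFO': '/hello/world'}
get_path_info(environ)                 # u'/hello/world', decoded with the default UTF-8 charset
get_path_info(environ, charset=None)   # the undecoded bytestring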
7,070
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
pop_path_info
def pop_path_info(environ, charset='utf-8', errors='replace'): """Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. """ path = environ.get('PATH_INFO') if not path: return None script_name = environ.get('SCRIPT_NAME', '') # shift multiple leading slashes over old_path = path path = path.lstrip('/') if path != old_path: script_name += '/' * (len(old_path) - len(path)) if '/' not in path: environ['PATH_INFO'] = '' environ['SCRIPT_NAME'] = script_name + path rv = wsgi_get_bytes(path) else: segment, path = path.split('/', 1) environ['PATH_INFO'] = '/' + path environ['SCRIPT_NAME'] = script_name + segment rv = wsgi_get_bytes(segment) return to_unicode(rv, charset, errors, allow_none_charset=True)
python
def pop_path_info(environ, charset='utf-8', errors='replace'): """Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified. """ path = environ.get('PATH_INFO') if not path: return None script_name = environ.get('SCRIPT_NAME', '') # shift multiple leading slashes over old_path = path path = path.lstrip('/') if path != old_path: script_name += '/' * (len(old_path) - len(path)) if '/' not in path: environ['PATH_INFO'] = '' environ['SCRIPT_NAME'] = script_name + path rv = wsgi_get_bytes(path) else: segment, path = path.split('/', 1) environ['PATH_INFO'] = '/' + path environ['SCRIPT_NAME'] = script_name + segment rv = wsgi_get_bytes(segment) return to_unicode(rv, charset, errors, allow_none_charset=True)
[ "def", "pop_path_info", "(", "environ", ",", "charset", "=", "'utf-8'", ",", "errors", "=", "'replace'", ")", ":", "path", "=", "environ", ".", "get", "(", "'PATH_INFO'", ")", "if", "not", "path", ":", "return", "None", "script_name", "=", "environ", ".", "get", "(", "'SCRIPT_NAME'", ",", "''", ")", "# shift multiple leading slashes over", "old_path", "=", "path", "path", "=", "path", ".", "lstrip", "(", "'/'", ")", "if", "path", "!=", "old_path", ":", "script_name", "+=", "'/'", "*", "(", "len", "(", "old_path", ")", "-", "len", "(", "path", ")", ")", "if", "'/'", "not", "in", "path", ":", "environ", "[", "'PATH_INFO'", "]", "=", "''", "environ", "[", "'SCRIPT_NAME'", "]", "=", "script_name", "+", "path", "rv", "=", "wsgi_get_bytes", "(", "path", ")", "else", ":", "segment", ",", "path", "=", "path", ".", "split", "(", "'/'", ",", "1", ")", "environ", "[", "'PATH_INFO'", "]", "=", "'/'", "+", "path", "environ", "[", "'SCRIPT_NAME'", "]", "=", "script_name", "+", "segment", "rv", "=", "wsgi_get_bytes", "(", "segment", ")", "return", "to_unicode", "(", "rv", ",", "charset", ",", "errors", ",", "allow_none_charset", "=", "True", ")" ]
Removes and returns the next segment of `PATH_INFO`, pushing it onto `SCRIPT_NAME`. Returns `None` if there is nothing left on `PATH_INFO`. If the `charset` is set to `None` a bytestring is returned. If there are empty segments (``'/foo//bar``) these are ignored but properly pushed to the `SCRIPT_NAME`: >>> env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b'} >>> pop_path_info(env) 'a' >>> env['SCRIPT_NAME'] '/foo/a' >>> pop_path_info(env) 'b' >>> env['SCRIPT_NAME'] '/foo/a/b' .. versionadded:: 0.5 .. versionchanged:: 0.9 The path is now decoded and a charset and encoding parameter can be provided. :param environ: the WSGI environment that is modified.
[ "Removes", "and", "returns", "the", "next", "segment", "of", "PATH_INFO", "pushing", "it", "onto", "SCRIPT_NAME", ".", "Returns", "None", "if", "there", "is", "nothing", "left", "on", "PATH_INFO", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L249-L298
7,071
limodou/uliweb
uliweb/lib/werkzeug/wsgi.py
_make_chunk_iter
def _make_chunk_iter(stream, limit, buffer_size): """Helper for the line and chunk iter functions.""" if isinstance(stream, (bytes, bytearray, text_type)): raise TypeError('Passed a string or byte object instead of ' 'true iterator or stream.') if not hasattr(stream, 'read'): for item in stream: if item: yield item return if not isinstance(stream, LimitedStream) and limit is not None: stream = LimitedStream(stream, limit) _read = stream.read while 1: item = _read(buffer_size) if not item: break yield item
python
def _make_chunk_iter(stream, limit, buffer_size): """Helper for the line and chunk iter functions.""" if isinstance(stream, (bytes, bytearray, text_type)): raise TypeError('Passed a string or byte object instead of ' 'true iterator or stream.') if not hasattr(stream, 'read'): for item in stream: if item: yield item return if not isinstance(stream, LimitedStream) and limit is not None: stream = LimitedStream(stream, limit) _read = stream.read while 1: item = _read(buffer_size) if not item: break yield item
[ "def", "_make_chunk_iter", "(", "stream", ",", "limit", ",", "buffer_size", ")", ":", "if", "isinstance", "(", "stream", ",", "(", "bytes", ",", "bytearray", ",", "text_type", ")", ")", ":", "raise", "TypeError", "(", "'Passed a string or byte object instead of '", "'true iterator or stream.'", ")", "if", "not", "hasattr", "(", "stream", ",", "'read'", ")", ":", "for", "item", "in", "stream", ":", "if", "item", ":", "yield", "item", "return", "if", "not", "isinstance", "(", "stream", ",", "LimitedStream", ")", "and", "limit", "is", "not", "None", ":", "stream", "=", "LimitedStream", "(", "stream", ",", "limit", ")", "_read", "=", "stream", ".", "read", "while", "1", ":", "item", "=", "_read", "(", "buffer_size", ")", "if", "not", "item", ":", "break", "yield", "item" ]
Helper for the line and chunk iter functions.
[ "Helper", "for", "the", "line", "and", "chunk", "iter", "functions", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/lib/werkzeug/wsgi.py#L745-L762
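A standalone sketch of the helper's two code paths, calling the private function directly purely for illustration: file-like objects are read in fixed-size chunks, while plain iterables just have their empty items filtered out. With limit=None no LimitedStream wrapping happens.

from io import BytesIO
from werkzeug.wsgi import _make_chunk_iter  # private helper shown above

list(_make_chunk_iter(BytesIO(b'abcdefgh'), limit=None, buffer_size=3))
# [b'abc', b'def', b'gh']
list(_make_chunk_iter([b'x', b'', b'y'], limit=None, buffer_size=3))
# [b'x', b'y'] -- empty items are skipped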
7,072
limodou/uliweb
uliweb/contrib/soap/__init__.py
soap
def soap(func=None, name=None, returns=None, args=None, doc=None, target='SOAP'): """ soap supports multiple SOAP function collections, it'll save functions to target dict, and you can give other target, but it should be keep up with SoapView.target definition. """ global __soap_functions__ returns = _fix_soap_kwargs(returns) args = _fix_soap_kwargs(args) if isinstance(func, str) and not name: return partial(soap, name=func, returns=returns, args=args, doc=doc, target=target) if not func: return partial(soap, name=name, returns=returns, args=args, doc=doc, target=target) target_functions = __soap_functions__.setdefault(target, {}) if inspect.isfunction(func): f_name = func.__name__ if name: f_name = name target_functions[f_name] = endpoint = ('.'.join([func.__module__, func.__name__]), returns, args, doc) func.soap_endpoint = (f_name, endpoint) elif inspect.isclass(func): if not name: name = func.__name__ for _name in dir(func): f = getattr(func, _name) if (inspect.ismethod(f) or inspect.isfunction(f)) and not _name.startswith('_'): f_name = name + '.' + f.__name__ endpoint = ('.'.join([func.__module__, func.__name__, _name]), returns, args, doc) if hasattr(f, 'soap_endpoint'): #the method has already been decorate by soap _n, _e = f.soap_endpoint target_functions[name + '.' + _n] = endpoint del target_functions[_n] else: target_functions[f_name] = endpoint else: raise Exception("Can't support this type [%r]" % func) return func
python
def soap(func=None, name=None, returns=None, args=None, doc=None, target='SOAP'): """ soap supports multiple SOAP function collections, it'll save functions to target dict, and you can give other target, but it should be keep up with SoapView.target definition. """ global __soap_functions__ returns = _fix_soap_kwargs(returns) args = _fix_soap_kwargs(args) if isinstance(func, str) and not name: return partial(soap, name=func, returns=returns, args=args, doc=doc, target=target) if not func: return partial(soap, name=name, returns=returns, args=args, doc=doc, target=target) target_functions = __soap_functions__.setdefault(target, {}) if inspect.isfunction(func): f_name = func.__name__ if name: f_name = name target_functions[f_name] = endpoint = ('.'.join([func.__module__, func.__name__]), returns, args, doc) func.soap_endpoint = (f_name, endpoint) elif inspect.isclass(func): if not name: name = func.__name__ for _name in dir(func): f = getattr(func, _name) if (inspect.ismethod(f) or inspect.isfunction(f)) and not _name.startswith('_'): f_name = name + '.' + f.__name__ endpoint = ('.'.join([func.__module__, func.__name__, _name]), returns, args, doc) if hasattr(f, 'soap_endpoint'): #the method has already been decorate by soap _n, _e = f.soap_endpoint target_functions[name + '.' + _n] = endpoint del target_functions[_n] else: target_functions[f_name] = endpoint else: raise Exception("Can't support this type [%r]" % func) return func
[ "def", "soap", "(", "func", "=", "None", ",", "name", "=", "None", ",", "returns", "=", "None", ",", "args", "=", "None", ",", "doc", "=", "None", ",", "target", "=", "'SOAP'", ")", ":", "global", "__soap_functions__", "returns", "=", "_fix_soap_kwargs", "(", "returns", ")", "args", "=", "_fix_soap_kwargs", "(", "args", ")", "if", "isinstance", "(", "func", ",", "str", ")", "and", "not", "name", ":", "return", "partial", "(", "soap", ",", "name", "=", "func", ",", "returns", "=", "returns", ",", "args", "=", "args", ",", "doc", "=", "doc", ",", "target", "=", "target", ")", "if", "not", "func", ":", "return", "partial", "(", "soap", ",", "name", "=", "name", ",", "returns", "=", "returns", ",", "args", "=", "args", ",", "doc", "=", "doc", ",", "target", "=", "target", ")", "target_functions", "=", "__soap_functions__", ".", "setdefault", "(", "target", ",", "{", "}", ")", "if", "inspect", ".", "isfunction", "(", "func", ")", ":", "f_name", "=", "func", ".", "__name__", "if", "name", ":", "f_name", "=", "name", "target_functions", "[", "f_name", "]", "=", "endpoint", "=", "(", "'.'", ".", "join", "(", "[", "func", ".", "__module__", ",", "func", ".", "__name__", "]", ")", ",", "returns", ",", "args", ",", "doc", ")", "func", ".", "soap_endpoint", "=", "(", "f_name", ",", "endpoint", ")", "elif", "inspect", ".", "isclass", "(", "func", ")", ":", "if", "not", "name", ":", "name", "=", "func", ".", "__name__", "for", "_name", "in", "dir", "(", "func", ")", ":", "f", "=", "getattr", "(", "func", ",", "_name", ")", "if", "(", "inspect", ".", "ismethod", "(", "f", ")", "or", "inspect", ".", "isfunction", "(", "f", ")", ")", "and", "not", "_name", ".", "startswith", "(", "'_'", ")", ":", "f_name", "=", "name", "+", "'.'", "+", "f", ".", "__name__", "endpoint", "=", "(", "'.'", ".", "join", "(", "[", "func", ".", "__module__", ",", "func", ".", "__name__", ",", "_name", "]", ")", ",", "returns", ",", "args", ",", "doc", ")", "if", "hasattr", "(", "f", ",", "'soap_endpoint'", ")", ":", "#the method has already been decorate by soap ", "_n", ",", "_e", "=", "f", ".", "soap_endpoint", "target_functions", "[", "name", "+", "'.'", "+", "_n", "]", "=", "endpoint", "del", "target_functions", "[", "_n", "]", "else", ":", "target_functions", "[", "f_name", "]", "=", "endpoint", "else", ":", "raise", "Exception", "(", "\"Can't support this type [%r]\"", "%", "func", ")", "return", "func" ]
soap supports multiple SOAP function collections: it saves functions to the target dict. You can specify a different target, but it should match the SoapView.target definition.
[ "soap", "supports", "multiple", "SOAP", "function", "collections", "it", "ll", "save", "functions", "to", "target", "dict", "and", "you", "can", "give", "other", "target", "but", "it", "should", "be", "keep", "up", "with", "SoapView", ".", "target", "definition", "." ]
34472f25e4bc0b954a35346672f94e84ef18b076
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/soap/__init__.py#L27-L68
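A hedged sketch of how the decorator above is applied. The function and class names are illustrative only, and the returns/args keyword arguments are omitted because their expected shape depends on _fix_soap_kwargs, which is not part of this record.

from uliweb.contrib.soap import soap

@soap                    # bare use: registered under the function's own name, 'ping'
def ping():
    return 'pong'

@soap('GetVersion')      # a string first argument becomes the registered name
def version():
    return '1.0'

@soap                    # decorating a class registers every public method as 'Users.get', etc.
class Users(object):
    def get(self, user_id):
        pass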
7,073
adamhajari/spyre
spyre/server.py
App.getJsonData
def getJsonData(self, params): """turns the DataFrame returned by getData into a dictionary arguments: the params passed used for table or d3 outputs are forwarded on to getData """ try: return eval("self." + str(params['output_id']) + "(params)") except AttributeError: df = self.getData(params) if df is None: return None return df.to_dict(orient='records')
python
def getJsonData(self, params): """turns the DataFrame returned by getData into a dictionary arguments: the params passed used for table or d3 outputs are forwarded on to getData """ try: return eval("self." + str(params['output_id']) + "(params)") except AttributeError: df = self.getData(params) if df is None: return None return df.to_dict(orient='records')
[ "def", "getJsonData", "(", "self", ",", "params", ")", ":", "try", ":", "return", "eval", "(", "\"self.\"", "+", "str", "(", "params", "[", "'output_id'", "]", ")", "+", "\"(params)\"", ")", "except", "AttributeError", ":", "df", "=", "self", ".", "getData", "(", "params", ")", "if", "df", "is", "None", ":", "return", "None", "return", "df", ".", "to_dict", "(", "orient", "=", "'records'", ")" ]
turns the DataFrame returned by getData into a dictionary. arguments: the params passed in for table or d3 outputs are forwarded on to getData
[ "turns", "the", "DataFrame", "returned", "by", "getData", "into", "a", "dictionary" ]
5dd9f6de072e99af636ab7e7393d249761c56e69
https://github.com/adamhajari/spyre/blob/5dd9f6de072e99af636ab7e7393d249761c56e69/spyre/server.py#L330-L342
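A hedged, self-contained sketch of the fallback path in getJsonData: when no method named after output_id exists on the app, the call falls through to getData and the DataFrame is converted to a list of records. The spyre import style and the no-argument App constructor are assumptions not shown in this record.

import pandas as pd
from spyre import server

class MyApp(server.App):
    outputs = [{"type": "table", "id": "table_id"}]

    def getData(self, params):
        return pd.DataFrame({"x": [1, 2], "y": [3, 4]})

MyApp().getJsonData({"output_id": "table_id"})
# no 'table_id' method exists, so getData is used:
# [{'x': 1, 'y': 3}, {'x': 2, 'y': 4}]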
7,074
adamhajari/spyre
spyre/server.py
App.launch_in_notebook
def launch_in_notebook(self, port=9095, width=900, height=600): """launch the app within an iframe in ipython notebook""" from IPython.lib import backgroundjobs as bg from IPython.display import HTML jobs = bg.BackgroundJobManager() jobs.new(self.launch, kw=dict(port=port)) frame = HTML( '<iframe src=http://localhost:{} width={} height={}></iframe>' .format(port, width, height) ) return frame
python
def launch_in_notebook(self, port=9095, width=900, height=600): """launch the app within an iframe in ipython notebook""" from IPython.lib import backgroundjobs as bg from IPython.display import HTML jobs = bg.BackgroundJobManager() jobs.new(self.launch, kw=dict(port=port)) frame = HTML( '<iframe src=http://localhost:{} width={} height={}></iframe>' .format(port, width, height) ) return frame
[ "def", "launch_in_notebook", "(", "self", ",", "port", "=", "9095", ",", "width", "=", "900", ",", "height", "=", "600", ")", ":", "from", "IPython", ".", "lib", "import", "backgroundjobs", "as", "bg", "from", "IPython", ".", "display", "import", "HTML", "jobs", "=", "bg", ".", "BackgroundJobManager", "(", ")", "jobs", ".", "new", "(", "self", ".", "launch", ",", "kw", "=", "dict", "(", "port", "=", "port", ")", ")", "frame", "=", "HTML", "(", "'<iframe src=http://localhost:{} width={} height={}></iframe>'", ".", "format", "(", "port", ",", "width", ",", "height", ")", ")", "return", "frame" ]
launch the app within an iframe in ipython notebook
[ "launch", "the", "app", "within", "an", "iframe", "in", "ipython", "notebook" ]
5dd9f6de072e99af636ab7e7393d249761c56e69
https://github.com/adamhajari/spyre/blob/5dd9f6de072e99af636ab7e7393d249761c56e69/spyre/server.py#L469-L480
7,075
adamhajari/spyre
spyre/server.py
Site.launch
def launch(self, host="local", port=8080): """Calling the Launch method on a Site object will serve the top node of the cherrypy Root object tree""" # Need to add in the appbar if many apps self.root.templateVars['app_bar'] = self.site_app_bar for fullRoute, _ in self.site_app_bar[1:]: parent, route = self.get_route(fullRoute) parent.__dict__[route].templateVars['app_bar'] = self.site_app_bar if host != "local": cherrypy.server.socket_host = '0.0.0.0' cherrypy.server.socket_port = port cherrypy.quickstart(self.root)
python
def launch(self, host="local", port=8080): """Calling the Launch method on a Site object will serve the top node of the cherrypy Root object tree""" # Need to add in the appbar if many apps self.root.templateVars['app_bar'] = self.site_app_bar for fullRoute, _ in self.site_app_bar[1:]: parent, route = self.get_route(fullRoute) parent.__dict__[route].templateVars['app_bar'] = self.site_app_bar if host != "local": cherrypy.server.socket_host = '0.0.0.0' cherrypy.server.socket_port = port cherrypy.quickstart(self.root)
[ "def", "launch", "(", "self", ",", "host", "=", "\"local\"", ",", "port", "=", "8080", ")", ":", "# Need to add in the appbar if many apps", "self", ".", "root", ".", "templateVars", "[", "'app_bar'", "]", "=", "self", ".", "site_app_bar", "for", "fullRoute", ",", "_", "in", "self", ".", "site_app_bar", "[", "1", ":", "]", ":", "parent", ",", "route", "=", "self", ".", "get_route", "(", "fullRoute", ")", "parent", ".", "__dict__", "[", "route", "]", ".", "templateVars", "[", "'app_bar'", "]", "=", "self", ".", "site_app_bar", "if", "host", "!=", "\"local\"", ":", "cherrypy", ".", "server", ".", "socket_host", "=", "'0.0.0.0'", "cherrypy", ".", "server", ".", "socket_port", "=", "port", "cherrypy", ".", "quickstart", "(", "self", ".", "root", ")" ]
Calling the Launch method on a Site object will serve the top node of the cherrypy Root object tree
[ "Calling", "the", "Launch", "method", "on", "a", "Site", "object", "will", "serve", "the", "top", "node", "of", "the", "cherrypy", "Root", "object", "tree" ]
5dd9f6de072e99af636ab7e7393d249761c56e69
https://github.com/adamhajari/spyre/blob/5dd9f6de072e99af636ab7e7393d249761c56e69/spyre/server.py#L552-L565
7,076
googlefonts/fontmake
Lib/fontmake/__main__.py
exclude_args
def exclude_args(parser, args, excluded_args, target): """Delete options that are not appropriate for a following code path; exit with an error if excluded options were passed in by the user. argparse generates a namespace with all options it knows, but not every attribute should be passed to all code paths (i.e. options about interpolation should not reach `run_from_ufos()`). This function can be run before entering a particular code path to clean up the kwargs passed to it. Exit with an error message if the user actually passed the options in. """ msg = '"%s" option invalid for %s' for argname in excluded_args: if argname not in args: continue if args[argname]: optname = "--%s" % argname.replace("_", "-") parser.error(msg % (optname, target)) del args[argname]
python
def exclude_args(parser, args, excluded_args, target): """Delete options that are not appropriate for a following code path; exit with an error if excluded options were passed in by the user. argparse generates a namespace with all options it knows, but not every attribute should be passed to all code paths (i.e. options about interpolation should not reach `run_from_ufos()`). This function can be run before entering a particular code path to clean up the kwargs passed to it. Exit with an error message if the user actually passed the options in. """ msg = '"%s" option invalid for %s' for argname in excluded_args: if argname not in args: continue if args[argname]: optname = "--%s" % argname.replace("_", "-") parser.error(msg % (optname, target)) del args[argname]
[ "def", "exclude_args", "(", "parser", ",", "args", ",", "excluded_args", ",", "target", ")", ":", "msg", "=", "'\"%s\" option invalid for %s'", "for", "argname", "in", "excluded_args", ":", "if", "argname", "not", "in", "args", ":", "continue", "if", "args", "[", "argname", "]", ":", "optname", "=", "\"--%s\"", "%", "argname", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", "parser", ".", "error", "(", "msg", "%", "(", "optname", ",", "target", ")", ")", "del", "args", "[", "argname", "]" ]
Delete options that are not appropriate for a following code path; exit with an error if excluded options were passed in by the user. argparse generates a namespace with all options it knows, but not every attribute should be passed to all code paths (i.e. options about interpolation should not reach `run_from_ufos()`). This function can be run before entering a particular code path to clean up the kwargs passed to it. Exit with an error message if the user actually passed the options in.
[ "Delete", "options", "that", "are", "not", "appropriate", "for", "a", "following", "code", "path", ";", "exit", "with", "an", "error", "if", "excluded", "options", "were", "passed", "in", "by", "the", "user", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/__main__.py#L42-L60
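A self-contained sketch of the cleanup pattern the docstring describes: options the user did not set are dropped silently, while an option the user actually passed triggers parser.error() and exits. The import path is a guess based on the record's Lib/fontmake/__main__.py path.

import argparse
from fontmake.__main__ import exclude_args  # assumed import path

parser = argparse.ArgumentParser()
parser.add_argument('--interpolate', action='store_true')
parser.add_argument('--output-dir')

args = vars(parser.parse_args(['--output-dir', 'build']))
exclude_args(parser, args, ['interpolate'], 'UFO input')
assert 'interpolate' not in args   # unset option is silently removed

args = vars(parser.parse_args(['--interpolate']))
exclude_args(parser, args, ['interpolate'], 'UFO input')
# prints '"--interpolate" option invalid for UFO input' and exits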
7,077
googlefonts/fontmake
Lib/fontmake/font_project.py
_varLib_finder
def _varLib_finder(source, directory="", ext="ttf"): """Finder function to be used with varLib.build to find master TTFs given the filename of the source UFO master as specified in the designspace. It replaces the UFO directory with the one specified in 'directory' argument, and replaces the file extension with 'ext'. """ fname = os.path.splitext(os.path.basename(source))[0] + "." + ext return os.path.join(directory, fname)
python
def _varLib_finder(source, directory="", ext="ttf"): """Finder function to be used with varLib.build to find master TTFs given the filename of the source UFO master as specified in the designspace. It replaces the UFO directory with the one specified in 'directory' argument, and replaces the file extension with 'ext'. """ fname = os.path.splitext(os.path.basename(source))[0] + "." + ext return os.path.join(directory, fname)
[ "def", "_varLib_finder", "(", "source", ",", "directory", "=", "\"\"", ",", "ext", "=", "\"ttf\"", ")", ":", "fname", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "source", ")", ")", "[", "0", "]", "+", "\".\"", "+", "ext", "return", "os", ".", "path", ".", "join", "(", "directory", ",", "fname", ")" ]
Finder function to be used with varLib.build to find master TTFs given the filename of the source UFO master as specified in the designspace. It replaces the UFO directory with the one specified in 'directory' argument, and replaces the file extension with 'ext'.
[ "Finder", "function", "to", "be", "used", "with", "varLib", ".", "build", "to", "find", "master", "TTFs", "given", "the", "filename", "of", "the", "source", "UFO", "master", "as", "specified", "in", "the", "designspace", ".", "It", "replaces", "the", "UFO", "directory", "with", "the", "one", "specified", "in", "directory", "argument", "and", "replaces", "the", "file", "extension", "with", "ext", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1158-L1165
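A quick illustration of the path rewriting the finder performs, shown with the module-private helper exactly as defined above:

_varLib_finder('masters/MyFont-Regular.ufo', directory='master_ttf', ext='ttf')
# -> 'master_ttf/MyFont-Regular.ttf' (os.path.join of the directory and the renamed file)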
7,078
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.build_master_ufos
def build_master_ufos( self, glyphs_path, designspace_path=None, master_dir=None, instance_dir=None, family_name=None, mti_source=None, ): """Build UFOs and MutatorMath designspace from Glyphs source.""" import glyphsLib if master_dir is None: master_dir = self._output_dir("ufo") if not os.path.isdir(master_dir): os.mkdir(master_dir) if instance_dir is None: instance_dir = self._output_dir("ufo", is_instance=True) if not os.path.isdir(instance_dir): os.mkdir(instance_dir) font = glyphsLib.GSFont(glyphs_path) if designspace_path is not None: designspace_dir = os.path.dirname(designspace_path) else: designspace_dir = master_dir # glyphsLib.to_designspace expects instance_dir to be relative instance_dir = os.path.relpath(instance_dir, designspace_dir) designspace = glyphsLib.to_designspace( font, family_name=family_name, instance_dir=instance_dir ) masters = {} # multiple sources can have the same font/filename (but different layer), # we want to save a font only once for source in designspace.sources: if source.filename in masters: assert source.font is masters[source.filename] continue ufo_path = os.path.join(master_dir, source.filename) # no need to also set the relative 'filename' attribute as that # will be auto-updated on writing the designspace document source.path = ufo_path source.font.save(ufo_path) masters[source.filename] = source.font if designspace_path is None: designspace_path = os.path.join(master_dir, designspace.filename) designspace.write(designspace_path) if mti_source: self.add_mti_features_to_master_ufos(mti_source, masters.values()) return designspace_path
python
def build_master_ufos( self, glyphs_path, designspace_path=None, master_dir=None, instance_dir=None, family_name=None, mti_source=None, ): """Build UFOs and MutatorMath designspace from Glyphs source.""" import glyphsLib if master_dir is None: master_dir = self._output_dir("ufo") if not os.path.isdir(master_dir): os.mkdir(master_dir) if instance_dir is None: instance_dir = self._output_dir("ufo", is_instance=True) if not os.path.isdir(instance_dir): os.mkdir(instance_dir) font = glyphsLib.GSFont(glyphs_path) if designspace_path is not None: designspace_dir = os.path.dirname(designspace_path) else: designspace_dir = master_dir # glyphsLib.to_designspace expects instance_dir to be relative instance_dir = os.path.relpath(instance_dir, designspace_dir) designspace = glyphsLib.to_designspace( font, family_name=family_name, instance_dir=instance_dir ) masters = {} # multiple sources can have the same font/filename (but different layer), # we want to save a font only once for source in designspace.sources: if source.filename in masters: assert source.font is masters[source.filename] continue ufo_path = os.path.join(master_dir, source.filename) # no need to also set the relative 'filename' attribute as that # will be auto-updated on writing the designspace document source.path = ufo_path source.font.save(ufo_path) masters[source.filename] = source.font if designspace_path is None: designspace_path = os.path.join(master_dir, designspace.filename) designspace.write(designspace_path) if mti_source: self.add_mti_features_to_master_ufos(mti_source, masters.values()) return designspace_path
[ "def", "build_master_ufos", "(", "self", ",", "glyphs_path", ",", "designspace_path", "=", "None", ",", "master_dir", "=", "None", ",", "instance_dir", "=", "None", ",", "family_name", "=", "None", ",", "mti_source", "=", "None", ",", ")", ":", "import", "glyphsLib", "if", "master_dir", "is", "None", ":", "master_dir", "=", "self", ".", "_output_dir", "(", "\"ufo\"", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "master_dir", ")", ":", "os", ".", "mkdir", "(", "master_dir", ")", "if", "instance_dir", "is", "None", ":", "instance_dir", "=", "self", ".", "_output_dir", "(", "\"ufo\"", ",", "is_instance", "=", "True", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "instance_dir", ")", ":", "os", ".", "mkdir", "(", "instance_dir", ")", "font", "=", "glyphsLib", ".", "GSFont", "(", "glyphs_path", ")", "if", "designspace_path", "is", "not", "None", ":", "designspace_dir", "=", "os", ".", "path", ".", "dirname", "(", "designspace_path", ")", "else", ":", "designspace_dir", "=", "master_dir", "# glyphsLib.to_designspace expects instance_dir to be relative", "instance_dir", "=", "os", ".", "path", ".", "relpath", "(", "instance_dir", ",", "designspace_dir", ")", "designspace", "=", "glyphsLib", ".", "to_designspace", "(", "font", ",", "family_name", "=", "family_name", ",", "instance_dir", "=", "instance_dir", ")", "masters", "=", "{", "}", "# multiple sources can have the same font/filename (but different layer),", "# we want to save a font only once", "for", "source", "in", "designspace", ".", "sources", ":", "if", "source", ".", "filename", "in", "masters", ":", "assert", "source", ".", "font", "is", "masters", "[", "source", ".", "filename", "]", "continue", "ufo_path", "=", "os", ".", "path", ".", "join", "(", "master_dir", ",", "source", ".", "filename", ")", "# no need to also set the relative 'filename' attribute as that", "# will be auto-updated on writing the designspace document", "source", ".", "path", "=", "ufo_path", "source", ".", "font", ".", "save", "(", "ufo_path", ")", "masters", "[", "source", ".", "filename", "]", "=", "source", ".", "font", "if", "designspace_path", "is", "None", ":", "designspace_path", "=", "os", ".", "path", ".", "join", "(", "master_dir", ",", "designspace", ".", "filename", ")", "designspace", ".", "write", "(", "designspace_path", ")", "if", "mti_source", ":", "self", ".", "add_mti_features_to_master_ufos", "(", "mti_source", ",", "masters", ".", "values", "(", ")", ")", "return", "designspace_path" ]
Build UFOs and MutatorMath designspace from Glyphs source.
[ "Build", "UFOs", "and", "MutatorMath", "designspace", "from", "Glyphs", "source", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L110-L163
7,079
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.remove_overlaps
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)): """Remove overlaps in UFOs' glyphs' contours.""" from booleanOperations import union, BooleanOperationsError for ufo in ufos: font_name = self._font_name(ufo) logger.info("Removing overlaps for " + font_name) for glyph in ufo: if not glyph_filter(glyph): continue contours = list(glyph) glyph.clearContours() try: union(contours, glyph.getPointPen()) except BooleanOperationsError: logger.error( "Failed to remove overlaps for %s: %r", font_name, glyph.name ) raise
python
def remove_overlaps(self, ufos, glyph_filter=lambda g: len(g)): """Remove overlaps in UFOs' glyphs' contours.""" from booleanOperations import union, BooleanOperationsError for ufo in ufos: font_name = self._font_name(ufo) logger.info("Removing overlaps for " + font_name) for glyph in ufo: if not glyph_filter(glyph): continue contours = list(glyph) glyph.clearContours() try: union(contours, glyph.getPointPen()) except BooleanOperationsError: logger.error( "Failed to remove overlaps for %s: %r", font_name, glyph.name ) raise
[ "def", "remove_overlaps", "(", "self", ",", "ufos", ",", "glyph_filter", "=", "lambda", "g", ":", "len", "(", "g", ")", ")", ":", "from", "booleanOperations", "import", "union", ",", "BooleanOperationsError", "for", "ufo", "in", "ufos", ":", "font_name", "=", "self", ".", "_font_name", "(", "ufo", ")", "logger", ".", "info", "(", "\"Removing overlaps for \"", "+", "font_name", ")", "for", "glyph", "in", "ufo", ":", "if", "not", "glyph_filter", "(", "glyph", ")", ":", "continue", "contours", "=", "list", "(", "glyph", ")", "glyph", ".", "clearContours", "(", ")", "try", ":", "union", "(", "contours", ",", "glyph", ".", "getPointPen", "(", ")", ")", "except", "BooleanOperationsError", ":", "logger", ".", "error", "(", "\"Failed to remove overlaps for %s: %r\"", ",", "font_name", ",", "glyph", ".", "name", ")", "raise" ]
Remove overlaps in UFOs' glyphs' contours.
[ "Remove", "overlaps", "in", "UFOs", "glyphs", "contours", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L187-L205
7,080
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.decompose_glyphs
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True): """Move components of UFOs' glyphs to their outlines.""" for ufo in ufos: logger.info("Decomposing glyphs for " + self._font_name(ufo)) for glyph in ufo: if not glyph.components or not glyph_filter(glyph): continue self._deep_copy_contours(ufo, glyph, glyph, Transform()) glyph.clearComponents()
python
def decompose_glyphs(self, ufos, glyph_filter=lambda g: True): """Move components of UFOs' glyphs to their outlines.""" for ufo in ufos: logger.info("Decomposing glyphs for " + self._font_name(ufo)) for glyph in ufo: if not glyph.components or not glyph_filter(glyph): continue self._deep_copy_contours(ufo, glyph, glyph, Transform()) glyph.clearComponents()
[ "def", "decompose_glyphs", "(", "self", ",", "ufos", ",", "glyph_filter", "=", "lambda", "g", ":", "True", ")", ":", "for", "ufo", "in", "ufos", ":", "logger", ".", "info", "(", "\"Decomposing glyphs for \"", "+", "self", ".", "_font_name", "(", "ufo", ")", ")", "for", "glyph", "in", "ufo", ":", "if", "not", "glyph", ".", "components", "or", "not", "glyph_filter", "(", "glyph", ")", ":", "continue", "self", ".", "_deep_copy_contours", "(", "ufo", ",", "glyph", ",", "glyph", ",", "Transform", "(", ")", ")", "glyph", ".", "clearComponents", "(", ")" ]
Move components of UFOs' glyphs to their outlines.
[ "Move", "components", "of", "UFOs", "glyphs", "to", "their", "outlines", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L209-L218
7,081
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.build_ttfs
def build_ttfs(self, ufos, **kwargs): """Build OpenType binaries with TrueType outlines.""" self.save_otfs(ufos, ttf=True, **kwargs)
python
def build_ttfs(self, ufos, **kwargs): """Build OpenType binaries with TrueType outlines.""" self.save_otfs(ufos, ttf=True, **kwargs)
[ "def", "build_ttfs", "(", "self", ",", "ufos", ",", "*", "*", "kwargs", ")", ":", "self", ".", "save_otfs", "(", "ufos", ",", "ttf", "=", "True", ",", "*", "*", "kwargs", ")" ]
Build OpenType binaries with TrueType outlines.
[ "Build", "OpenType", "binaries", "with", "TrueType", "outlines", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L271-L273
7,082
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.build_variable_font
def build_variable_font( self, designspace, output_path=None, output_dir=None, master_bin_dir=None, ttf=True, ): """Build OpenType variable font from masters in a designspace.""" assert not (output_path and output_dir), "mutually exclusive args" ext = "ttf" if ttf else "otf" if hasattr(designspace, "__fspath__"): designspace = designspace.__fspath__() if isinstance(designspace, basestring): designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace) if master_bin_dir is None: master_bin_dir = self._output_dir(ext, interpolatable=True) finder = partial(_varLib_finder, directory=master_bin_dir) else: assert all(isinstance(s.font, TTFont) for s in designspace.sources) finder = lambda s: s # noqa: E731 if output_path is None: output_path = ( os.path.splitext(os.path.basename(designspace.path))[0] + "-VF" ) output_path = self._output_path( output_path, ext, is_variable=True, output_dir=output_dir ) logger.info("Building variable font " + output_path) font, _, _ = varLib.build(designspace, finder) font.save(output_path)
python
def build_variable_font( self, designspace, output_path=None, output_dir=None, master_bin_dir=None, ttf=True, ): """Build OpenType variable font from masters in a designspace.""" assert not (output_path and output_dir), "mutually exclusive args" ext = "ttf" if ttf else "otf" if hasattr(designspace, "__fspath__"): designspace = designspace.__fspath__() if isinstance(designspace, basestring): designspace = designspaceLib.DesignSpaceDocument.fromfile(designspace) if master_bin_dir is None: master_bin_dir = self._output_dir(ext, interpolatable=True) finder = partial(_varLib_finder, directory=master_bin_dir) else: assert all(isinstance(s.font, TTFont) for s in designspace.sources) finder = lambda s: s # noqa: E731 if output_path is None: output_path = ( os.path.splitext(os.path.basename(designspace.path))[0] + "-VF" ) output_path = self._output_path( output_path, ext, is_variable=True, output_dir=output_dir ) logger.info("Building variable font " + output_path) font, _, _ = varLib.build(designspace, finder) font.save(output_path)
[ "def", "build_variable_font", "(", "self", ",", "designspace", ",", "output_path", "=", "None", ",", "output_dir", "=", "None", ",", "master_bin_dir", "=", "None", ",", "ttf", "=", "True", ",", ")", ":", "assert", "not", "(", "output_path", "and", "output_dir", ")", ",", "\"mutually exclusive args\"", "ext", "=", "\"ttf\"", "if", "ttf", "else", "\"otf\"", "if", "hasattr", "(", "designspace", ",", "\"__fspath__\"", ")", ":", "designspace", "=", "designspace", ".", "__fspath__", "(", ")", "if", "isinstance", "(", "designspace", ",", "basestring", ")", ":", "designspace", "=", "designspaceLib", ".", "DesignSpaceDocument", ".", "fromfile", "(", "designspace", ")", "if", "master_bin_dir", "is", "None", ":", "master_bin_dir", "=", "self", ".", "_output_dir", "(", "ext", ",", "interpolatable", "=", "True", ")", "finder", "=", "partial", "(", "_varLib_finder", ",", "directory", "=", "master_bin_dir", ")", "else", ":", "assert", "all", "(", "isinstance", "(", "s", ".", "font", ",", "TTFont", ")", "for", "s", "in", "designspace", ".", "sources", ")", "finder", "=", "lambda", "s", ":", "s", "# noqa: E731", "if", "output_path", "is", "None", ":", "output_path", "=", "(", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "designspace", ".", "path", ")", ")", "[", "0", "]", "+", "\"-VF\"", ")", "output_path", "=", "self", ".", "_output_path", "(", "output_path", ",", "ext", ",", "is_variable", "=", "True", ",", "output_dir", "=", "output_dir", ")", "logger", ".", "info", "(", "\"Building variable font \"", "+", "output_path", ")", "font", ",", "_", ",", "_", "=", "varLib", ".", "build", "(", "designspace", ",", "finder", ")", "font", ".", "save", "(", "output_path", ")" ]
Build OpenType variable font from masters in a designspace.
[ "Build", "OpenType", "variable", "font", "from", "masters", "in", "a", "designspace", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L341-L377
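A brief usage sketch for the build_variable_font method recorded above; it assumes FontProject can be constructed with default arguments and that interpolatable TTF masters have already been compiled into the default master_ttf_interpolatable directory. The designspace and output paths are illustrative.

from fontmake.font_project import FontProject

project = FontProject()

# Masters are located through the default interpolatable output directory.
project.build_variable_font("MyFamily.designspace", ttf=True)

# Or point at an explicit directory of compiled masters and an output path.
project.build_variable_font(
    "MyFamily.designspace",
    output_path="variable_ttf/MyFamily-VF.ttf",
    master_bin_dir="build/master_ttf_interpolatable",
)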
7,083
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.subset_otf_from_ufo
def subset_otf_from_ufo(self, otf_path, ufo): """Subset a font using export flags set by glyphsLib. There are two more settings that can change export behavior: "Export Glyphs" and "Remove Glyphs", which are currently not supported for complexity reasons. See https://github.com/googlei18n/glyphsLib/issues/295. """ from fontTools import subset # ufo2ft always inserts a ".notdef" glyph as the first glyph ufo_order = makeOfficialGlyphOrder(ufo) if ".notdef" not in ufo_order: ufo_order.insert(0, ".notdef") ot_order = TTFont(otf_path).getGlyphOrder() assert ot_order[0] == ".notdef" assert len(ufo_order) == len(ot_order) for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY): keep_glyphs_list = ufo.lib.get(key) if keep_glyphs_list is not None: keep_glyphs = set(keep_glyphs_list) break else: keep_glyphs = None include = [] for source_name, binary_name in zip(ufo_order, ot_order): if keep_glyphs and source_name not in keep_glyphs: continue if source_name in ufo: exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True) if not exported: continue include.append(binary_name) # copied from nototools.subset opt = subset.Options() opt.name_IDs = ["*"] opt.name_legacy = True opt.name_languages = ["*"] opt.layout_features = ["*"] opt.notdef_outline = True opt.recalc_bounds = True opt.recalc_timestamp = True opt.canonical_order = True opt.glyph_names = True font = subset.load_font(otf_path, opt, lazy=False) subsetter = subset.Subsetter(options=opt) subsetter.populate(glyphs=include) subsetter.subset(font) subset.save_font(font, otf_path, opt)
python
def subset_otf_from_ufo(self, otf_path, ufo): """Subset a font using export flags set by glyphsLib. There are two more settings that can change export behavior: "Export Glyphs" and "Remove Glyphs", which are currently not supported for complexity reasons. See https://github.com/googlei18n/glyphsLib/issues/295. """ from fontTools import subset # ufo2ft always inserts a ".notdef" glyph as the first glyph ufo_order = makeOfficialGlyphOrder(ufo) if ".notdef" not in ufo_order: ufo_order.insert(0, ".notdef") ot_order = TTFont(otf_path).getGlyphOrder() assert ot_order[0] == ".notdef" assert len(ufo_order) == len(ot_order) for key in (KEEP_GLYPHS_NEW_KEY, KEEP_GLYPHS_OLD_KEY): keep_glyphs_list = ufo.lib.get(key) if keep_glyphs_list is not None: keep_glyphs = set(keep_glyphs_list) break else: keep_glyphs = None include = [] for source_name, binary_name in zip(ufo_order, ot_order): if keep_glyphs and source_name not in keep_glyphs: continue if source_name in ufo: exported = ufo[source_name].lib.get(GLYPH_EXPORT_KEY, True) if not exported: continue include.append(binary_name) # copied from nototools.subset opt = subset.Options() opt.name_IDs = ["*"] opt.name_legacy = True opt.name_languages = ["*"] opt.layout_features = ["*"] opt.notdef_outline = True opt.recalc_bounds = True opt.recalc_timestamp = True opt.canonical_order = True opt.glyph_names = True font = subset.load_font(otf_path, opt, lazy=False) subsetter = subset.Subsetter(options=opt) subsetter.populate(glyphs=include) subsetter.subset(font) subset.save_font(font, otf_path, opt)
[ "def", "subset_otf_from_ufo", "(", "self", ",", "otf_path", ",", "ufo", ")", ":", "from", "fontTools", "import", "subset", "# ufo2ft always inserts a \".notdef\" glyph as the first glyph", "ufo_order", "=", "makeOfficialGlyphOrder", "(", "ufo", ")", "if", "\".notdef\"", "not", "in", "ufo_order", ":", "ufo_order", ".", "insert", "(", "0", ",", "\".notdef\"", ")", "ot_order", "=", "TTFont", "(", "otf_path", ")", ".", "getGlyphOrder", "(", ")", "assert", "ot_order", "[", "0", "]", "==", "\".notdef\"", "assert", "len", "(", "ufo_order", ")", "==", "len", "(", "ot_order", ")", "for", "key", "in", "(", "KEEP_GLYPHS_NEW_KEY", ",", "KEEP_GLYPHS_OLD_KEY", ")", ":", "keep_glyphs_list", "=", "ufo", ".", "lib", ".", "get", "(", "key", ")", "if", "keep_glyphs_list", "is", "not", "None", ":", "keep_glyphs", "=", "set", "(", "keep_glyphs_list", ")", "break", "else", ":", "keep_glyphs", "=", "None", "include", "=", "[", "]", "for", "source_name", ",", "binary_name", "in", "zip", "(", "ufo_order", ",", "ot_order", ")", ":", "if", "keep_glyphs", "and", "source_name", "not", "in", "keep_glyphs", ":", "continue", "if", "source_name", "in", "ufo", ":", "exported", "=", "ufo", "[", "source_name", "]", ".", "lib", ".", "get", "(", "GLYPH_EXPORT_KEY", ",", "True", ")", "if", "not", "exported", ":", "continue", "include", ".", "append", "(", "binary_name", ")", "# copied from nototools.subset", "opt", "=", "subset", ".", "Options", "(", ")", "opt", ".", "name_IDs", "=", "[", "\"*\"", "]", "opt", ".", "name_legacy", "=", "True", "opt", ".", "name_languages", "=", "[", "\"*\"", "]", "opt", ".", "layout_features", "=", "[", "\"*\"", "]", "opt", ".", "notdef_outline", "=", "True", "opt", ".", "recalc_bounds", "=", "True", "opt", ".", "recalc_timestamp", "=", "True", "opt", ".", "canonical_order", "=", "True", "opt", ".", "glyph_names", "=", "True", "font", "=", "subset", ".", "load_font", "(", "otf_path", ",", "opt", ",", "lazy", "=", "False", ")", "subsetter", "=", "subset", ".", "Subsetter", "(", "options", "=", "opt", ")", "subsetter", ".", "populate", "(", "glyphs", "=", "include", ")", "subsetter", ".", "subset", "(", "font", ")", "subset", ".", "save_font", "(", "font", ",", "otf_path", ",", "opt", ")" ]
Subset a font using export flags set by glyphsLib. There are two more settings that can change export behavior: "Export Glyphs" and "Remove Glyphs", which are currently not supported for complexity reasons. See https://github.com/googlei18n/glyphsLib/issues/295.
[ "Subset", "a", "font", "using", "export", "flags", "set", "by", "glyphsLib", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L625-L680
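A minimal sketch of driving the subsetting step by hand, assuming a defcon Font and a binary that was compiled from it; the paths are illustrative, and the binary is overwritten in place because the method saves back to otf_path.

from defcon import Font
from fontmake.font_project import FontProject

ufo = Font("master_ufo/MyFamily-Regular.ufo")  # source the binary was built from
project = FontProject()

# Glyphs whose lib data marks them as non-exported are dropped from the binary.
project.subset_otf_from_ufo("master_otf/MyFamily-Regular.otf", ufo)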
7,084
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.run_from_glyphs
def run_from_glyphs( self, glyphs_path, designspace_path=None, master_dir=None, instance_dir=None, family_name=None, mti_source=None, **kwargs ): """Run toolchain from Glyphs source. Args: glyphs_path: Path to source file. designspace_path: Output path of generated designspace document. By default it's "<family_name>[-<base_style>].designspace". master_dir: Directory where to save UFO masters (default: "master_ufo"). instance_dir: Directory where to save UFO instances (default: "instance_ufo"). family_name: If provided, uses this family name in the output. mti_source: Path to property list file containing a dictionary mapping UFO masters to dictionaries mapping layout table tags to MTI source paths which should be compiled into those tables. kwargs: Arguments passed along to run_from_designspace. """ logger.info("Building master UFOs and designspace from Glyphs source") designspace_path = self.build_master_ufos( glyphs_path, designspace_path=designspace_path, master_dir=master_dir, instance_dir=instance_dir, family_name=family_name, mti_source=mti_source, ) self.run_from_designspace(designspace_path, **kwargs)
python
def run_from_glyphs( self, glyphs_path, designspace_path=None, master_dir=None, instance_dir=None, family_name=None, mti_source=None, **kwargs ): """Run toolchain from Glyphs source. Args: glyphs_path: Path to source file. designspace_path: Output path of generated designspace document. By default it's "<family_name>[-<base_style>].designspace". master_dir: Directory where to save UFO masters (default: "master_ufo"). instance_dir: Directory where to save UFO instances (default: "instance_ufo"). family_name: If provided, uses this family name in the output. mti_source: Path to property list file containing a dictionary mapping UFO masters to dictionaries mapping layout table tags to MTI source paths which should be compiled into those tables. kwargs: Arguments passed along to run_from_designspace. """ logger.info("Building master UFOs and designspace from Glyphs source") designspace_path = self.build_master_ufos( glyphs_path, designspace_path=designspace_path, master_dir=master_dir, instance_dir=instance_dir, family_name=family_name, mti_source=mti_source, ) self.run_from_designspace(designspace_path, **kwargs)
[ "def", "run_from_glyphs", "(", "self", ",", "glyphs_path", ",", "designspace_path", "=", "None", ",", "master_dir", "=", "None", ",", "instance_dir", "=", "None", ",", "family_name", "=", "None", ",", "mti_source", "=", "None", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "\"Building master UFOs and designspace from Glyphs source\"", ")", "designspace_path", "=", "self", ".", "build_master_ufos", "(", "glyphs_path", ",", "designspace_path", "=", "designspace_path", ",", "master_dir", "=", "master_dir", ",", "instance_dir", "=", "instance_dir", ",", "family_name", "=", "family_name", ",", "mti_source", "=", "mti_source", ",", ")", "self", ".", "run_from_designspace", "(", "designspace_path", ",", "*", "*", "kwargs", ")" ]
Run toolchain from Glyphs source. Args: glyphs_path: Path to source file. designspace_path: Output path of generated designspace document. By default it's "<family_name>[-<base_style>].designspace". master_dir: Directory where to save UFO masters (default: "master_ufo"). instance_dir: Directory where to save UFO instances (default: "instance_ufo"). family_name: If provided, uses this family name in the output. mti_source: Path to property list file containing a dictionary mapping UFO masters to dictionaries mapping layout table tags to MTI source paths which should be compiled into those tables. kwargs: Arguments passed along to run_from_designspace.
[ "Run", "toolchain", "from", "Glyphs", "source", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L682-L719
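A usage sketch for run_from_glyphs, assuming default FontProject construction; the source path and family name are illustrative, and any extra keyword arguments would be forwarded to run_from_designspace.

from fontmake.font_project import FontProject

project = FontProject()

# Generates master UFOs and a designspace in "master_ufo/", then continues
# the build via run_from_designspace.
project.run_from_glyphs("MyFamily.glyphs", family_name="My Family")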
7,085
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.interpolate_instance_ufos
def interpolate_instance_ufos( self, designspace, include=None, round_instances=False, expand_features_to_instances=False, ): """Interpolate master UFOs with MutatorMath and return instance UFOs. Args: designspace: a DesignSpaceDocument object containing sources and instances. include (str): optional regular expression pattern to match the DS instance 'name' attribute and only interpolate the matching instances. round_instances (bool): round instances' coordinates to integer. expand_features_to_instances: parses the master feature file, expands all include()s and writes the resulting full feature file to all instance UFOs. Use this if you share feature files among masters in external files. Otherwise, the relative include paths can break as instances may end up elsewhere. Only done on interpolation. Returns: list of defcon.Font objects corresponding to the UFO instances. Raises: FontmakeError: if any of the sources defines a custom 'layer', for this is not supported by MutatorMath. ValueError: "expand_features_to_instances" is True but no source in the designspace document is designated with '<features copy="1"/>'. """ from glyphsLib.interpolation import apply_instance_data from mutatorMath.ufo.document import DesignSpaceDocumentReader if any(source.layerName is not None for source in designspace.sources): raise FontmakeError( "MutatorMath doesn't support DesignSpace sources with 'layer' " "attribute" ) # TODO: replace mutatorMath with ufoProcessor? builder = DesignSpaceDocumentReader( designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True ) logger.info("Interpolating master UFOs from designspace") if include is not None: instances = self._search_instances(designspace, pattern=include) for instance_name in instances: builder.readInstance(("name", instance_name)) filenames = set(instances.values()) else: builder.readInstances() filenames = None # will include all instances logger.info("Applying instance data from designspace") instance_ufos = apply_instance_data(designspace, include_filenames=filenames) if expand_features_to_instances: logger.debug("Expanding features to instance UFOs") master_source = next( (s for s in designspace.sources if s.copyFeatures), None ) if not master_source: raise ValueError("No source is designated as the master for features.") else: master_source_font = builder.sources[master_source.name][0] master_source_features = parseLayoutFeatures(master_source_font).asFea() for instance_ufo in instance_ufos: instance_ufo.features.text = master_source_features instance_ufo.save() return instance_ufos
python
def interpolate_instance_ufos( self, designspace, include=None, round_instances=False, expand_features_to_instances=False, ): """Interpolate master UFOs with MutatorMath and return instance UFOs. Args: designspace: a DesignSpaceDocument object containing sources and instances. include (str): optional regular expression pattern to match the DS instance 'name' attribute and only interpolate the matching instances. round_instances (bool): round instances' coordinates to integer. expand_features_to_instances: parses the master feature file, expands all include()s and writes the resulting full feature file to all instance UFOs. Use this if you share feature files among masters in external files. Otherwise, the relative include paths can break as instances may end up elsewhere. Only done on interpolation. Returns: list of defcon.Font objects corresponding to the UFO instances. Raises: FontmakeError: if any of the sources defines a custom 'layer', for this is not supported by MutatorMath. ValueError: "expand_features_to_instances" is True but no source in the designspace document is designated with '<features copy="1"/>'. """ from glyphsLib.interpolation import apply_instance_data from mutatorMath.ufo.document import DesignSpaceDocumentReader if any(source.layerName is not None for source in designspace.sources): raise FontmakeError( "MutatorMath doesn't support DesignSpace sources with 'layer' " "attribute" ) # TODO: replace mutatorMath with ufoProcessor? builder = DesignSpaceDocumentReader( designspace.path, ufoVersion=3, roundGeometry=round_instances, verbose=True ) logger.info("Interpolating master UFOs from designspace") if include is not None: instances = self._search_instances(designspace, pattern=include) for instance_name in instances: builder.readInstance(("name", instance_name)) filenames = set(instances.values()) else: builder.readInstances() filenames = None # will include all instances logger.info("Applying instance data from designspace") instance_ufos = apply_instance_data(designspace, include_filenames=filenames) if expand_features_to_instances: logger.debug("Expanding features to instance UFOs") master_source = next( (s for s in designspace.sources if s.copyFeatures), None ) if not master_source: raise ValueError("No source is designated as the master for features.") else: master_source_font = builder.sources[master_source.name][0] master_source_features = parseLayoutFeatures(master_source_font).asFea() for instance_ufo in instance_ufos: instance_ufo.features.text = master_source_features instance_ufo.save() return instance_ufos
[ "def", "interpolate_instance_ufos", "(", "self", ",", "designspace", ",", "include", "=", "None", ",", "round_instances", "=", "False", ",", "expand_features_to_instances", "=", "False", ",", ")", ":", "from", "glyphsLib", ".", "interpolation", "import", "apply_instance_data", "from", "mutatorMath", ".", "ufo", ".", "document", "import", "DesignSpaceDocumentReader", "if", "any", "(", "source", ".", "layerName", "is", "not", "None", "for", "source", "in", "designspace", ".", "sources", ")", ":", "raise", "FontmakeError", "(", "\"MutatorMath doesn't support DesignSpace sources with 'layer' \"", "\"attribute\"", ")", "# TODO: replace mutatorMath with ufoProcessor?", "builder", "=", "DesignSpaceDocumentReader", "(", "designspace", ".", "path", ",", "ufoVersion", "=", "3", ",", "roundGeometry", "=", "round_instances", ",", "verbose", "=", "True", ")", "logger", ".", "info", "(", "\"Interpolating master UFOs from designspace\"", ")", "if", "include", "is", "not", "None", ":", "instances", "=", "self", ".", "_search_instances", "(", "designspace", ",", "pattern", "=", "include", ")", "for", "instance_name", "in", "instances", ":", "builder", ".", "readInstance", "(", "(", "\"name\"", ",", "instance_name", ")", ")", "filenames", "=", "set", "(", "instances", ".", "values", "(", ")", ")", "else", ":", "builder", ".", "readInstances", "(", ")", "filenames", "=", "None", "# will include all instances", "logger", ".", "info", "(", "\"Applying instance data from designspace\"", ")", "instance_ufos", "=", "apply_instance_data", "(", "designspace", ",", "include_filenames", "=", "filenames", ")", "if", "expand_features_to_instances", ":", "logger", ".", "debug", "(", "\"Expanding features to instance UFOs\"", ")", "master_source", "=", "next", "(", "(", "s", "for", "s", "in", "designspace", ".", "sources", "if", "s", ".", "copyFeatures", ")", ",", "None", ")", "if", "not", "master_source", ":", "raise", "ValueError", "(", "\"No source is designated as the master for features.\"", ")", "else", ":", "master_source_font", "=", "builder", ".", "sources", "[", "master_source", ".", "name", "]", "[", "0", "]", "master_source_features", "=", "parseLayoutFeatures", "(", "master_source_font", ")", ".", "asFea", "(", ")", "for", "instance_ufo", "in", "instance_ufos", ":", "instance_ufo", ".", "features", ".", "text", "=", "master_source_features", "instance_ufo", ".", "save", "(", ")", "return", "instance_ufos" ]
Interpolate master UFOs with MutatorMath and return instance UFOs. Args: designspace: a DesignSpaceDocument object containing sources and instances. include (str): optional regular expression pattern to match the DS instance 'name' attribute and only interpolate the matching instances. round_instances (bool): round instances' coordinates to integer. expand_features_to_instances: parses the master feature file, expands all include()s and writes the resulting full feature file to all instance UFOs. Use this if you share feature files among masters in external files. Otherwise, the relative include paths can break as instances may end up elsewhere. Only done on interpolation. Returns: list of defcon.Font objects corresponding to the UFO instances. Raises: FontmakeError: if any of the sources defines a custom 'layer', for this is not supported by MutatorMath. ValueError: "expand_features_to_instances" is True but no source in the designspace document is designated with '<features copy="1"/>'.
[ "Interpolate", "master", "UFOs", "with", "MutatorMath", "and", "return", "instance", "UFOs", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L721-L789
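A sketch of interpolating a subset of instances, assuming mutatorMath and glyphsLib are installed and that the designspace names its instances; the file path and the include pattern are illustrative.

from fontTools import designspaceLib
from fontmake.font_project import FontProject

designspace = designspaceLib.DesignSpaceDocument.fromfile("MyFamily.designspace")

project = FontProject()
# Only instances whose designspace 'name' matches the pattern are generated,
# with coordinates rounded to integers.
instance_ufos = project.interpolate_instance_ufos(
    designspace, include="Bold", round_instances=True
)
for ufo in instance_ufos:
    print(ufo.info.styleName)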
7,086
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject.run_from_ufos
def run_from_ufos(self, ufos, output=(), **kwargs): """Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs. """ if set(output) == {"ufo"}: return # the `ufos` parameter can be a list of UFO objects # or it can be a path (string) with a glob syntax ufo_paths = [] if isinstance(ufos, basestring): ufo_paths = glob.glob(ufos) ufos = [Font(x) for x in ufo_paths] elif isinstance(ufos, list): # ufos can be either paths or open Font objects, so normalize them ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos] ufo_paths = [x.path for x in ufos] else: raise FontmakeError( "UFOs parameter is neither a defcon.Font object, a path or a glob, " "nor a list of any of these.", ufos, ) need_reload = False if "otf" in output: self.build_otfs(ufos, **kwargs) need_reload = True if "ttf" in output: if need_reload: ufos = [Font(path) for path in ufo_paths] self.build_ttfs(ufos, **kwargs) need_reload = True
python
def run_from_ufos(self, ufos, output=(), **kwargs): """Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs. """ if set(output) == {"ufo"}: return # the `ufos` parameter can be a list of UFO objects # or it can be a path (string) with a glob syntax ufo_paths = [] if isinstance(ufos, basestring): ufo_paths = glob.glob(ufos) ufos = [Font(x) for x in ufo_paths] elif isinstance(ufos, list): # ufos can be either paths or open Font objects, so normalize them ufos = [Font(x) if isinstance(x, basestring) else x for x in ufos] ufo_paths = [x.path for x in ufos] else: raise FontmakeError( "UFOs parameter is neither a defcon.Font object, a path or a glob, " "nor a list of any of these.", ufos, ) need_reload = False if "otf" in output: self.build_otfs(ufos, **kwargs) need_reload = True if "ttf" in output: if need_reload: ufos = [Font(path) for path in ufo_paths] self.build_ttfs(ufos, **kwargs) need_reload = True
[ "def", "run_from_ufos", "(", "self", ",", "ufos", ",", "output", "=", "(", ")", ",", "*", "*", "kwargs", ")", ":", "if", "set", "(", "output", ")", "==", "{", "\"ufo\"", "}", ":", "return", "# the `ufos` parameter can be a list of UFO objects", "# or it can be a path (string) with a glob syntax", "ufo_paths", "=", "[", "]", "if", "isinstance", "(", "ufos", ",", "basestring", ")", ":", "ufo_paths", "=", "glob", ".", "glob", "(", "ufos", ")", "ufos", "=", "[", "Font", "(", "x", ")", "for", "x", "in", "ufo_paths", "]", "elif", "isinstance", "(", "ufos", ",", "list", ")", ":", "# ufos can be either paths or open Font objects, so normalize them", "ufos", "=", "[", "Font", "(", "x", ")", "if", "isinstance", "(", "x", ",", "basestring", ")", "else", "x", "for", "x", "in", "ufos", "]", "ufo_paths", "=", "[", "x", ".", "path", "for", "x", "in", "ufos", "]", "else", ":", "raise", "FontmakeError", "(", "\"UFOs parameter is neither a defcon.Font object, a path or a glob, \"", "\"nor a list of any of these.\"", ",", "ufos", ",", ")", "need_reload", "=", "False", "if", "\"otf\"", "in", "output", ":", "self", ".", "build_otfs", "(", "ufos", ",", "*", "*", "kwargs", ")", "need_reload", "=", "True", "if", "\"ttf\"", "in", "output", ":", "if", "need_reload", ":", "ufos", "=", "[", "Font", "(", "path", ")", "for", "path", "in", "ufo_paths", "]", "self", ".", "build_ttfs", "(", "ufos", ",", "*", "*", "kwargs", ")", "need_reload", "=", "True" ]
Run toolchain from UFO sources. Args: ufos: List of UFO sources, as either paths or opened objects. output: List of output formats to generate. kwargs: Arguments passed along to save_otfs.
[ "Run", "toolchain", "from", "UFO", "sources", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L943-L981
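A sketch of the three accepted input shapes for run_from_ufos; the paths are illustrative, and extra keyword arguments are passed along to the binary build steps.

from defcon import Font
from fontmake.font_project import FontProject

project = FontProject()

# A glob string, a list of paths, or a list of opened defcon Fonts all work.
project.run_from_ufos("master_ufo/*.ufo", output=("ttf",))
project.run_from_ufos(["master_ufo/MyFamily-Regular.ufo"], output=("otf", "ttf"))
project.run_from_ufos([Font("master_ufo/MyFamily-Bold.ufo")], output=("otf",))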
7,087
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject._font_name
def _font_name(self, ufo): """Generate a postscript-style font name.""" family_name = ( ufo.info.familyName.replace(" ", "") if ufo.info.familyName is not None else "None" ) style_name = ( ufo.info.styleName.replace(" ", "") if ufo.info.styleName is not None else "None" ) return "{}-{}".format(family_name, style_name)
python
def _font_name(self, ufo): """Generate a postscript-style font name.""" family_name = ( ufo.info.familyName.replace(" ", "") if ufo.info.familyName is not None else "None" ) style_name = ( ufo.info.styleName.replace(" ", "") if ufo.info.styleName is not None else "None" ) return "{}-{}".format(family_name, style_name)
[ "def", "_font_name", "(", "self", ",", "ufo", ")", ":", "family_name", "=", "(", "ufo", ".", "info", ".", "familyName", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "ufo", ".", "info", ".", "familyName", "is", "not", "None", "else", "\"None\"", ")", "style_name", "=", "(", "ufo", ".", "info", ".", "styleName", ".", "replace", "(", "\" \"", ",", "\"\"", ")", "if", "ufo", ".", "info", ".", "styleName", "is", "not", "None", "else", "\"None\"", ")", "return", "\"{}-{}\"", ".", "format", "(", "family_name", ",", "style_name", ")" ]
Generate a postscript-style font name.
[ "Generate", "a", "postscript", "-", "style", "font", "name", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L994-L1006
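A small illustration of the naming behaviour above; the assertion follows directly from the string handling in the method and assumes FontProject() accepts default construction.

from defcon import Font
from fontmake.font_project import FontProject

ufo = Font()
ufo.info.familyName = "My Family"
ufo.info.styleName = "Semi Bold"

# Spaces are stripped; a missing family or style name becomes the string "None".
assert FontProject()._font_name(ufo) == "MyFamily-SemiBold"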
7,088
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject._output_dir
def _output_dir( self, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False, ): """Generate an output directory. Args: ext: extension string. is_instance: The output is instance font or not. interpolatable: The output is interpolatable or not. autohinted: The output is autohinted or not. is_variable: The output is variable font or not. Return: output directory string. """ assert not (is_variable and any([is_instance, interpolatable])) # FIXME? Use user configurable destination folders. if is_variable: dir_prefix = "variable_" elif is_instance: dir_prefix = "instance_" else: dir_prefix = "master_" dir_suffix = "_interpolatable" if interpolatable else "" output_dir = dir_prefix + ext + dir_suffix if autohinted: output_dir = os.path.join("autohinted", output_dir) return output_dir
python
def _output_dir( self, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False, ): """Generate an output directory. Args: ext: extension string. is_instance: The output is instance font or not. interpolatable: The output is interpolatable or not. autohinted: The output is autohinted or not. is_variable: The output is variable font or not. Return: output directory string. """ assert not (is_variable and any([is_instance, interpolatable])) # FIXME? Use user configurable destination folders. if is_variable: dir_prefix = "variable_" elif is_instance: dir_prefix = "instance_" else: dir_prefix = "master_" dir_suffix = "_interpolatable" if interpolatable else "" output_dir = dir_prefix + ext + dir_suffix if autohinted: output_dir = os.path.join("autohinted", output_dir) return output_dir
[ "def", "_output_dir", "(", "self", ",", "ext", ",", "is_instance", "=", "False", ",", "interpolatable", "=", "False", ",", "autohinted", "=", "False", ",", "is_variable", "=", "False", ",", ")", ":", "assert", "not", "(", "is_variable", "and", "any", "(", "[", "is_instance", ",", "interpolatable", "]", ")", ")", "# FIXME? Use user configurable destination folders.", "if", "is_variable", ":", "dir_prefix", "=", "\"variable_\"", "elif", "is_instance", ":", "dir_prefix", "=", "\"instance_\"", "else", ":", "dir_prefix", "=", "\"master_\"", "dir_suffix", "=", "\"_interpolatable\"", "if", "interpolatable", "else", "\"\"", "output_dir", "=", "dir_prefix", "+", "ext", "+", "dir_suffix", "if", "autohinted", ":", "output_dir", "=", "os", ".", "path", ".", "join", "(", "\"autohinted\"", ",", "output_dir", ")", "return", "output_dir" ]
Generate an output directory. Args: ext: extension string. is_instance: The output is instance font or not. interpolatable: The output is interpolatable or not. autohinted: The output is autohinted or not. is_variable: The output is variable font or not. Return: output directory string.
[ "Generate", "an", "output", "directory", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1008-L1040
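The directory names produced by the flags above, shown as a short sketch; the values in the comments follow directly from the prefix/suffix logic in the method.

from fontmake.font_project import FontProject

project = FontProject()
project._output_dir("ttf")                                     # "master_ttf"
project._output_dir("ttf", interpolatable=True)                # "master_ttf_interpolatable"
project._output_dir("otf", is_instance=True)                   # "instance_otf"
project._output_dir("ttf", is_variable=True)                   # "variable_ttf"
project._output_dir("ttf", is_instance=True, autohinted=True)  # "autohinted/instance_ttf" (on POSIX)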
7,089
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject._output_path
def _output_path( self, ufo_or_font_name, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False, output_dir=None, suffix=None, ): """Generate output path for a font file with given extension.""" if isinstance(ufo_or_font_name, basestring): font_name = ufo_or_font_name elif ufo_or_font_name.path: font_name = os.path.splitext( os.path.basename(os.path.normpath(ufo_or_font_name.path)) )[0] else: font_name = self._font_name(ufo_or_font_name) if output_dir is None: output_dir = self._output_dir( ext, is_instance, interpolatable, autohinted, is_variable ) if not os.path.exists(output_dir): os.makedirs(output_dir) if suffix: return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext)) else: return os.path.join(output_dir, "{}.{}".format(font_name, ext))
python
def _output_path( self, ufo_or_font_name, ext, is_instance=False, interpolatable=False, autohinted=False, is_variable=False, output_dir=None, suffix=None, ): """Generate output path for a font file with given extension.""" if isinstance(ufo_or_font_name, basestring): font_name = ufo_or_font_name elif ufo_or_font_name.path: font_name = os.path.splitext( os.path.basename(os.path.normpath(ufo_or_font_name.path)) )[0] else: font_name = self._font_name(ufo_or_font_name) if output_dir is None: output_dir = self._output_dir( ext, is_instance, interpolatable, autohinted, is_variable ) if not os.path.exists(output_dir): os.makedirs(output_dir) if suffix: return os.path.join(output_dir, "{}-{}.{}".format(font_name, suffix, ext)) else: return os.path.join(output_dir, "{}.{}".format(font_name, ext))
[ "def", "_output_path", "(", "self", ",", "ufo_or_font_name", ",", "ext", ",", "is_instance", "=", "False", ",", "interpolatable", "=", "False", ",", "autohinted", "=", "False", ",", "is_variable", "=", "False", ",", "output_dir", "=", "None", ",", "suffix", "=", "None", ",", ")", ":", "if", "isinstance", "(", "ufo_or_font_name", ",", "basestring", ")", ":", "font_name", "=", "ufo_or_font_name", "elif", "ufo_or_font_name", ".", "path", ":", "font_name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "os", ".", "path", ".", "normpath", "(", "ufo_or_font_name", ".", "path", ")", ")", ")", "[", "0", "]", "else", ":", "font_name", "=", "self", ".", "_font_name", "(", "ufo_or_font_name", ")", "if", "output_dir", "is", "None", ":", "output_dir", "=", "self", ".", "_output_dir", "(", "ext", ",", "is_instance", ",", "interpolatable", ",", "autohinted", ",", "is_variable", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "if", "suffix", ":", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"{}-{}.{}\"", ".", "format", "(", "font_name", ",", "suffix", ",", "ext", ")", ")", "else", ":", "return", "os", ".", "path", ".", "join", "(", "output_dir", ",", "\"{}.{}\"", ".", "format", "(", "font_name", ",", "ext", ")", ")" ]
Generate output path for a font file with given extension.
[ "Generate", "output", "path", "for", "a", "font", "file", "with", "given", "extension", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1042-L1074
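A short sketch of the path construction; note that the method creates the output directory as a side effect, and the font name here is illustrative.

from fontmake.font_project import FontProject

project = FontProject()

# Derives the directory from the flags unless output_dir is given, creating it
# if necessary, and appends an optional suffix before the extension.
path = project._output_path("MyFamily-Regular", "ttf", is_instance=True)
# -> "instance_ttf/MyFamily-Regular.ttf" (on POSIX)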
7,090
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject._designspace_locations
def _designspace_locations(self, designspace): """Map font filenames to their locations in a designspace.""" maps = [] for elements in (designspace.sources, designspace.instances): location_map = {} for element in elements: path = _normpath(element.path) location_map[path] = element.location maps.append(location_map) return maps
python
def _designspace_locations(self, designspace): """Map font filenames to their locations in a designspace.""" maps = [] for elements in (designspace.sources, designspace.instances): location_map = {} for element in elements: path = _normpath(element.path) location_map[path] = element.location maps.append(location_map) return maps
[ "def", "_designspace_locations", "(", "self", ",", "designspace", ")", ":", "maps", "=", "[", "]", "for", "elements", "in", "(", "designspace", ".", "sources", ",", "designspace", ".", "instances", ")", ":", "location_map", "=", "{", "}", "for", "element", "in", "elements", ":", "path", "=", "_normpath", "(", "element", ".", "path", ")", "location_map", "[", "path", "]", "=", "element", ".", "location", "maps", ".", "append", "(", "location_map", ")", "return", "maps" ]
Map font filenames to their locations in a designspace.
[ "Map", "font", "filenames", "to", "their", "locations", "in", "a", "designspace", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1076-L1086
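A sketch of how the two returned maps might be consumed, assuming the designspace file exists; the paths and axis names in the comment are illustrative.

from fontTools import designspaceLib
from fontmake.font_project import FontProject

designspace = designspaceLib.DesignSpaceDocument.fromfile("MyFamily.designspace")
source_map, instance_map = FontProject()._designspace_locations(designspace)

# Each map keys normalized font paths to axis locations, e.g.
# {"/abs/master_ufo/MyFamily-Light.ufo": {"weight": 300}, ...}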
7,091
googlefonts/fontmake
Lib/fontmake/font_project.py
FontProject._closest_location
def _closest_location(self, location_map, target): """Return path of font whose location is closest to target.""" def dist(a, b): return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys())) paths = iter(location_map.keys()) closest = next(paths) closest_dist = dist(target, location_map[closest]) for path in paths: cur_dist = dist(target, location_map[path]) if cur_dist < closest_dist: closest = path closest_dist = cur_dist return closest
python
def _closest_location(self, location_map, target): """Return path of font whose location is closest to target.""" def dist(a, b): return math.sqrt(sum((a[k] - b[k]) ** 2 for k in a.keys())) paths = iter(location_map.keys()) closest = next(paths) closest_dist = dist(target, location_map[closest]) for path in paths: cur_dist = dist(target, location_map[path]) if cur_dist < closest_dist: closest = path closest_dist = cur_dist return closest
[ "def", "_closest_location", "(", "self", ",", "location_map", ",", "target", ")", ":", "def", "dist", "(", "a", ",", "b", ")", ":", "return", "math", ".", "sqrt", "(", "sum", "(", "(", "a", "[", "k", "]", "-", "b", "[", "k", "]", ")", "**", "2", "for", "k", "in", "a", ".", "keys", "(", ")", ")", ")", "paths", "=", "iter", "(", "location_map", ".", "keys", "(", ")", ")", "closest", "=", "next", "(", "paths", ")", "closest_dist", "=", "dist", "(", "target", ",", "location_map", "[", "closest", "]", ")", "for", "path", "in", "paths", ":", "cur_dist", "=", "dist", "(", "target", ",", "location_map", "[", "path", "]", ")", "if", "cur_dist", "<", "closest_dist", ":", "closest", "=", "path", "closest_dist", "=", "cur_dist", "return", "closest" ]
Return path of font whose location is closest to target.
[ "Return", "path", "of", "font", "whose", "location", "is", "closest", "to", "target", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/font_project.py#L1088-L1102
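A worked example of the Euclidean-distance lookup above; the location map and the target are illustrative.

from fontmake.font_project import FontProject

location_map = {
    "MyFamily-Light.ufo":   {"weight": 300},
    "MyFamily-Regular.ufo": {"weight": 400},
    "MyFamily-Bold.ufo":    {"weight": 700},
}

# Distances from weight 450 are 150, 50 and 250, so the Regular master wins.
assert FontProject()._closest_location(location_map, {"weight": 450}) == "MyFamily-Regular.ufo"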
7,092
googlefonts/fontmake
Lib/fontmake/ttfautohint.py
ttfautohint
def ttfautohint(in_file, out_file, args=None, **kwargs): """Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments. """ arg_list = ["ttfautohint"] file_args = [in_file, out_file] if args is not None: if kwargs: raise TypeError("Should not provide both cmd args and kwargs.") rv = subprocess.call(arg_list + args.split() + file_args) if rv != 0: raise TTFAError(rv) return boolean_options = ( "debug", "composites", "dehint", "help", "ignore_restrictions", "detailed_info", "no_info", "adjust_subglyphs", "symbol", "ttfa_table", "verbose", "version", "windows_compatibility", ) other_options = ( "default_script", "fallback_script", "family_suffix", "hinting_limit", "fallback_stem_width", "hinting_range_min", "control_file", "hinting_range_max", "strong_stem_width", "increase_x_height", "x_height_snapping_exceptions", ) for option in boolean_options: if kwargs.pop(option, False): arg_list.append("--" + option.replace("_", "-")) for option in other_options: arg = kwargs.pop(option, None) if arg is not None: arg_list.append("--{}={}".format(option.replace("_", "-"), arg)) if kwargs: raise TypeError("Unexpected argument(s): " + ", ".join(kwargs.keys())) rv = subprocess.call(arg_list + file_args) if rv != 0: raise TTFAError(rv)
python
def ttfautohint(in_file, out_file, args=None, **kwargs): """Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments. """ arg_list = ["ttfautohint"] file_args = [in_file, out_file] if args is not None: if kwargs: raise TypeError("Should not provide both cmd args and kwargs.") rv = subprocess.call(arg_list + args.split() + file_args) if rv != 0: raise TTFAError(rv) return boolean_options = ( "debug", "composites", "dehint", "help", "ignore_restrictions", "detailed_info", "no_info", "adjust_subglyphs", "symbol", "ttfa_table", "verbose", "version", "windows_compatibility", ) other_options = ( "default_script", "fallback_script", "family_suffix", "hinting_limit", "fallback_stem_width", "hinting_range_min", "control_file", "hinting_range_max", "strong_stem_width", "increase_x_height", "x_height_snapping_exceptions", ) for option in boolean_options: if kwargs.pop(option, False): arg_list.append("--" + option.replace("_", "-")) for option in other_options: arg = kwargs.pop(option, None) if arg is not None: arg_list.append("--{}={}".format(option.replace("_", "-"), arg)) if kwargs: raise TypeError("Unexpected argument(s): " + ", ".join(kwargs.keys())) rv = subprocess.call(arg_list + file_args) if rv != 0: raise TTFAError(rv)
[ "def", "ttfautohint", "(", "in_file", ",", "out_file", ",", "args", "=", "None", ",", "*", "*", "kwargs", ")", ":", "arg_list", "=", "[", "\"ttfautohint\"", "]", "file_args", "=", "[", "in_file", ",", "out_file", "]", "if", "args", "is", "not", "None", ":", "if", "kwargs", ":", "raise", "TypeError", "(", "\"Should not provide both cmd args and kwargs.\"", ")", "rv", "=", "subprocess", ".", "call", "(", "arg_list", "+", "args", ".", "split", "(", ")", "+", "file_args", ")", "if", "rv", "!=", "0", ":", "raise", "TTFAError", "(", "rv", ")", "return", "boolean_options", "=", "(", "\"debug\"", ",", "\"composites\"", ",", "\"dehint\"", ",", "\"help\"", ",", "\"ignore_restrictions\"", ",", "\"detailed_info\"", ",", "\"no_info\"", ",", "\"adjust_subglyphs\"", ",", "\"symbol\"", ",", "\"ttfa_table\"", ",", "\"verbose\"", ",", "\"version\"", ",", "\"windows_compatibility\"", ",", ")", "other_options", "=", "(", "\"default_script\"", ",", "\"fallback_script\"", ",", "\"family_suffix\"", ",", "\"hinting_limit\"", ",", "\"fallback_stem_width\"", ",", "\"hinting_range_min\"", ",", "\"control_file\"", ",", "\"hinting_range_max\"", ",", "\"strong_stem_width\"", ",", "\"increase_x_height\"", ",", "\"x_height_snapping_exceptions\"", ",", ")", "for", "option", "in", "boolean_options", ":", "if", "kwargs", ".", "pop", "(", "option", ",", "False", ")", ":", "arg_list", ".", "append", "(", "\"--\"", "+", "option", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ")", "for", "option", "in", "other_options", ":", "arg", "=", "kwargs", ".", "pop", "(", "option", ",", "None", ")", "if", "arg", "is", "not", "None", ":", "arg_list", ".", "append", "(", "\"--{}={}\"", ".", "format", "(", "option", ".", "replace", "(", "\"_\"", ",", "\"-\"", ")", ",", "arg", ")", ")", "if", "kwargs", ":", "raise", "TypeError", "(", "\"Unexpected argument(s): \"", "+", "\", \"", ".", "join", "(", "kwargs", ".", "keys", "(", ")", ")", ")", "rv", "=", "subprocess", ".", "call", "(", "arg_list", "+", "file_args", ")", "if", "rv", "!=", "0", ":", "raise", "TTFAError", "(", "rv", ")" ]
Thin wrapper around the ttfautohint command line tool. Can take in command line arguments directly as a string, or spelled out as Python keyword arguments.
[ "Thin", "wrapper", "around", "the", "ttfautohint", "command", "line", "tool", "." ]
b611baf49929575c2a30fd18662055365219ce2d
https://github.com/googlefonts/fontmake/blob/b611baf49929575c2a30fd18662055365219ce2d/Lib/fontmake/ttfautohint.py#L21-L82
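A usage sketch for the wrapper above; it shells out to the ttfautohint executable, which must be on PATH, and the two calling styles are mutually exclusive. File names are illustrative.

from fontmake.ttfautohint import ttfautohint

# Keyword style: booleans become bare flags, other values become --key=value.
ttfautohint("unhinted.ttf", "hinted.ttf", hinting_range_min=8, no_info=True)

# Raw argument string style (cannot be combined with keyword options).
ttfautohint("unhinted.ttf", "hinted.ttf", args="--hinting-range-min=8 --no-info")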
7,093
ulule/python-logstash-formatter
logstash_formatter/__init__.py
_default_json_default
def _default_json_default(obj): """ Coerce everything to strings. All objects representing time get output as ISO8601. """ if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)): return obj.isoformat() else: return str(obj)
python
def _default_json_default(obj): """ Coerce everything to strings. All objects representing time get output as ISO8601. """ if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)): return obj.isoformat() else: return str(obj)
[ "def", "_default_json_default", "(", "obj", ")", ":", "if", "isinstance", "(", "obj", ",", "(", "datetime", ".", "datetime", ",", "datetime", ".", "date", ",", "datetime", ".", "time", ")", ")", ":", "return", "obj", ".", "isoformat", "(", ")", "else", ":", "return", "str", "(", "obj", ")" ]
Coerce everything to strings. All objects representing time get output as ISO8601.
[ "Coerce", "everything", "to", "strings", ".", "All", "objects", "representing", "time", "get", "output", "as", "ISO8601", "." ]
a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea
https://github.com/ulule/python-logstash-formatter/blob/a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea/logstash_formatter/__init__.py#L13-L21
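A small sketch of the fallback serializer in use with the standard json module; importing the underscore-prefixed helper directly is for illustration only.

import datetime
import json

from logstash_formatter import _default_json_default

payload = {"when": datetime.datetime(2019, 1, 1, 12, 30), "id": object()}
# Datetimes serialize as ISO 8601; anything else falls back to str().
print(json.dumps(payload, default=_default_json_default))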
7,094
ulule/python-logstash-formatter
logstash_formatter/__init__.py
LogstashFormatter.format
def format(self, record): """ Format a log record to JSON, if the message is a dict assume an empty message and use the dict as additional fields. """ fields = record.__dict__.copy() if isinstance(record.msg, dict): fields.update(record.msg) fields.pop('msg') msg = "" else: msg = record.getMessage() try: msg = msg.format(**fields) except (KeyError, IndexError, ValueError): pass except: # in case we can not format the msg properly we log it as is instead of crashing msg = msg if 'msg' in fields: fields.pop('msg') if 'exc_info' in fields: if fields['exc_info']: formatted = tb.format_exception(*fields['exc_info']) fields['exception'] = formatted fields.pop('exc_info') if 'exc_text' in fields and not fields['exc_text']: fields.pop('exc_text') logr = self.defaults.copy() logr.update({'@message': msg, '@timestamp': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'), '@source_host': self.source_host, '@fields': self._build_fields(logr, fields)}) return json.dumps(logr, default=self.json_default, cls=self.json_cls)
python
def format(self, record): """ Format a log record to JSON, if the message is a dict assume an empty message and use the dict as additional fields. """ fields = record.__dict__.copy() if isinstance(record.msg, dict): fields.update(record.msg) fields.pop('msg') msg = "" else: msg = record.getMessage() try: msg = msg.format(**fields) except (KeyError, IndexError, ValueError): pass except: # in case we can not format the msg properly we log it as is instead of crashing msg = msg if 'msg' in fields: fields.pop('msg') if 'exc_info' in fields: if fields['exc_info']: formatted = tb.format_exception(*fields['exc_info']) fields['exception'] = formatted fields.pop('exc_info') if 'exc_text' in fields and not fields['exc_text']: fields.pop('exc_text') logr = self.defaults.copy() logr.update({'@message': msg, '@timestamp': datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ'), '@source_host': self.source_host, '@fields': self._build_fields(logr, fields)}) return json.dumps(logr, default=self.json_default, cls=self.json_cls)
[ "def", "format", "(", "self", ",", "record", ")", ":", "fields", "=", "record", ".", "__dict__", ".", "copy", "(", ")", "if", "isinstance", "(", "record", ".", "msg", ",", "dict", ")", ":", "fields", ".", "update", "(", "record", ".", "msg", ")", "fields", ".", "pop", "(", "'msg'", ")", "msg", "=", "\"\"", "else", ":", "msg", "=", "record", ".", "getMessage", "(", ")", "try", ":", "msg", "=", "msg", ".", "format", "(", "*", "*", "fields", ")", "except", "(", "KeyError", ",", "IndexError", ",", "ValueError", ")", ":", "pass", "except", ":", "# in case we can not format the msg properly we log it as is instead of crashing", "msg", "=", "msg", "if", "'msg'", "in", "fields", ":", "fields", ".", "pop", "(", "'msg'", ")", "if", "'exc_info'", "in", "fields", ":", "if", "fields", "[", "'exc_info'", "]", ":", "formatted", "=", "tb", ".", "format_exception", "(", "*", "fields", "[", "'exc_info'", "]", ")", "fields", "[", "'exception'", "]", "=", "formatted", "fields", ".", "pop", "(", "'exc_info'", ")", "if", "'exc_text'", "in", "fields", "and", "not", "fields", "[", "'exc_text'", "]", ":", "fields", ".", "pop", "(", "'exc_text'", ")", "logr", "=", "self", ".", "defaults", ".", "copy", "(", ")", "logr", ".", "update", "(", "{", "'@message'", ":", "msg", ",", "'@timestamp'", ":", "datetime", ".", "datetime", ".", "utcnow", "(", ")", ".", "strftime", "(", "'%Y-%m-%dT%H:%M:%S.%fZ'", ")", ",", "'@source_host'", ":", "self", ".", "source_host", ",", "'@fields'", ":", "self", ".", "_build_fields", "(", "logr", ",", "fields", ")", "}", ")", "return", "json", ".", "dumps", "(", "logr", ",", "default", "=", "self", ".", "json_default", ",", "cls", "=", "self", ".", "json_cls", ")" ]
Format a log record to JSON, if the message is a dict assume an empty message and use the dict as additional fields.
[ "Format", "a", "log", "record", "to", "JSON", "if", "the", "message", "is", "a", "dict", "assume", "an", "empty", "message", "and", "use", "the", "dict", "as", "additional", "fields", "." ]
a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea
https://github.com/ulule/python-logstash-formatter/blob/a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea/logstash_formatter/__init__.py#L65-L108
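A sketch of wiring the formatter into the standard logging module, assuming LogstashFormatter can be constructed with default arguments; the logger name is illustrative.

import logging

from logstash_formatter import LogstashFormatter

handler = logging.StreamHandler()
handler.setFormatter(LogstashFormatter())

logger = logging.getLogger("demo")
logger.addHandler(handler)
logger.setLevel(logging.INFO)

# A plain message is formatted into "@message"; a dict message is merged
# into the record's "@fields" with an empty "@message".
logger.info("user %s logged in", "alice")
logger.info({"event": "login", "user": "alice"})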
7,095
ulule/python-logstash-formatter
logstash_formatter/__init__.py
LogstashFormatter._build_fields
def _build_fields(self, defaults, fields): """Return provided fields including any in defaults >>> f = LogstashFormatter() # Verify that ``fields`` is used >>> f._build_fields({}, {'foo': 'one'}) == \ {'foo': 'one'} True # Verify that ``@fields`` in ``defaults`` is used >>> f._build_fields({'@fields': {'bar': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one', 'bar': 'two'} True # Verify that ``fields`` takes precedence >>> f._build_fields({'@fields': {'foo': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one'} True """ return dict(list(defaults.get('@fields', {}).items()) + list(fields.items()))
python
def _build_fields(self, defaults, fields): """Return provided fields including any in defaults >>> f = LogstashFormatter() # Verify that ``fields`` is used >>> f._build_fields({}, {'foo': 'one'}) == \ {'foo': 'one'} True # Verify that ``@fields`` in ``defaults`` is used >>> f._build_fields({'@fields': {'bar': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one', 'bar': 'two'} True # Verify that ``fields`` takes precedence >>> f._build_fields({'@fields': {'foo': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one'} True """ return dict(list(defaults.get('@fields', {}).items()) + list(fields.items()))
[ "def", "_build_fields", "(", "self", ",", "defaults", ",", "fields", ")", ":", "return", "dict", "(", "list", "(", "defaults", ".", "get", "(", "'@fields'", ",", "{", "}", ")", ".", "items", "(", ")", ")", "+", "list", "(", "fields", ".", "items", "(", ")", ")", ")" ]
Return provided fields including any in defaults >>> f = LogstashFormatter() # Verify that ``fields`` is used >>> f._build_fields({}, {'foo': 'one'}) == \ {'foo': 'one'} True # Verify that ``@fields`` in ``defaults`` is used >>> f._build_fields({'@fields': {'bar': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one', 'bar': 'two'} True # Verify that ``fields`` takes precedence >>> f._build_fields({'@fields': {'foo': 'two'}}, {'foo': 'one'}) == \ {'foo': 'one'} True
[ "Return", "provided", "fields", "including", "any", "in", "defaults" ]
a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea
https://github.com/ulule/python-logstash-formatter/blob/a29f7c8f5faec9467aaedfb74d5f40eacb2b50ea/logstash_formatter/__init__.py#L110-L127
7,096
tchellomello/python-arlo
pyarlo/__init__.py
PyArlo._authenticate
def _authenticate(self): """Authenticate user and generate token.""" self.cleanup_headers() url = LOGIN_ENDPOINT data = self.query( url, method='POST', extra_params={ 'email': self.__username, 'password': self.__password }) if isinstance(data, dict) and data.get('success'): data = data.get('data') self.authenticated = data.get('authenticated') self.country_code = data.get('countryCode') self.date_created = data.get('dateCreated') self.__token = data.get('token') self.userid = data.get('userId') # update header with the generated token self.__headers['Authorization'] = self.__token
python
def _authenticate(self): """Authenticate user and generate token.""" self.cleanup_headers() url = LOGIN_ENDPOINT data = self.query( url, method='POST', extra_params={ 'email': self.__username, 'password': self.__password }) if isinstance(data, dict) and data.get('success'): data = data.get('data') self.authenticated = data.get('authenticated') self.country_code = data.get('countryCode') self.date_created = data.get('dateCreated') self.__token = data.get('token') self.userid = data.get('userId') # update header with the generated token self.__headers['Authorization'] = self.__token
[ "def", "_authenticate", "(", "self", ")", ":", "self", ".", "cleanup_headers", "(", ")", "url", "=", "LOGIN_ENDPOINT", "data", "=", "self", ".", "query", "(", "url", ",", "method", "=", "'POST'", ",", "extra_params", "=", "{", "'email'", ":", "self", ".", "__username", ",", "'password'", ":", "self", ".", "__password", "}", ")", "if", "isinstance", "(", "data", ",", "dict", ")", "and", "data", ".", "get", "(", "'success'", ")", ":", "data", "=", "data", ".", "get", "(", "'data'", ")", "self", ".", "authenticated", "=", "data", ".", "get", "(", "'authenticated'", ")", "self", ".", "country_code", "=", "data", ".", "get", "(", "'countryCode'", ")", "self", ".", "date_created", "=", "data", ".", "get", "(", "'dateCreated'", ")", "self", ".", "__token", "=", "data", ".", "get", "(", "'token'", ")", "self", ".", "userid", "=", "data", ".", "get", "(", "'userId'", ")", "# update header with the generated token", "self", ".", "__headers", "[", "'Authorization'", "]", "=", "self", ".", "__token" ]
Authenticate user and generate token.
[ "Authenticate", "user", "and", "generate", "token", "." ]
db70aeb81705309c56ad32bbab1094f6cd146524
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L63-L84
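A sketch of the login flow from the caller's side, assuming authentication runs when PyArlo is constructed; the credentials are illustrative.

from pyarlo import PyArlo

arlo = PyArlo("user@example.com", "secret")

# On success the generated token is stored and reused via the
# Authorization header on subsequent queries.
print(arlo.authenticated, arlo.userid)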
7,097
tchellomello/python-arlo
pyarlo/__init__.py
PyArlo.cleanup_headers
def cleanup_headers(self): """Reset the headers and params.""" headers = {'Content-Type': 'application/json'} headers['Authorization'] = self.__token self.__headers = headers self.__params = {}
python
def cleanup_headers(self): """Reset the headers and params.""" headers = {'Content-Type': 'application/json'} headers['Authorization'] = self.__token self.__headers = headers self.__params = {}
[ "def", "cleanup_headers", "(", "self", ")", ":", "headers", "=", "{", "'Content-Type'", ":", "'application/json'", "}", "headers", "[", "'Authorization'", "]", "=", "self", ".", "__token", "self", ".", "__headers", "=", "headers", "self", ".", "__params", "=", "{", "}" ]
Reset the headers and params.
[ "Reset", "the", "headers", "and", "params", "." ]
db70aeb81705309c56ad32bbab1094f6cd146524
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L86-L91
7,098
tchellomello/python-arlo
pyarlo/__init__.py
PyArlo.query
def query(self, url, method='GET', extra_params=None, extra_headers=None, retry=3, raw=False, stream=False): """ Return a JSON object or raw session. :param url: Arlo API URL :param method: Specify the method GET, POST or PUT. Default is GET. :param extra_params: Dictionary to be appended on request.body :param extra_headers: Dictionary to be apppended on request.headers :param retry: Attempts to retry a query. Default is 3. :param raw: Boolean if query() will return request object instead JSON. :param stream: Boolean if query() will return a stream object. """ response = None loop = 0 # always make sure the headers and params are clean self.cleanup_headers() while loop <= retry: # override request.body or request.headers dictionary if extra_params: params = self.__params params.update(extra_params) else: params = self.__params _LOGGER.debug("Params: %s", params) if extra_headers: headers = self.__headers headers.update(extra_headers) else: headers = self.__headers _LOGGER.debug("Headers: %s", headers) _LOGGER.debug("Querying %s on attempt: %s/%s", url, loop, retry) loop += 1 # define connection method req = None if method == 'GET': req = self.session.get(url, headers=headers, stream=stream) elif method == 'PUT': req = self.session.put(url, json=params, headers=headers) elif method == 'POST': req = self.session.post(url, json=params, headers=headers) if req and (req.status_code == 200): if raw: _LOGGER.debug("Required raw object.") response = req else: response = req.json() # leave if everything worked fine break return response
python
def query(self, url, method='GET', extra_params=None, extra_headers=None, retry=3, raw=False, stream=False): """ Return a JSON object or raw session. :param url: Arlo API URL :param method: Specify the method GET, POST or PUT. Default is GET. :param extra_params: Dictionary to be appended on request.body :param extra_headers: Dictionary to be apppended on request.headers :param retry: Attempts to retry a query. Default is 3. :param raw: Boolean if query() will return request object instead JSON. :param stream: Boolean if query() will return a stream object. """ response = None loop = 0 # always make sure the headers and params are clean self.cleanup_headers() while loop <= retry: # override request.body or request.headers dictionary if extra_params: params = self.__params params.update(extra_params) else: params = self.__params _LOGGER.debug("Params: %s", params) if extra_headers: headers = self.__headers headers.update(extra_headers) else: headers = self.__headers _LOGGER.debug("Headers: %s", headers) _LOGGER.debug("Querying %s on attempt: %s/%s", url, loop, retry) loop += 1 # define connection method req = None if method == 'GET': req = self.session.get(url, headers=headers, stream=stream) elif method == 'PUT': req = self.session.put(url, json=params, headers=headers) elif method == 'POST': req = self.session.post(url, json=params, headers=headers) if req and (req.status_code == 200): if raw: _LOGGER.debug("Required raw object.") response = req else: response = req.json() # leave if everything worked fine break return response
[ "def", "query", "(", "self", ",", "url", ",", "method", "=", "'GET'", ",", "extra_params", "=", "None", ",", "extra_headers", "=", "None", ",", "retry", "=", "3", ",", "raw", "=", "False", ",", "stream", "=", "False", ")", ":", "response", "=", "None", "loop", "=", "0", "# always make sure the headers and params are clean", "self", ".", "cleanup_headers", "(", ")", "while", "loop", "<=", "retry", ":", "# override request.body or request.headers dictionary", "if", "extra_params", ":", "params", "=", "self", ".", "__params", "params", ".", "update", "(", "extra_params", ")", "else", ":", "params", "=", "self", ".", "__params", "_LOGGER", ".", "debug", "(", "\"Params: %s\"", ",", "params", ")", "if", "extra_headers", ":", "headers", "=", "self", ".", "__headers", "headers", ".", "update", "(", "extra_headers", ")", "else", ":", "headers", "=", "self", ".", "__headers", "_LOGGER", ".", "debug", "(", "\"Headers: %s\"", ",", "headers", ")", "_LOGGER", ".", "debug", "(", "\"Querying %s on attempt: %s/%s\"", ",", "url", ",", "loop", ",", "retry", ")", "loop", "+=", "1", "# define connection method", "req", "=", "None", "if", "method", "==", "'GET'", ":", "req", "=", "self", ".", "session", ".", "get", "(", "url", ",", "headers", "=", "headers", ",", "stream", "=", "stream", ")", "elif", "method", "==", "'PUT'", ":", "req", "=", "self", ".", "session", ".", "put", "(", "url", ",", "json", "=", "params", ",", "headers", "=", "headers", ")", "elif", "method", "==", "'POST'", ":", "req", "=", "self", ".", "session", ".", "post", "(", "url", ",", "json", "=", "params", ",", "headers", "=", "headers", ")", "if", "req", "and", "(", "req", ".", "status_code", "==", "200", ")", ":", "if", "raw", ":", "_LOGGER", ".", "debug", "(", "\"Required raw object.\"", ")", "response", "=", "req", "else", ":", "response", "=", "req", ".", "json", "(", ")", "# leave if everything worked fine", "break", "return", "response" ]
Return a JSON object or raw session.

:param url: Arlo API URL
:param method: Specify the method GET, POST or PUT. Default is GET.
:param extra_params: Dictionary to be appended on request.body
:param extra_headers: Dictionary to be appended on request.headers
:param retry: Attempts to retry a query. Default is 3.
:param raw: Boolean if query() will return request object instead JSON.
:param stream: Boolean if query() will return a stream object.
[ "Return", "a", "JSON", "object", "or", "raw", "session", "." ]
db70aeb81705309c56ad32bbab1094f6cd146524
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L93-L158
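Usage note (an addition, not part of the record above): the sketch below shows how the query() method just listed is typically driven. It is a minimal sketch under stated assumptions: the PyArlo(username, password) constructor and the pyarlo.const.DEVICES_ENDPOINT import are inferred from the surrounding records rather than confirmed here, the notify URL and header values are placeholders, and the body keys in extra_params are illustrative only.

# Minimal sketch, assuming PyArlo(username, password) performs the login and
# that DEVICES_ENDPOINT is importable from pyarlo.const (both are assumptions).
from pyarlo import PyArlo
from pyarlo.const import DEVICES_ENDPOINT

arlo = PyArlo('user@example.com', 'secret')          # hypothetical credentials

# Plain GET: returns the parsed JSON body, or None once all retries fail.
data = arlo.query(DEVICES_ENDPOINT)

# PUT/POST merge extra_params into the default request body and
# extra_headers into the default headers before the request is sent.
arlo.query(
    'https://example.invalid/hmsweb/users/devices/notify/BASESTATION_ID',  # placeholder URL
    method='POST',
    extra_params={'action': 'get', 'resource': 'modes'},  # illustrative body keys
    extra_headers={'xCloudId': 'XXXX'},                   # placeholder header value
)

# raw=True hands back the requests.Response object itself; stream=True is
# passed through to session.get(), which the library relies on for media.
resp = arlo.query('https://example.invalid/snapshot.jpg', raw=True, stream=True)

One behavior worth noting from the code above: extra_params and extra_headers are merged into the instance's default dictionaries in place, so any keys added this way persist for later calls on the same PyArlo object.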
7,099
tchellomello/python-arlo
pyarlo/__init__.py
PyArlo.devices
def devices(self):
        """Return all devices on Arlo account."""
        if self._all_devices:
            return self._all_devices

        self._all_devices = {}
        self._all_devices['cameras'] = []
        self._all_devices['base_station'] = []

        url = DEVICES_ENDPOINT
        data = self.query(url)

        for device in data.get('data'):
            name = device.get('deviceName')
            if ((device.get('deviceType') == 'camera' or
                 device.get('deviceType') == 'arloq' or
                 device.get('deviceType') == 'arloqs') and
                    device.get('state') == 'provisioned'):
                camera = ArloCamera(name, device, self)
                self._all_devices['cameras'].append(camera)

            if (device.get('state') == 'provisioned' and
                    (device.get('deviceType') == 'basestation' or
                     device.get('modelId') == 'ABC1000')):
                base = ArloBaseStation(name, device, self.__token, self)
                self._all_devices['base_station'].append(base)

        return self._all_devices
python
def devices(self):
        """Return all devices on Arlo account."""
        if self._all_devices:
            return self._all_devices

        self._all_devices = {}
        self._all_devices['cameras'] = []
        self._all_devices['base_station'] = []

        url = DEVICES_ENDPOINT
        data = self.query(url)

        for device in data.get('data'):
            name = device.get('deviceName')
            if ((device.get('deviceType') == 'camera' or
                 device.get('deviceType') == 'arloq' or
                 device.get('deviceType') == 'arloqs') and
                    device.get('state') == 'provisioned'):
                camera = ArloCamera(name, device, self)
                self._all_devices['cameras'].append(camera)

            if (device.get('state') == 'provisioned' and
                    (device.get('deviceType') == 'basestation' or
                     device.get('modelId') == 'ABC1000')):
                base = ArloBaseStation(name, device, self.__token, self)
                self._all_devices['base_station'].append(base)

        return self._all_devices
[ "def", "devices", "(", "self", ")", ":", "if", "self", ".", "_all_devices", ":", "return", "self", ".", "_all_devices", "self", ".", "_all_devices", "=", "{", "}", "self", ".", "_all_devices", "[", "'cameras'", "]", "=", "[", "]", "self", ".", "_all_devices", "[", "'base_station'", "]", "=", "[", "]", "url", "=", "DEVICES_ENDPOINT", "data", "=", "self", ".", "query", "(", "url", ")", "for", "device", "in", "data", ".", "get", "(", "'data'", ")", ":", "name", "=", "device", ".", "get", "(", "'deviceName'", ")", "if", "(", "(", "device", ".", "get", "(", "'deviceType'", ")", "==", "'camera'", "or", "device", ".", "get", "(", "'deviceType'", ")", "==", "'arloq'", "or", "device", ".", "get", "(", "'deviceType'", ")", "==", "'arloqs'", ")", "and", "device", ".", "get", "(", "'state'", ")", "==", "'provisioned'", ")", ":", "camera", "=", "ArloCamera", "(", "name", ",", "device", ",", "self", ")", "self", ".", "_all_devices", "[", "'cameras'", "]", ".", "append", "(", "camera", ")", "if", "(", "device", ".", "get", "(", "'state'", ")", "==", "'provisioned'", "and", "(", "device", ".", "get", "(", "'deviceType'", ")", "==", "'basestation'", "or", "device", ".", "get", "(", "'modelId'", ")", "==", "'ABC1000'", ")", ")", ":", "base", "=", "ArloBaseStation", "(", "name", ",", "device", ",", "self", ".", "__token", ",", "self", ")", "self", ".", "_all_devices", "[", "'base_station'", "]", ".", "append", "(", "base", ")", "return", "self", ".", "_all_devices" ]
Return all devices on Arlo account.
[ "Return", "all", "devices", "on", "Arlo", "account", "." ]
db70aeb81705309c56ad32bbab1094f6cd146524
https://github.com/tchellomello/python-arlo/blob/db70aeb81705309c56ad32bbab1094f6cd146524/pyarlo/__init__.py#L171-L198
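Usage note (an addition, not part of the record above): from the devices() code just listed, the returned mapping always carries 'cameras' and 'base_station' keys, and the result is cached in self._all_devices, so repeated access does not hit DEVICES_ENDPOINT again. The sketch below reuses the authenticated arlo instance assumed in the earlier example; the record does not show whether devices is exposed as a @property or a plain method, so that choice is an assumption flagged in the comments.

# Minimal sketch of consuming PyArlo.devices(); `arlo` is the authenticated
# instance from the previous example (an assumption).
inventory = arlo.devices                 # use arlo.devices() instead if it is a plain method
print(len(inventory['cameras']))         # ArloCamera objects for provisioned camera/arloq/arloqs devices
print(len(inventory['base_station']))    # ArloBaseStation objects (basestation, or modelId 'ABC1000')

# The dict is cached on the instance, so a second access returns the same
# object without another query(DEVICES_ENDPOINT) round trip.
inventory_again = arlo.devices
assert inventory_again is inventory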