code string | signature string | docstring string | loss_without_docstring float64 | loss_with_docstring float64 | factor float64 |
|---|---|---|---|---|---|
if not s3_path.startswith('s3://'):
raise ValueError('s3_path is expected to start with \'s3://\', '
'but was {}'.format(s3_path))
bucket_key = s3_path[len('s3://'):]
bucket_name, key = bucket_key.split('/', 1)
return S3Path(bucket_name, key) | def _s3_path_split(s3_path) | Split an S3 path into bucket and key.
Parameters
----------
s3_path : str
Returns
-------
splitted : (str, str)
(bucket, key)
Examples
--------
>>> _s3_path_split('s3://my-bucket/foo/bar.jpg')
S3Path(bucket_name='my-bucket', key='foo/bar.jpg') | 2.183439 | 2.426237 | 0.899929 |
meta = {}
try:
from PIL import Image
with Image.open(filepath) as img:
width, height = img.size
meta['width'] = width
meta['height'] = height
meta['channels'] = len(img.mode) # RGB, RGBA - does this always work?
except ImportError:
pass
# Get times - creation, last edit, last open
meta['file'] = mpu.io.get_file_meta(filepath)
return meta | def get_meta(filepath) | Get meta-information of an image.
Parameters
----------
filepath : str
Returns
-------
meta : dict | 5.063487 | 5.618209 | 0.901263 |
flat_list = []
for item in iterable:
is_iterable = (isinstance(item, collections.Iterable) and
(string_flattening or
(not string_flattening and not isinstance(item, str))
))
if is_iterable:
flat_list.extend(flatten(item))
else:
flat_list.append(item)
return flat_list | def flatten(iterable, string_flattening=False) | Flatten an given iterable of iterables into one list.
Parameters
----------
iterable : iterable
string_flattening : bool
If this is False, then strings are NOT flattened
Returns
-------
flat_list : List
Examples
--------
>>> flatten([1, [2, [3]]])
[1, 2, 3]
>>> flatten(((1, 2), (3, 4), (5, 6)))
[1, 2, 3, 4, 5, 6]
>>> flatten(EList([EList([1, 2]), (3, [4, [[5]]])]))
[1, 2, 3, 4, 5] | 2.184077 | 2.6006 | 0.839836 |
new_dict = {}
if merge_method in ['take_right_shallow', 'take_right_deep']:
return _dict_merge_right(dict_left, dict_right, merge_method)
elif merge_method == 'take_left_shallow':
return dict_merge(dict_right, dict_left, 'take_right_shallow')
elif merge_method == 'take_left_deep':
return dict_merge(dict_right, dict_left, 'take_right_deep')
elif merge_method == 'sum':
new_dict = deepcopy(dict_left)
for key, value in dict_right.items():
if key not in new_dict:
new_dict[key] = value
else:
recurse = isinstance(value, dict)
if recurse:
new_dict[key] = dict_merge(dict_left[key],
dict_right[key],
merge_method='sum')
else:
new_dict[key] = dict_left[key] + dict_right[key]
return new_dict
else:
raise NotImplementedError('merge_method=\'{}\' is not known.'
.format(merge_method)) | def dict_merge(dict_left, dict_right, merge_method='take_left_shallow') | Merge two dictionaries.
This method does NOT modify dict_left or dict_right!
Apply this method multiple times if the dictionary is nested.
Parameters
----------
dict_left : dict
dict_right : dict
merge_method : {'take_left_shallow', 'take_left_deep', \
'take_right_shallow', 'take_right_deep', \
'sum'}
* take_left_shallow: Use both dictionaries. If both have the same key,
take the value of dict_left
* take_left_deep : If both dictionaries have the same key and the value
is a dict for both again, then merge those sub-dictionaries
* take_right_shallow : See take_left_shallow
* take_right_deep : See take_left_deep
* sum : sum up both dictionaries. If one does not have a value for a
key of the other, assume the missing value to be zero.
Returns
-------
merged_dict : dict
Examples
--------
>>> dict_merge({'a': 1, 'b': 2}, {'c': 3}) == {'a': 1, 'b': 2, 'c': 3}
True
>>> out = dict_merge({'a': {'A': 1}},
... {'a': {'A': 2, 'B': 3}}, 'take_left_deep')
>>> expected = {'a': {'A': 1, 'B': 3}}
>>> out == expected
True
>>> out = dict_merge({'a': {'A': 1}},
... {'a': {'A': 2, 'B': 3}}, 'take_left_shallow')
>>> expected = {'a': {'A': 1}}
>>> out == expected
True
>>> out = dict_merge({'a': 1, 'b': {'c': 2}},
... {'b': {'c': 3, 'd': 4}},
... 'sum')
>>> expected = {'a': 1, 'b': {'c': 5, 'd': 4}}
>>> out == expected
True | 1.717919 | 1.79945 | 0.954691 |
new_dict = deepcopy(dict_left)
for key, value in dict_right.items():
if key not in new_dict:
new_dict[key] = value
else:
recurse = (merge_method == 'take_right_deep' and
isinstance(dict_left[key], dict) and
isinstance(dict_right[key], dict))
if recurse:
new_dict[key] = dict_merge(dict_left[key],
dict_right[key],
merge_method='take_right_deep')
else:
new_dict[key] = value
return new_dict | def _dict_merge_right(dict_left, dict_right, merge_method) | See documentation of mpu.datastructures.dict_merge. | 2.00482 | 1.904819 | 1.052499 |
orig = dictionary
for key in keys[:-1]:
dictionary = dictionary.setdefault(key, {})
dictionary[keys[-1]] = value
return orig | def set_dict_value(dictionary, keys, value) | Set a value in a (nested) dictionary by defining a list of keys.
.. note:: Side-effects
This function does not make a copy of dictionary, but directly
edits it.
Parameters
----------
dictionary : dict
keys : List[Any]
value : object
Returns
-------
dictionary : dict
Examples
--------
>>> d = {'a': {'b': 'c', 'd': 'e'}}
>>> expected = {'a': {'b': 'foobar', 'd': 'e'}}
>>> set_dict_value(d, ['a', 'b'], 'foobar') == expected
True | 2.610173 | 4.354228 | 0.599457 |
for key in list_:
if key not in dict_:
return False
dict_ = dict_[key]
return True | def does_keychain_exist(dict_, list_) | Check if a sequence of keys exist in a nested dictionary.
Parameters
----------
dict_ : Dict[str/int/tuple, Any]
list_ : List[str/int/tuple]
Returns
-------
keychain_exists : bool
Examples
--------
>>> d = {'a': {'b': {'c': 'd'}}}
>>> l_exists = ['a', 'b']
>>> does_keychain_exist(d, l_exists)
True
>>> l_no_existant = ['a', 'c']
>>> does_keychain_exist(d, l_no_existant)
False | 2.78348 | 4.349114 | 0.640011 |
new_list = []
for index, element in enumerate(self):
if index not in indices:
new_list.append(element)
return EList(new_list) | def remove_indices(self, indices) | Remove rows which have the given indices.
Parameters
----------
indices : list
Returns
-------
filtered_list : EList | 3.184353 | 2.669843 | 1.192712 |
filepaths = []
for path, _, files in os.walk(root, followlinks=followlinks):
for name in files:
filepaths.append(os.path.abspath(os.path.join(path, name)))
return filepaths | def get_all_files(root, followlinks=False) | Get all files within the given root directory.
Note that this list is not ordered.
Parameters
----------
root : str
Path to a directory
followlinks : bool, optional (default: False)
Returns
-------
filepaths : list
List of absolute paths to files | 1.837462 | 2.285528 | 0.803955 |
filepath = pkg_resources.resource_filename(package_name, path)
return os.path.abspath(filepath) | def get_from_package(package_name, path) | Get the absolute path to a file in a package.
Parameters
----------
package_name : str
e.g. 'mpu'
path : str
Path within a package
Returns
-------
filepath : str | 2.988844 | 4.314631 | 0.692723 |
country_names = ['Germany',
'France',
'Indonesia',
'Ireland',
'Spain',
'Vatican']
population = [82521653, 66991000, 255461700, 4761865, 46549045, None]
population_time = [dt.datetime(2016, 12, 1),
dt.datetime(2017, 1, 1),
dt.datetime(2017, 1, 1),
None, # Ireland
dt.datetime(2017, 6, 1), # Spain
None,
]
euro = [True, True, False, True, True, True]
df = pd.DataFrame({'country': country_names,
'population': population,
'population_time': population_time,
'EUR': euro})
df = df[['country', 'population', 'population_time', 'EUR']]
return df | def example_df() | Create an example dataframe. | 3.110292 | 3.055516 | 1.017927 |
if dtype is None:
dtype = {}
print('Number of datapoints: {datapoints}'.format(datapoints=len(df)))
column_info, column_info_meta = _get_column_info(df, dtype)
if len(column_info['int']) > 0:
_describe_int(df, column_info)
if len(column_info['float']) > 0:
_describe_float(df, column_info)
if len(column_info['category']) > 0:
_describe_category(df, column_info, column_info_meta)
if len(column_info['time']) > 0:
_describe_time(df, column_info, column_info_meta)
if len(column_info['other']) > 0:
_describe_other(df, column_info, column_info_meta)
column_types = {}
for column_type, columns in column_info.items():
for column_name in columns:
if column_type == 'other':
column_type = 'str'
column_types[column_name] = column_type
return column_types | def describe(df, dtype=None) | Print a description of a Pandas dataframe.
Parameters
----------
df : Pandas.DataFrame
dtype : dict
Maps column names to types | 1.94303 | 2.069396 | 0.938936 |
if nb_classes < 1:
raise ValueError('nb_classes={}, but positive number expected'
.format(nb_classes))
one_hot = []
for index in indices:
one_hot.append([0] * nb_classes)
one_hot[-1][index] = 1
return one_hot | def indices2one_hot(indices, nb_classes) | Convert an iterable of indices to one-hot encoded list.
You might also be interested in sklearn.preprocessing.OneHotEncoder
Parameters
----------
indices : iterable
iterable of indices
nb_classes : int
Number of classes
dtype : type
Returns
-------
one_hot : list
Examples
--------
>>> indices2one_hot([0, 1, 1], 3)
[[1, 0, 0], [0, 1, 0], [0, 1, 0]]
>>> indices2one_hot([0, 1, 1], 2)
[[1, 0], [0, 1], [0, 1]] | 2.605162 | 3.433923 | 0.758655 |
indices = []
for one_hot in one_hots:
indices.append(argmax(one_hot))
return indices | def one_hot2indices(one_hots) | Convert an iterable of one-hot encoded targets to a list of indices.
Parameters
----------
one_hots : list
Returns
-------
indices : list
Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]
>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1] | 2.26992 | 5.296019 | 0.428609 |
divisors = {} # map number to at least one divisor
candidate = 2 # next potential prime
while True:
if candidate in divisors:
# candidate is composite. divisors[candidate] is the list of primes
# that divide it. Since we've reached candidate, we no longer need
# it in the map, but we'll mark the next multiples of its witnesses
# to prepare for larger numbers
for p in divisors[candidate]:
divisors.setdefault(p + candidate, []).append(p)
del divisors[candidate]
else:
# candidate is a new prime
yield candidate
# mark its first multiple that isn't
# already marked in previous iterations
divisors[candidate * candidate] = [candidate]
candidate += 1 | def generate_primes() | Generate an infinite sequence of prime numbers.
The algorithm was originally written by David Eppstein, UC Irvine. See:
http://code.activestate.com/recipes/117119/
Examples
--------
>>> g = generate_primes()
>>> next(g)
2
>>> next(g)
3
>>> next(g)
5 | 5.129196 | 5.981043 | 0.857575 |
if not isinstance(number, int):
raise ValueError('integer expected, but type(number)={}'
.format(type(number)))
if number < 0:
return [-1] + factorize(number * (-1))
elif number == 0:
raise ValueError('All primes are prime factors of 0.')
else:
for i in range(2, int(math_stl.ceil(number**0.5)) + 1):
if number % i == 0:
if i == number:
return [i]
else:
return [i] + factorize(int(number / i))
return [number] | def factorize(number) | Get the prime factors of an integer except for 1.
Parameters
----------
number : int
Returns
-------
primes : iterable
Examples
--------
>>> factorize(-17)
[-1, 17]
>>> factorize(8)
[2, 2, 2]
>>> factorize(3**25)
[3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3]
>>> factorize(1)
[1] | 2.976472 | 3.044546 | 0.977641 |
max_value = None
max_index = None
for index, value in enumerate(iterable):
if (max_value is None) or max_value < value:
max_value = value
max_index = index
return max_index | def argmax(iterable) | Find the first index of the biggest value in the iterable.
Parameters
----------
iterable : iterable
Returns
-------
argmax : int
Examples
--------
>>> argmax([0, 0, 0])
0
>>> argmax([1, 0, 0])
0
>>> argmax([0, 1, 0])
1
>>> argmax([]) | 1.792435 | 2.833577 | 0.63257 |
from math import floor
d = int('1' + ('0' * decimal_places))
return floor(x * d) / d | def round_down(x, decimal_places) | Round a float down to decimal_places.
Parameters
----------
x : float
decimal_places : int
Returns
-------
rounded_float : float
Examples
--------
>>> round_down(1.23456, 3)
1.234
>>> round_down(1.23456, 2)
1.23 | 4.063254 | 7.32121 | 0.554998 |
seconds += minutes * 60
seconds += hours * 60**2
seconds += days * 24 * 60**2
t14 = datetime_obj + dt.timedelta(seconds=seconds) # Invalid timezone!
t14 = t14.astimezone(pytz.utc).astimezone(t14.tzinfo) # Fix the timezone
return t14 | def add_time(datetime_obj, days=0, hours=0, minutes=0, seconds=0) | Add time to a timezone-aware datetime object.
This keeps the timezone correct, even if it changes due to daylight
saving time (DST).
Parameters
----------
datetime_obj : datetime.datetime
days : int
hours : int
minutes : int
seconds : int
Returns
-------
datetime : datetime.datetime | 3.693018 | 3.851763 | 0.958786 |
if not (minimum < maximum):
raise ValueError('{} is not smaller than {}'.format(minimum, maximum))
# Python 3 allows direct multiplication of timedelta with a float, but
# Python 2.7 does not. Hence this work-around.
time_d = maximum - minimum
time_d_float = time_d.total_seconds()
time_d_rand = dt.timedelta(seconds=time_d_float * local_random.random())
generated = minimum + time_d_rand
return generated | def generate(minimum, maximum, local_random=random.Random()) | Generate a random date.
The generated dates are uniformly distributed.
Parameters
----------
minimum : datetime object
maximum : datetime object
local_random : random.Random
Returns
-------
generated_date : datetime object
Examples
--------
>>> import random; r = random.Random(); r.seed(0)
>>> from datetime import datetime
>>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
datetime.datetime(2018, 1, 1, 20, 15, 58, 47972)
>>> generate(datetime(2018, 1, 1), datetime(2018, 1, 2), local_random=r)
datetime.datetime(2018, 1, 1, 18, 11, 27, 260414) | 4.845577 | 5.294341 | 0.915237 |
root = args.root
if root is None:
root = '.'
root = os.path.abspath(root)
project_data = _get_package_data()
project_name = project_data['project_name']
directories = [os.path.join(root, 'bin'),
os.path.join(root, 'docs'),
os.path.join(root, 'tests'),
os.path.join(root, project_name),
]
for dir_path in directories:
if not os.path.exists(dir_path):
os.makedirs(dir_path)
script_paths = [os.path.join(root, 'README.md'),
os.path.join(root, 'tests/__init__.py'),
]
for script_path in script_paths:
if not os.path.exists(script_path):
os.mknod(script_path)
copy_samples = [(resource_filename('mpu', 'package/templates/tox.ini.txt'),
os.path.join(root, 'tox.ini')),
(resource_filename('mpu',
'package/templates/setup.cfg.txt'),
os.path.join(root, 'setup.cfg')),
(resource_filename('mpu',
'package/templates/setup.py.txt'),
os.path.join(root, 'setup.py')),
(resource_filename('mpu',
'package/templates/_version.py.txt'),
os.path.join(root, project_name + '/_version.py')),
(resource_filename('mpu',
'package/templates/coveragerc.txt'),
os.path.join(root, '.coveragerc')),
(resource_filename('mpu', 'package/templates/init.py.txt'),
os.path.join(root, project_name + '/__init__.py')),
]
translate = {'[[project_name]]': project_data['project_name'],
'[[license]]': project_data['license'],
'[[author]]': project_data['author'],
'[[email]]': project_data['email'],
}
for source, destination in copy_samples:
if not os.path.exists(destination):
copyfile(source, destination)
_adjust_template(destination, translate) | def run_init(args) | Run project initialization.
This will ask the user for input.
Parameters
----------
args : argparse named arguments | 1.966381 | 2.01523 | 0.97576 |
# Create a regular expression from all of the dictionary keys
regex = re.compile("|".join(map(re.escape, search_replace_dict.keys())))
# For each match, look up the corresponding value in the dictionary
return regex.sub(lambda match: search_replace_dict[match.group(0)], text) | def _multiple_replace(text, search_replace_dict) | Replace multiple things at once in a text.
Parameters
----------
text : str
search_replace_dict : dict
Returns
-------
replaced_text : str
Examples
--------
>>> d = {'a': 'b', 'b': 'c', 'c': 'd', 'd': 'e'}
>>> _multiple_replace('abcdefghijklm', d)
'bcdeefghijklm' | 2.099457 | 2.602673 | 0.806654 |
with open(filepath, 'r') as file:
filedata = file.read()
filedata = _multiple_replace(filedata, translate)
with open(filepath, 'w') as file:
file.write(filedata) | def _adjust_template(filepath, translate) | Search and replace contents of a filepath.
Parameters
----------
filepath : str
translate : dict | 2.099565 | 2.373308 | 0.884658 |
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
if parser is None:
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
subparsers = parser.add_subparsers()
pkg_init_parser = subparsers.add_parser('init')
pkg_init_parser.add_argument("root",
nargs='?',
help="project root - should be empty")
pkg_init_parser.set_defaults(func=run_init)
return parser | def get_parser(parser=None) | Get parser for mpu. | 2.684293 | 2.626391 | 1.022046 |
import multiprocessing.pool
from contextlib import closing
with closing(multiprocessing.pool.ThreadPool(nb_threads)) as pool:
return pool.map(loop_function, parameters) | def parallel_for(loop_function, parameters, nb_threads=100) | Execute the loop body in parallel.
.. note:: Race-Conditions
Executing code in parallel can cause an error class called
"race-condition".
Parameters
----------
loop_function : Python function which takes a tuple as input
parameters : List of tuples
Each element here should be executed in parallel.
Returns
-------
return_values : list of return values | 2.552675 | 3.300194 | 0.773492 |
if lowest is not None:
number = max(number, lowest)
if highest is not None:
number = min(number, highest)
return number | def clip(number, lowest=None, highest=None) | Clip a number to a given lowest / highest value.
Parameters
----------
number : number
lowest : number, optional
highest : number, optional
Returns
-------
clipped_number : number
Examples
--------
>>> clip(42, lowest=0, highest=10)
10 | 1.802291 | 3.19831 | 0.563513 |
perm = list(range(len(lists[0])))
random.shuffle(perm)
lists = tuple([sublist[index] for index in perm]
for sublist in lists)
return lists | def consistent_shuffle(*lists) | Shuffle lists consistently.
Parameters
----------
*lists
Variable length number of lists
Returns
-------
shuffled_lists : tuple of lists
All of the lists are shuffled consistently
Examples
--------
>>> import mpu, random; random.seed(8)
>>> mpu.consistent_shuffle([1,2,3], ['a', 'b', 'c'], ['A', 'B', 'C'])
([3, 2, 1], ['c', 'b', 'a'], ['C', 'B', 'A']) | 3.614671 | 4.894814 | 0.73847 |
lat1, lon1 = origin
lat2, lon2 = destination
if not (-90.0 <= lat1 <= 90):
raise ValueError('lat1={:2.2f}, but must be in [-90,+90]'.format(lat1))
if not (-90.0 <= lat2 <= 90):
raise ValueError('lat2={:2.2f}, but must be in [-90,+90]'.format(lat2))
if not (-180.0 <= lon1 <= 180):
raise ValueError('lon1={:2.2f}, but must be in [-180,+180]'
.format(lon1))
if not (-180.0 <= lon2 <= 180):
raise ValueError('lon2={:2.2f}, but must be in [-180,+180]'
.format(lon2))
radius = 6371 # km
dlat = math_stl.radians(lat2 - lat1)
dlon = math_stl.radians(lon2 - lon1)
a = (math_stl.sin(dlat / 2) * math_stl.sin(dlat / 2) +
math_stl.cos(math_stl.radians(lat1)) *
math_stl.cos(math_stl.radians(lat2)) *
math_stl.sin(dlon / 2) * math_stl.sin(dlon / 2))
c = 2 * math_stl.atan2(math_stl.sqrt(a), math_stl.sqrt(1 - a))
d = radius * c
return d | def haversine_distance(origin, destination) | Calculate the Haversine distance.
Parameters
----------
origin : tuple of float
(lat, long)
destination : tuple of float
(lat, long)
Returns
-------
distance_in_km : float
Examples
--------
>>> munich = (48.1372, 11.5756)
>>> berlin = (52.5186, 13.4083)
>>> round(haversine_distance(munich, berlin), 1)
504.2
>>> new_york_city = (40.712777777778, -74.005833333333) # NYC
>>> round(haversine_distance(berlin, new_york_city), 1)
6385.3 | 1.437497 | 1.530596 | 0.939174 |
if not (min_value <= value <= max_value):
raise ValueError('{}={} is not in [{}, {}]'
.format(name, value, min_value, max_value)) | def is_in_intervall(value, min_value, max_value, name='variable') | Raise an exception if value is not in an interval.
Parameters
----------
value : orderable
min_value : orderable
max_value : orderable
name : str
Name of the variable to print in exception. | 2.362107 | 2.752918 | 0.858037 |
write_val = {'exception_type': str(exctype),
'message': str(traceback.format_tb(tb, 10))}
logging.exception(str(write_val)) | def exception_logging(exctype, value, tb) | Log exception by using the root logger.
Use it as `sys.excepthook = exception_logging`.
Parameters
----------
exctype : type
value : BaseException
tb : traceback | 5.192241 | 7.303971 | 0.710879 |
if not (-90 <= latitude <= 90):
raise ValueError('latitude was {}, but has to be in [-90, 90]'
.format(latitude))
self._latitude = latitude | def latitude(self, latitude) | Setter for latitude. | 3.669589 | 3.497525 | 1.049196 |
if not (-180 <= longitude <= 180):
raise ValueError('longitude was {}, but has to be in [-180, 180]'
.format(longitude))
self._longitude = longitude | def longitude(self, longitude) | Setter for longitude. | 3.326585 | 3.226046 | 1.031165 |
return haversine_distance((self.latitude, self.longitude),
(there.latitude, there.longitude)) | def distance(self, there) | Calculate the distance from this location to there.
Parameters
----------
there : Location
Returns
-------
distance_in_m : float | 4.16349 | 5.035546 | 0.82682 |
parser = get_parser()
args = parser.parse_args()
if hasattr(args, 'func') and args.func:
args.func(args)
else:
parser.print_help() | def main() | Command line interface of mpu. | 2.073219 | 2.031374 | 1.0206 |
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description=__doc__,
formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('--version',
action='version',
version='mpu {}'.format(mpu.__version__))
subparsers = parser.add_subparsers(help='Python package commands')
package_parser = subparsers.add_parser('package')
mpu.package.cli.get_parser(package_parser)
return parser | def get_parser() | Get parser for mpu. | 2.631003 | 2.22035 | 1.18495 |
if filepath.lower().endswith('.csv'):
return _read_csv(filepath, kwargs)
elif filepath.lower().endswith('.json'):
with open(filepath) as data_file:
data = json.load(data_file, **kwargs)
return data
elif filepath.lower().endswith('.jsonl'):
return _read_jsonl(filepath, kwargs)
elif filepath.lower().endswith('.pickle'):
with open(filepath, 'rb') as handle:
data = pickle.load(handle)
return data
elif (filepath.lower().endswith('.yml') or
filepath.lower().endswith('.yaml')):
raise NotImplementedError('YAML is not supported, because you need '
'PyYAML in Python3. '
'See '
'https://stackoverflow.com/a/42054860/562769'
' as a guide how to use it.')
elif (filepath.lower().endswith('.h5') or
filepath.lower().endswith('.hdf5')):
raise NotImplementedError('HDF5 is not supported. See '
'https://stackoverflow.com/a/41586571/562769'
' as a guide how to use it.')
else:
raise NotImplementedError('File \'{}\' is not known.'.format(filepath)) | def read(filepath, **kwargs) | Read a file.
Supported formats:
* CSV
* JSON, JSONL
* pickle
Parameters
----------
filepath : str
Path to the file that should be read. This methods action depends
mainly on the file extension.
kwargs : dict
Any keywords for the specific file format. For CSV, this is
'delimiter', 'quotechar', 'skiprows', 'format'
Returns
-------
data : str or bytes | 2.520237 | 2.535662 | 0.993917 |
if 'delimiter' not in kwargs:
kwargs['delimiter'] = ','
if 'quotechar' not in kwargs:
kwargs['quotechar'] = '"'
if 'skiprows' not in kwargs:
kwargs['skiprows'] = []
if isinstance(kwargs['skiprows'], int):
kwargs['skiprows'] = [i for i in range(kwargs['skiprows'])]
if 'format' in kwargs:
format_ = kwargs['format']
kwargs.pop('format', None)
else:
format_ = 'default'
skiprows = kwargs['skiprows']
kwargs.pop('skiprows', None)
kwargs_open = {'newline': ''}
mode = 'r'
if sys.version_info < (3, 0):
kwargs_open.pop('newline', None)
mode = 'rb'
with open(filepath, mode, **kwargs_open) as fp:
if format_ == 'default':
reader = csv.reader(fp, **kwargs)
data = EList([row for row in reader])
data = data.remove_indices(skiprows)
elif format_ == 'dicts':
reader_list = csv.DictReader(fp, **kwargs)
data = [row for row in reader_list]
else:
raise NotImplementedError('Format \'{}\' unknown'
.format(format_))
return data | def _read_csv(filepath, kwargs) | See documentation of mpu.io.read. | 2.173511 | 2.160272 | 1.006128 |
with open(filepath) as data_file:
data = [json.loads(line, **kwargs)
for line in data_file
if len(line) > 0]
return data | def _read_jsonl(filepath, kwargs) | See documentation of mpu.io.read. | 2.897425 | 2.738597 | 1.057996 |
if filepath.lower().endswith('.csv'):
return _write_csv(filepath, data, kwargs)
elif filepath.lower().endswith('.json'):
return _write_json(filepath, data, kwargs)
elif filepath.lower().endswith('.jsonl'):
return _write_jsonl(filepath, data, kwargs)
elif filepath.lower().endswith('.pickle'):
return _write_pickle(filepath, data, kwargs)
elif (filepath.lower().endswith('.yml') or
filepath.lower().endswith('.yaml')):
raise NotImplementedError('YAML is not supported, because you need '
'PyYAML in Python3. '
'See '
'https://stackoverflow.com/a/42054860/562769'
' as a guide how to use it.')
elif (filepath.lower().endswith('.h5') or
filepath.lower().endswith('.hdf5')):
raise NotImplementedError('HDF5 is not supported. See '
'https://stackoverflow.com/a/41586571/562769'
' as a guide how to use it.')
else:
raise NotImplementedError('File \'{}\' is not known.'.format(filepath)) | def write(filepath, data, **kwargs) | Write a file.
Supported formats:
* CSV
* JSON, JSONL
* pickle
Parameters
----------
filepath : str
Path to the file that should be read. This methods action depends
mainly on the file extension.
data : dict or list
Content that should be written
kwargs : dict
Any keywords for the specific file format.
Returns
-------
data : str or bytes | 2.586619 | 2.558618 | 1.010944 |
kwargs_open = {'newline': ''}
mode = 'w'
if sys.version_info < (3, 0):
kwargs_open.pop('newline', None)
mode = 'wb'
with open(filepath, mode, **kwargs_open) as fp:
if 'delimiter' not in kwargs:
kwargs['delimiter'] = ','
if 'quotechar' not in kwargs:
kwargs['quotechar'] = '"'
with open(filepath, 'w') as fp:
writer = csv.writer(fp, **kwargs)
writer.writerows(data)
return data | def _write_csv(filepath, data, kwargs) | See documentation of mpu.io.write. | 2.101646 | 2.101936 | 0.999862 |
with io_stl.open(filepath, 'w', encoding='utf8') as outfile:
if 'indent' not in kwargs:
kwargs['indent'] = 4
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = True
if 'separators' not in kwargs:
kwargs['separators'] = (',', ': ')
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
str_ = json.dumps(data, **kwargs)
outfile.write(to_unicode(str_))
return data | def _write_json(filepath, data, kwargs) | See documentation of mpu.io.write. | 2.147091 | 2.154235 | 0.996684 |
with io_stl.open(filepath, 'w', encoding='utf8') as outfile:
kwargs['indent'] = None # JSON has to be on one line!
if 'sort_keys' not in kwargs:
kwargs['sort_keys'] = True
if 'separators' not in kwargs:
kwargs['separators'] = (',', ': ')
if 'ensure_ascii' not in kwargs:
kwargs['ensure_ascii'] = False
for line in data:
str_ = json.dumps(line, **kwargs)
outfile.write(to_unicode(str_))
outfile.write(u'\n')
return data | def _write_jsonl(filepath, data, kwargs) | See documentation of mpu.io.write. | 2.721669 | 2.730095 | 0.996914 |
if 'protocol' not in kwargs:
kwargs['protocol'] = pickle.HIGHEST_PROTOCOL
with open(filepath, 'wb') as handle:
pickle.dump(data, handle, **kwargs)
return data | def _write_pickle(filepath, data, kwargs) | See documentation of mpu.io.write. | 2.068873 | 2.130371 | 0.971133 |
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
response = urlopen(url)
content = response.read()
content = content.decode(encoding)
return content | def urlread(url, encoding='utf8') | Read the content of an URL.
Parameters
----------
url : str
Returns
-------
content : str | 2.050627 | 2.318613 | 0.88442 |
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
if sink is None:
sink = os.path.abspath(os.path.split(source)[1])
urlretrieve(source, sink)
return sink | def download(source, sink=None) | Download a file.
Parameters
----------
source : str
Where the file comes from. Some URL.
sink : str or None (default: same filename in current directory)
Where the file gets stored. Some filepath in the local file system. | 2.14113 | 2.373952 | 0.901926 |
if method == 'sha1':
hash_function = hashlib.sha1()
elif method == 'md5':
hash_function = hashlib.md5()
else:
raise NotImplementedError('Only md5 and sha1 hashes are known, but '
'\'{}\' was specified.'.format(method))
with open(filepath, 'rb') as fp:
while True:
data = fp.read(buffer_size)
if not data:
break
hash_function.update(data)
return hash_function.hexdigest() | def hash(filepath, method='sha1', buffer_size=65536) | Calculate a hash of a local file.
Parameters
----------
filepath : str
method : {'sha1', 'md5'}
buffer_size : int, optional (default: 65536 byte = 64 KiB)
in byte
Returns
-------
hash : str | 1.877203 | 2.072359 | 0.905829 |
if platform.system() == 'Windows':
return datetime.fromtimestamp(os.path.getctime(filepath))
else:
stat = os.stat(filepath)
try:
return datetime.fromtimestamp(stat.st_birthtime)
except AttributeError:
# We're probably on Linux. No easy way to get creation dates here,
# so we'll settle for when its content was last modified.
return None | def get_creation_datetime(filepath) | Get the date that a file was created.
Parameters
----------
filepath : str
Returns
-------
creation_datetime : datetime.datetime or None | 1.52872 | 1.79335 | 0.852439 |
import tzlocal
timezone = tzlocal.get_localzone()
mtime = datetime.fromtimestamp(os.path.getmtime(filepath))
return mtime.replace(tzinfo=timezone) | def get_modification_datetime(filepath) | Get the datetime that a file was last modified.
Parameters
----------
filepath : str
Returns
-------
modification_datetime : datetime.datetime | 2.645772 | 3.069675 | 0.861906 |
import tzlocal
tz = tzlocal.get_localzone()
mtime = datetime.fromtimestamp(os.path.getatime(filepath))
return mtime.replace(tzinfo=tz) | def get_access_datetime(filepath) | Get the last time filepath was accessed.
Parameters
----------
filepath : str
Returns
-------
access_datetime : datetime.datetime | 2.833079 | 3.388037 | 0.836201 |
meta = {}
meta['filepath'] = os.path.abspath(filepath)
meta['creation_datetime'] = get_creation_datetime(filepath)
meta['last_access_datetime'] = get_access_datetime(filepath)
meta['modification_datetime'] = get_modification_datetime(filepath)
try:
import magic
f_mime = magic.Magic(mime=True, uncompress=True)
f_other = magic.Magic(mime=False, uncompress=True)
meta['mime'] = f_mime.from_file(meta['filepath'])
meta['magic-type'] = f_other.from_file(meta['filepath'])
except ImportError:
pass
return meta | def get_file_meta(filepath) | Get meta-information about a file.
Parameters
----------
filepath : str
Returns
-------
meta : dict | 2.168929 | 2.505523 | 0.865659 |
def gzip_file(source, sink):
    """Create a GZIP file from a source file.

    Parameters
    ----------
    source : str
        Filepath
    sink : str
        Filepath
    """
    import gzip
    import shutil
    with open(source, 'rb') as f_in, gzip.open(sink, 'wb') as f_out:
        # copyfileobj streams fixed-size chunks; writelines would split
        # arbitrary binary data on newline bytes for no benefit.
        shutil.copyfileobj(f_in, f_out)
def start(self):
    """Start the patch."""
    self._patcher = mock.patch(target=self.target)
    mocked_client = self._patcher.start()
    client_instance = mocked_client.return_value
    client_instance.model.side_effect = mock.Mock(side_effect=self.model)
cls.client_id = client_id
cls.client_secret = client_secret | def setup(cls, client_id, client_secret) | Configure client in session | 2.144197 | 2.079728 | 1.030999 |
def read(filelines, mapping=None, wok=False):
    """Parse RIS lines and return a list of entries.

    Entries are codified as dictionaries keyed by tag. Single-line,
    singly-occurring tags map to strings; multiline or repeated tags map
    to lists of strings.

    Keyword arguments:
    filelines -- ris file lines
    mapping -- custom RIS tags mapping
    wok -- if True use the Web of Knowledge format, otherwise Refman's
        RIS specifications.
    """
    if wok:
        parser, default_mapping = Wok, WOK_TAG_KEY_MAPPING
    else:
        parser, default_mapping = Ris, TAG_KEY_MAPPING
    return parser(filelines, mapping or default_mapping).parse()
def refresh_context(self):
    """Fetch the user's default preferences and store them as context."""
    user_model = self.model('res.user')
    self.context = user_model.get_preferences(True)
    return self.context
def login(self, login, password, set_auth=False):
    """Attempt a login to the remote server.

    On success returns the user id and session, otherwise None.

    Warning: do not depend on this; it will be deprecated with SSO.

    :param set_auth: sets the authentication on the client
    """
    payload = dumps({
        "method": "common.db.login",
        "params": [login, password]
    })
    response = self.session.post(self.host, payload)
    result = loads(response.content)['result']
    if set_auth:
        self.set_auth(SessionAuth(login, *result))
    return result
def is_auth_alive(self):
    """Return True if the auth is not expired, else False."""
    model = self.model('ir.model')
    try:
        model.search([], None, 1, None)
    except ClientError as err:
        # A 403 means the session/token is no longer valid.
        if err and err.message['code'] == 403:
            return False
        raise
    else:
        # Removed the original's redundant `except Exception: raise`
        # clause — it re-raised unchanged, i.e. dead code.
        return True
def update(self, data=None, **kwargs):
    """Update the record right away.

    :param data: dictionary of changes (not mutated)
    :param kwargs: keyword arguments merged into the changes
    """
    # Copy before merging: the original mutated the caller's dict.
    changes = dict(data or {})
    changes.update(kwargs)
    return self.model.write([self.id], changes)
def search_read_all(self, domain, order, fields, batch_size=500,
                    context=None, offset=0, limit=None):
    """Lazily iterate over records matching the domain.

    :param domain: a search domain
    :param order: order clause for search_read
    :param fields: fields argument for search_read
    :param batch_size: optimal batch size per paginated request
    """
    context = context or {}
    if limit is None:
        # Without an explicit limit, walk every matching record.
        end = self.search_count(domain, context=context) + offset
    else:
        end = limit + offset
    page = offset
    while page < end:
        # The final page may be smaller than batch_size.
        size = min(batch_size, end - page)
        for record in self.search_read(domain, page, size,
                                       order, fields, context=context):
            yield record
        page += size
def find(self, filter=None, page=1, per_page=10, fields=None, context=None):
    """Find records that match the filter.

    Fields may be nested for relationship types, e.g.
    ``['shipment_address', 'shipment_address.country.code']``.

    :param filter: a domain expression (refer docs for domain syntax)
    :param page: the page to fetch for paginated results
    :param per_page: number of records per page
    :param fields: list of field names to fetch
    :param context: any overrides to the context
    """
    domain = filter or []
    rv = self.client.session.get(
        self.path,
        params={
            'filter': dumps(domain),
            'page': page,
            'per_page': per_page,
            # NOTE(review): key is 'field' (singular) in the original —
            # confirm against the HTTP API before changing.
            'field': fields,
            'context': dumps(context or self.client.context),
        }
    )
    response_received.send(rv)
    return rv
def attach(self, id, filename, url):
    """Add an attachment to a record from a url.

    :param id: ID of the record
    :param filename: file name of the attachment
    :param url: public url to download the file from
    """
    resource = '%s,%s' % (self.model_name, id)
    attachment_model = self.client.model('ir.attachment')
    return attachment_model.add_attachment_from_url(filename, url, resource)
def refresh_if_needed(self):
    """Refresh the status of the task from the server if required."""
    if self.state not in (self.PENDING, self.STARTED):
        return
    try:
        response, = self._fetch_result()['tasks']
    except (KeyError, ValueError):
        raise Exception(
            "Unable to find results for task."
        )
    if 'error' in response:
        # Bug fix: the original used '==' here, a comparison with no
        # effect, so the FAILURE state was never actually recorded.
        self.state = self.FAILURE
        raise ServerError(response['error'])
    if 'state' in response:
        self.state = response['state']
        self.result = response['result']
def get_product_inventory(product_id, warehouse_ids):
    """Return the product inventory in each location.

    The response maps location id to a dict with 'quantity_on_hand' and
    'quantity_available' for the given product. Read more:
    http://docs.fulfiliorestapi.apiary.io/#reference/product/product-inventory
    """
    product_model = client.model('product.product')
    inventory = product_model.get_product_inventory(
        [product_id], warehouse_ids)
    return inventory[product_id]
def get_customer(code):
    """Fetch a customer with the given code.

    Returns None if the customer is not found.
    """
    matches = client.model('party.party').find([('code', '=', code)])
    if matches:
        return matches[0]['id']
def get_address(customer_id, data):
    """Return the id of the customer's address matching ``data``, if any.

    Fetches all addresses of the customer and compares field by field.
    """
    address_model = client.model('party.address')
    candidates = address_model.find(
        [('party', '=', customer_id)],
        fields=[
            'name', 'street', 'street_bis', 'city', 'zip',
            'subdivision.code', 'country.code'
        ]
    )
    simple_fields = ('name', 'street', 'street_bis', 'city', 'zip')
    for candidate in candidates:
        # Subdivision codes are namespaced (e.g. US-CA), so match on the
        # suffix; country codes compare directly.
        if (all(candidate[f] == data[f] for f in simple_fields) and
                candidate['subdivision.code'].endswith(data['state']) and
                candidate['country.code'] == data['country']):
            return candidate['id']
def create_address(customer_id, data):
    """Create an address for the customer and return its id."""
    Address = client.model('party.address')
    Country = client.model('country.country')
    Subdivision = client.model('country.subdivision')
    country, = Country.find([('code', '=', data['country'])])
    # State codes are namespaced by country (e.g. US-CA, IN-KL), so
    # match on the suffix within the resolved country.
    state, = Subdivision.find([
        ('code', 'ilike', '%-' + data['state']),
        ('country', '=', country['id'])
    ])
    values = {
        key: data[key]
        for key in ('name', 'street', 'street_bis', 'city', 'zip')
    }
    values.update({
        'party': customer_id,
        'country': country['id'],
        'subdivision': state['id'],
    })
    address, = Address.create([values])
    return address['id']
def create_customer(name, email, phone):
    """Create a customer with the given name.

    The email and phone are attached as contact mechanisms.
    """
    Party = client.model('party.party')
    ContactMechanism = client.model('party.contact_mechanism')
    party, = Party.create([{'name': name}])
    # Bulk-create both contact mechanisms in a single call.
    contact_records = [
        {'type': 'email', 'value': email, 'party': party},
        {'type': 'phone', 'value': phone, 'party': party},
    ]
    ContactMechanism.create(contact_records)
    return party
def create_order(order):
    """Create an order on fulfil from order details.

    Resolves (or creates) the customer and both addresses, builds the
    order lines against warehouses that have stock, then quotes and
    confirms the sale.
    """
    SaleOrder = client.model('sale.sale')
    SaleOrderLine = client.model('sale.line')

    # Resolve the customer, creating one when missing.
    customer = order['customer']
    customer_id = get_customer(customer['code'])
    if not customer_id:
        customer_id = create_customer(
            customer['name'], customer['email'], customer['phone'])

    # Resolve (or create) the invoice address.
    invoice_address = get_address(customer_id, order['invoice_address'])
    if not invoice_address:
        invoice_address = create_address(
            customer_id, order['invoice_address'])

    # Resolve (or create) the shipping address.
    shipment_address = get_address(customer_id, order['shipment_address'])
    if not shipment_address:
        shipment_address = create_address(
            customer_id, order['shipment_address'])

    sale_order_id, = SaleOrder.create([{
        'reference': order['number'],
        'sale_date': order['date'],
        'party': customer_id,
        'invoice_address': invoice_address,
        'shipment_address': shipment_address,
    }])

    # Fetch inventory of all products before creating lines.
    warehouse_ids = [warehouse['id'] for warehouse in get_warehouses()]
    lines = []
    for item in order['items']:
        # Get the product; it is assumed to already exist.
        product = get_product(item['product'])
        # Pick the first location with enough available inventory.
        # NOTE(review): if no location qualifies, the last iterated
        # location is used — confirm this fallback is intended.
        product_inventory = get_product_inventory(product, warehouse_ids)
        for location, quantities in product_inventory.items():
            if quantities['quantity_available'] >= item['quantity']:
                break
        lines.append({
            'sale': sale_order_id,
            'product': product,
            'quantity': item['quantity'],
            'unit_price': item['unit_price'],
            'warehouse': location,
        })
    SaleOrderLine.create(lines)

    SaleOrder.quote([sale_order_id])
    SaleOrder.confirm([sale_order_id])
def model_base(fulfil_client, cache_backend=None, cache_expire=10 * 60):
    """Return a base Model class bound to the fulfil client and cache.

    This design is inspired by the declarative base pattern in
    SQLAlchemy.
    """
    attrs = {
        'fulfil_client': fulfil_client,
        'cache_backend': cache_backend,
        'cache_expire': cache_expire,
        '__abstract__': True,
        '__modelregistry__': {},
    }
    return type('BaseModel', (Model,), attrs)
def all(self):
    """Return the results represented by this Query.

    Returns an iterator that lazily loads records instead of fetching
    thousands of records at once.
    """
    pagination = {
        'context': self.context,
        'offset': self._offset or 0,
        'limit': self._limit,
    }
    return self.rpc_model.search_read_all(
        self.domain, self._order_by, self.fields, **pagination)
def count(self):
    """Return a count of rows this Query would return."""
    return self.rpc_model.search_count(self.domain, context=self.context)
def exists(self):
    """Return True if a record satisfying the query exists."""
    matching = self.rpc_model.search_count(self.domain, context=self.context)
    return matching > 0
def show_active_only(self, state):
    """Return a copy of this query with active_only set to ``state``."""
    clone = self._copy()
    clone.active_only = state
    return clone
def filter_by(self, **kwargs):
    """Return a copy of this Query filtered by keyword equality clauses."""
    clone = self._copy()
    clone.domain.extend(
        (field, '=', value) for field, value in kwargs.items()
    )
    return clone
def filter_by_domain(self, domain):
    """Return a copy of this query with the given domain applied."""
    clone = self._copy()
    clone.domain = domain
    return clone
def first(self):
    """Return the first result of this Query, or None if there is none."""
    rows = self.rpc_model.search_read(
        self.domain, None, 1, self._order_by, self.fields,
        context=self.context
    )
    return rows and rows[0] or None
def get(self, id):
    """Return an instance by its primary key identifier, or None.

    The record is returned whether active or not.
    """
    # Disable the active filter for this lookup only.
    lookup_ctx = self.context.copy()
    lookup_ctx['active_test'] = False
    rows = self.rpc_model.search_read(
        [('id', '=', id)],
        None, None, None, self.fields,
        context=lookup_ctx
    )
    return rows and rows[0] or None
def limit(self, limit):
    """Apply a LIMIT and return the newly resulting Query."""
    clone = self._copy()
    clone._limit = limit
    return clone
def offset(self, offset):
    """Apply an OFFSET and return the newly resulting Query."""
    clone = self._copy()
    clone._offset = offset
    return clone
def one(self):
    """Return exactly one result or raise an exception.

    Raises fulfil_client.exc.NoResultFound if the query selects no rows.
    Raises fulfil_client.exc.MultipleResultsFound if multiple rows are
    found.
    """
    # Fetch at most two records (offset=None, limit=2): the second only
    # proves non-uniqueness. The original passed 2 as the *offset*
    # (compare first() / search_read_all argument order), which skipped
    # rows instead of limiting them.
    results = self.rpc_model.search_read(
        self.domain, None, 2, self._order_by, self.fields,
        context=self.context
    )
    if not results:
        raise fulfil_client.exc.NoResultFound
    if len(results) > 1:
        raise fulfil_client.exc.MultipleResultsFound
    return results[0]
def order_by(self, *criterion):
    """Apply ORDER BY criteria and return the newly resulting Query.

    Existing ORDER BY settings (including mapper defaults) can be
    suppressed by passing None.
    """
    clone = self._copy()
    clone._order_by = criterion
    return clone
def delete(self):
    """Delete all records matching the query.

    Warning: this is a destructive operation. Not every model allows
    deletion, and several restrict it by state (e.g. transacted
    products, non-draft sales orders). On failure the server raises.
    """
    matching_ids = self.rpc_model.search(self.domain, context=self.context)
    if matching_ids:
        self.rpc_model.delete(matching_ids)
def archive(self):
    """Archive (soft delete) all records matching the query.

    Assumes the model allows archiving; internally this sets the
    'active' field to False.
    """
    matching_ids = self.rpc_model.search(self.domain, context=self.context)
    if matching_ids:
        self.rpc_model.write(matching_ids, {'active': False})
if re.match(r'^:(testserver\.local|tmi\.twitch\.tv)'
r' NOTICE \* :'
r'(Login unsuccessful|Error logging in)*$',
data.strip()):
return False
else:
return True | def _logged_in_successful(data) | Test the login status from the returned communication of the
server.
:param data: bytes received from server during login
:type data: list of bytes
:return boolean, True when you are logged in. | 12.464607 | 12.565342 | 0.991983 |
def connect(self):
    """Connect to Twitch and log in over IRC."""
    # Deliberately a blocking socket during connect/login; non-blocking
    # stream sockets are not reliably non-blocking here.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    connect_host = "irc.twitch.tv"
    connect_port = 6667
    try:
        sock.connect((connect_host, connect_port))
    except (Exception, IOError):
        print("Unable to create a socket to %s:%s" % (
            connect_host,
            connect_port))
        raise  # unexpected, because it is a blocking socket
    # Connected to Twitch; send our credentials.
    sock.send(('PASS %s\r\n' % self.oauth).encode('utf-8'))
    sock.send(('NICK %s\r\n' % self.username).encode('utf-8'))
    if self.verbose:
        print('PASS %s\r\n' % self.oauth)
        print('NICK %s\r\n' % self.username)
    received = sock.recv(1024).decode()
    if self.verbose:
        print(received)
    if not TwitchChatStream._logged_in_successful(received):
        # Twitch rejected the credentials.
        raise IOError("Twitch did not accept the username-oauth "
                      "combination")
    # Login accepted: make the socket non-blocking at OS level.
    fcntl.fcntl(sock, fcntl.F_SETFL, os.O_NONBLOCK)
    if self.s is not None:
        self.s.close()  # close the previous socket
    self.s = sock  # store the new socket
    self.join_channel(self.username)
    # Wait until the channel switch is acknowledged.
    while self.current_channel != self.username:
        self.twitch_receive_messages()
if len(self.buffer) > 0:
if time.time() - self.last_sent_time > 5:
try:
message = self.buffer.pop(0)
self.s.send(message.encode('utf-8'))
if self.verbose:
print(message)
finally:
self.last_sent_time = time.time() | def _push_from_buffer(self) | Push a message on the stack to the IRC stream.
This is necessary to avoid Twitch overflow control. | 2.548998 | 2.382745 | 1.069774 |
def join_channel(self, channel):
    """Join a different chat channel on Twitch.

    Returns immediately; the actual switch may take a moment.

    :param channel: name of the channel (without #)
    """
    join_command = 'JOIN #%s\r\n' % channel
    self.s.send(join_command.encode('utf-8'))
    if self.verbose:
        print(join_command)
def _parse_message(self, data):
    """Parse one line received from the socket.

    Answers pings and tracks channel switches as side effects.

    :param data: the line received from the socket
    :return: dict with channel/username/message keys, or None
    """
    if TwitchChatStream._check_has_ping(data):
        self._send_pong()
    channel_switch = TwitchChatStream._check_has_channel(data)
    if channel_switch:
        self.current_channel = channel_switch[0]
    if not TwitchChatStream._check_has_message(data):
        return None
    return {
        'channel': re.findall(r'^:.+![a-zA-Z0-9_]+'
                              r'@[a-zA-Z0-9_]+'
                              r'.+ '
                              r'PRIVMSG (.*?) :',
                              data)[0],
        'username': re.findall(r'^:([a-zA-Z0-9_]+)!', data)[0],
        # NOTE(review): .decode('utf8') assumes Python 2 byte strings;
        # on Python 3 findall returns str and this raises — confirm the
        # target runtime.
        'message': re.findall(r'PRIVMSG #[a-zA-Z0-9_]+ :(.+)',
                              data)[0].decode('utf8')
    }
def twitch_receive_messages(self):
    """Process everything received by the socket.

    Call this frequently enough (~10s); Twitch logs off users that do
    not answer its ping commands.

    :return: list of chat messages received, each a dict with keys
        ['channel', 'username', 'message']
    """
    self._push_from_buffer()
    result = []
    while True:
        # Drain the buffer until no data is left.
        try:
            msg = self.s.recv(4096).decode()  # NON-BLOCKING RECEIVE!
        except socket.error as e:
            err = e.args[0]
            if err in (errno.EAGAIN, errno.EWOULDBLOCK):
                # No more data available to read.
                return result
            # A "real" error occurred; reconnect and return what we have.
            self.connect()
            return result
        if self.verbose:
            print(msg)
        parsed = (self._parse_message(line)
                  for line in msg.split('\r\n') if line)
        result.extend(message for message in parsed if message)
def reset(self):
    """Reset the videostream by restarting ffmpeg."""
    if self.ffmpeg_process is not None:
        # Close the previous stream gracefully.
        try:
            self.ffmpeg_process.send_signal(signal.SIGINT)
        except OSError:
            pass
    command = []
    command.extend([
        self.ffmpeg_binary,
        '-loglevel', 'verbose',
        '-y',  # overwrite previous file/stream
        # '-re',  # native frame-rate
        '-analyzeduration', '1',
        '-f', 'rawvideo',
        '-r', '%d' % self.fps,  # set a fixed frame rate
        '-vcodec', 'rawvideo',
        # size of one frame
        '-s', '%dx%d' % (self.width, self.height),
        '-pix_fmt', 'rgb24',  # The input are raw bytes
        '-thread_queue_size', '1024',
        '-i', '/tmp/videopipe',  # The input comes from a pipe
        # Twitch needs to receive sound in their streams!
        # '-an', # Tells FFMPEG not to expect any audio
    ])
    if self.audio_enabled:
        command.extend([
            '-ar', '%d' % AUDIORATE,
            '-ac', '2',
            '-f', 's16le',
            '-thread_queue_size', '1024',
            '-i', '/tmp/audiopipe'
        ])
    else:
        command.extend([
            '-ar', '8000',
            '-ac', '1',
            '-f', 's16le',
            '-i', '/dev/zero',  # silence alternative, works forever
            # '-i','http://stream1.radiostyle.ru:8001/tunguska',
            # '-filter_complex',
            # '[0:1][1:0]amix=inputs=2:duration=first[all_audio]'
        ])
    command.extend([
        # VIDEO CODEC PARAMETERS
        '-vcodec', 'libx264',
        '-r', '%d' % self.fps,
        '-b:v', '3000k',
        '-s', '%dx%d' % (self.width, self.height),
        '-preset', 'faster', '-tune', 'zerolatency',
        '-crf', '23',
        '-pix_fmt', 'yuv420p',
        # '-force_key_frames', r'expr:gte(t,n_forced*2)',
        '-minrate', '3000k', '-maxrate', '3000k',
        '-bufsize', '12000k',
        '-g', '60',  # key frame distance
        '-keyint_min', '1',
        # '-filter:v "setpts=0.25*PTS"'
        # '-vsync','passthrough',
        # AUDIO CODEC PARAMETERS
        '-acodec', 'libmp3lame', '-ar', '44100', '-b:a', '160k',
        # '-bufsize', '8192k',
        '-ac', '1',
        # '-acodec', 'aac', '-strict', 'experimental',
        # '-ab', '128k', '-ar', '44100', '-ac', '1',
        # '-async','44100',
        # '-filter_complex', 'asplit', #for audio sync?
        # STORE THE VIDEO PARAMETERS
        # '-vcodec', 'libx264', '-s', '%dx%d'%(width, height),
        # '-preset', 'libx264-fast',
        # 'my_output_videofile2.avi'
        # MAP THE STREAMS
        # use only video from first input and only audio from second
        '-map', '0:v', '-map', '1:a',
        # NUMBER OF THREADS
        '-threads', '2',
        # STREAM TO TWITCH
        '-f', 'flv', 'rtmp://live-ams.twitch.tv/app/%s' %
        self.twitch_stream_key
    ])
    # Only open /dev/null when output is actually discarded; the
    # original opened it unconditionally and leaked the file descriptor
    # in verbose mode.
    devnullpipe = None if self.verbose else open("/dev/null", "w")
    self.ffmpeg_process = subprocess.Popen(
        command,
        stdin=subprocess.PIPE,
        stderr=devnullpipe,
        stdout=devnullpipe)
def send_video_frame(self, frame):
    """Send a frame of shape (height, width, 3) with values in [0, 1].

    Raises an OSError when the stream is closed.

    :param frame: numpy array with shape (height, width, 3)
        containing values between 0.0 and 1.0
    """
    if self.video_pipe is None:
        # Lazily create and open the named pipe ffmpeg reads from.
        if not os.path.exists('/tmp/videopipe'):
            os.mkfifo('/tmp/videopipe')
        self.video_pipe = os.open('/tmp/videopipe', os.O_WRONLY)
    assert frame.shape == (self.height, self.width, 3)
    frame = np.clip(255 * frame, 0, 255).astype('uint8')
    try:
        # tobytes() replaces ndarray.tostring(), which was deprecated
        # and then removed from NumPy; the produced bytes are identical.
        os.write(self.video_pipe, frame.tobytes())
    except OSError:
        # The pipe has been closed. Reraise and handle it further
        # downstream.
        raise
def send_audio(self, left_channel, right_channel):
    """Add audio samples to the stream.

    Both channels must have the same shape (k,), with values between
    -1.0 and 1.0. Raises an OSError when the stream is closed.

    :param left_channel: numpy array with shape (k, )
    :param right_channel: numpy array with shape (k, )
    """
    if self.audio_pipe is None:
        # Lazily create and open the named pipe ffmpeg reads from.
        if not os.path.exists('/tmp/audiopipe'):
            os.mkfifo('/tmp/audiopipe')
        self.audio_pipe = os.open('/tmp/audiopipe', os.O_WRONLY)
    assert len(left_channel.shape) == 1
    assert left_channel.shape == right_channel.shape
    # Interleave L/R samples and convert to signed 16-bit PCM.
    frame = np.column_stack((left_channel, right_channel)).flatten()
    frame = np.clip(32767 * frame, -32767, 32767).astype('int16')
    try:
        # tobytes() replaces the removed ndarray.tostring() alias.
        os.write(self.audio_pipe, frame.tobytes())
    except OSError:
        # The pipe has been closed. Reraise and handle it further
        # downstream.
        raise
def send_audio(self, left_channel, right_channel):
    """Remember the most recent audio samples for the stream.

    Both channels must have the same shape (k,), with values between
    -1.0 and 1.0.

    :param left_channel: numpy array with shape (k, )
    :param right_channel: numpy array with shape (k, )
    """
    self.lastaudioframe_left = left_channel
    self.lastaudioframe_right = right_channel
def send_video_frame(self, frame, frame_counter=None):
    """Queue a frame of shape (height, width, 3) with values in [0, 1].

    :param frame: numpy array with shape (height, width, 3)
        containing values between 0.0 and 1.0
    :param frame_counter: frame position within the stream; pass this
        when multi-threading so frames keep their order.
    :type frame_counter: int
    """
    if frame_counter is None:
        # Auto-number the frame and advance the counter.
        frame_counter = self.frame_counter
        self.frame_counter = frame_counter + 1
    self.q_video.put((frame_counter, frame))
def send_audio(self,
               left_channel,
               right_channel,
               frame_counter=None):
    """Queue audio samples for the stream.

    Both channels must have the same shape (k,), with values between
    -1.0 and 1.0.

    :param left_channel: numpy array with shape (k, )
    :param right_channel: numpy array with shape (k, )
    :param frame_counter: frame position within the stream; pass this
        when multi-threading so frames keep their order.
    :type frame_counter: int
    """
    if frame_counter is None:
        # Auto-number the audio frame and advance the counter.
        frame_counter = self.audio_frame_counter
        self.audio_frame_counter = frame_counter + 1
    self.q_audio.put((frame_counter, left_channel, right_channel))
def import_attribute(self, path):
    """Import an attribute from a module.

    :param path: dotted path, e.g. ``'package.module.attribute'``
    :return: the attribute object
    """
    # rpartition splits once from the right — no need to split the whole
    # path and re-join it as the original did.
    module_path, _, attribute_name = path.rpartition('.')
    module = importlib.import_module(module_path)
    return getattr(module, attribute_name)
def get_next(self, request):
    """Return a url to redirect to after the login / signup."""
    fallback = getattr(settings, 'LOGIN_REDIRECT_URL', '/')
    if 'next' in request.session:
        target = request.session['next']
        del request.session['next']
    elif 'next' in request.GET:
        target = request.GET.get('next')
    elif 'next' in request.POST:
        target = request.POST.get('next')
    else:
        target = fallback
    # Reject redirects pointing at a different host (open-redirect
    # protection).
    netloc = urlparse.urlparse(target)[1]
    if netloc and netloc != request.get_host():
        target = fallback
    return target
inactive_url = getattr(settings, 'LOGIN_INACTIVE_REDIRECT_URL', '')
if inactive_url:
return HttpResponseRedirect(inactive_url)
else:
return self.error_to_response(request, {'error': _("This user account is marked as inactive.")}) | def inactive_response(self, request) | Return an inactive message. | 4.043611 | 4.022148 | 1.005336 |
def create_profile(self, user, save=False, **kwargs):
    """Create a profile model instance.

    :param user: a user object
    :param save: if set, the profile is saved to DB straight away
    :type save: bool
    """
    profile_class = self.get_model()
    profile = profile_class(user=user, **kwargs)
    if save:
        profile.save()
    return profile
def get_or_create_profile(self, user, save=False, **kwargs):
    """Return a profile from the DB, creating a new one when missing.

    :param user: a user object
    :param save: if set, a newly created profile is saved
    :type save: bool
    :return: (profile, created) tuple
    """
    model = self.get_model()
    try:
        return model.objects.get(user=user, **kwargs), False
    except model.DoesNotExist:
        return self.create_profile(user, save=save, **kwargs), True
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.