for candidate_cls in getmro(cls):
    if is_backup_class(candidate_cls):
        # Note that we don't keep recursing on base classes
        added += self.add_class(candidate_cls, include_bases=False)
return added
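For context, `getmro` here is `inspect.getmro`, which returns a class followed by its base classes in method-resolution order, so one pass over it can pick up backup-able bases without recursing again. A minimal sketch of that idea (the marker attribute and the `is_backup_class` stand-in below are illustrative assumptions, not taken from the source):

from inspect import getmro

def is_backup_class(cls):
    # Hypothetical predicate: treat any class carrying a marker attribute
    # as backup-able. The real check in the source library is not shown here.
    return getattr(cls, '__backup_enabled__', False)

class DemoBase:
    __backup_enabled__ = True

class Demo(DemoBase):
    pass

# getmro(Demo) yields (Demo, DemoBase, object); a single scan finds every
# backup-able class in the hierarchy, so no further recursion is needed.
print([c.__name__ for c in getmro(Demo) if is_backup_class(c)])
# -> ['Demo', 'DemoBase']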
1129,"def log(self, entry, *args):
|
""""""Append the string supplied to the log (a list of strings). If
|
additional arguments are supplied, then first string is assumed to be
|
a format string and the other args are used for string interpolation.
|
For instance `backup.log(""%d + %d == %d"", 1, 1, 2)` would result in the
|
string `'1 + 1 == 2'` being logged""""""
|
if args:
|
entry = entry % args
|
self.backup_log.append(entry)"
|
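A minimal usage sketch of the %-interpolation behaviour described in that docstring (the `Backup` stub and its `__init__` are illustrative; only `backup_log` and `log` mirror the source):

class Backup:
    def __init__(self):
        self.backup_log = []

    def log(self, entry, *args):
        # Same logic as above: interpolate only when extra args are given
        if args:
            entry = entry % args
        self.backup_log.append(entry)

backup = Backup()
backup.log("%d + %d == %d", 1, 1, 2)
backup.log("plain message, no interpolation")
print(backup.backup_log)  # ['1 + 1 == 2', 'plain message, no interpolation']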
1130,"def run_backup(self):
|
""""""The actual backup is performed. The data for all added classes is
|
extracted and written to a file per class where each line (terminated
|
by a line feed character) is the JSON representing a single object.
|
Those files are all archived in a single gzip'ed tarball which is
|
stored in the AWS S3 bucket specified when the current instance of
|
Backup was created""""""
|
self.log(""Starting backup at %s"", now_field())
|
self.log(""Backup config object created at %s"", self.timestamp)
|
# Make sure we're good to go
|
for fld in ['aws_access_key', 'aws_secret_key', 'bucketname']:
|
val = getattr(self, fld, None)
|
if not val:
|
self.log(""Backup cannot start: %s is a required field"", fld)
|
raise ValueError(self.backup_log[-1])
|
# Start the compressed tarball our data is stored in
|
backup_file = NamedTemporaryFile(suffix="".tar.gz"")
|
backup_tarfile = tarfile.open(fileobj=backup_file, mode='w:gz')
|
for cls_name, cls in self.classes.items():
|
self.log(""Backing up %s"", cls_name)
|
rec_count = 0
|
with NamedTemporaryFile() as record_file:
|
for rec in cls.find_all():
|
write_line(record_file, rec.to_data())
|
rec_count += 1
|
record_file.flush()
|
backup_tarfile.add(record_file.name, arcname=cls_name+'.json')
|
self.log(""%s => %d records backed up"", cls_name, rec_count)
|
# Finalize archive
|
backup_tarfile.close()
|
backup_file.flush()
|
backup_size = os.stat(backup_file.name)[6]
|
# Figure out key name for archived file
|
key_name = ('Backup_' + now_field() + '.tar.gz').replace(':', '_')
|
# upload archive to s3
|
if os.environ.get('DEBUG', False) or os.environ.get('travis', False):
|
# Local or CI - connect to our mock s3 service
|
conn = S3Connection(
|
'', '',
|
is_secure=False, port=8888, host='localhost',
|
calling_format=OrdinaryCallingFormat()
|
)
|
else:
|
conn = S3Connection(self.aws_access_key, self.aws_secret_key)
|
bucket = conn.get_bucket(self.bucketname)
|
key = Key(bucket)
|
key.key = key_name
|
self.log(
|
""Sending %s [size=%d bytes] with key name %s"",
|
backup_file.name,
|
backup_size,
|
key_name
|
)
|
# TODO: should probably look into a multi-part upload for larger backup
|
key.set_contents_from_filename(backup_file.name)
|
self.log(""Sent %s"", backup_file.name)
|
# All done
|
backup_file.close()
|
self.log(""Backup completed"")
|
# return the bucket name and key name for the completed backup
|
return self.bucketname, key_name"
|
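The docstring above fixes the archive layout: one "<ClassName>.json" member per backed-up class, one JSON object per line. A small, hedged sketch of how such an archive could be read back with the standard library (the function name and archive path are made up for illustration; nothing here is part of the source API):

import json
import tarfile

def iter_backup_records(archive_path):
    # Illustrative reader for the layout produced by run_backup():
    # yields (class_name, record_dict) pairs, one per JSON line.
    with tarfile.open(archive_path, mode='r:gz') as tf:
        for member in tf.getmembers():
            fileobj = tf.extractfile(member)
            if fileobj is None:
                continue  # skip anything that isn't a regular file
            cls_name = member.name[:-len('.json')]
            for line in fileobj:
                yield cls_name, json.loads(line)

# e.g. for cls_name, data in iter_backup_records('Backup_2024.tar.gz'): ...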
1131,"def list_dir(sourceDir, include_source=None, include_file=True):
|
""""""与 :func:`os.listdir()` 类似,但提供一些筛选功能,且返回生成器对象。
|
:param str sourceDir: 待处理的文件夹。
|
:param bool include_source: 遍历结果中是否包含源文件夹的路径。
|
:param bool include_file: 是否包含文件。True 表示返回的内容中既包含文件,又
|
包含文件夹;Flase 代表仅包含文件夹。
|
:return: 一个生成器对象。
|
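The excerpt cuts off before the function body, so the following is only a rough sketch of the behaviour the docstring describes (in particular, treating `include_source` as "prefix results with `sourceDir`" is an assumption):

import os

def list_dir_sketch(sourceDir, include_source=None, include_file=True):
    # Illustrative implementation; the real body is not shown in this excerpt.
    for name in os.listdir(sourceDir):
        full = os.path.join(sourceDir, name)
        if not include_file and os.path.isfile(full):
            continue  # only directories are wanted
        yield full if include_source else name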