Dataset schema (one row per source file):

| column | type | stats |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | lengths 4 to 119 |
| file_path | string | lengths 14 to 227 |
| content | string | lengths 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 distinct value |
| extension | string | 14 distinct values |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | lengths 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 distinct values |
| repo_extraction_date | string | 433 distinct values |
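A minimal sketch of how rows with this schema could be loaded and filtered with the Hugging Face datasets library; the dataset path "example/python-source-files" is a hypothetical placeholder, and only the column names come from the schema above:

from datasets import load_dataset

# Load the (hypothetically named) dataset and keep rows from a single repo.
ds = load_dataset("example/python-source-files", split="train")
gmvault_rows = ds.filter(lambda row: row["repo_name"] == "gaubert/gmvault")
for row in gmvault_rows:
    print(row["id"], row["file_path"], row["total_lines"])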
id: 12,300 | file_name: test_utils.py | file_path: gaubert_gmvault/src/gmv/test_utils.py
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail dot com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import base64
import os
import datetime
import hashlib
import gmv.gmvault as gmvault
import gmv.imap_utils as imap_utils
import gmv.credential_utils as cred_utils
import gmv.gmvault_db as gmvault_db
import gmv.gmvault_utils as gmvault_utils
def check_remote_mailbox_identical_to_local(the_self, gmvaulter, extra_labels = []): #pylint: disable=C0103,R0912,R0914,R0915
"""
Check that the remote mailbox is identical to the local one attached
to gmvaulter
Need a connected gmvaulter
"""
# get all email data from gmvault-db
pivot_dir = None
gmail_ids = gmvaulter.gstorer.get_all_existing_gmail_ids(pivot_dir)
print("gmail_ids = %s\n" % (gmail_ids))
#need to check that all labels are present for each email
gmvaulter.src.select_folder('ALLMAIL')
# check the number of id on disk
imap_ids = gmvaulter.src.search({ 'type' : 'imap', 'req' : 'ALL'}) #get everything
the_self.assertEquals(len(imap_ids), \
len(gmail_ids), \
"Error. Should have the same number of emails: local nb of emails %d,"\
" remote nb of emails %d" % (len(gmail_ids), len(imap_ids)))
for gm_id in gmail_ids:
print("Fetching id %s with request %s" % (gm_id, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA))
#get disk_metadata
disk_metadata = gmvaulter.gstorer.unbury_metadata(gm_id)
print("disk metadata %s\n" % (disk_metadata))
#date = disk_metadata['internal_date'].strftime('"%d %b %Y"')
subject = disk_metadata.get('subject', None)
msgid = disk_metadata.get('msg_id', None)
received = disk_metadata.get('x_gmail_received', None)
req = "("
has_something = False
#if date:
# req += 'HEADER DATE {date}'.format(date=date)
# has_something = True
if subject:
#split on ' when contained in subject to keep only the first part
subject = subject.split("'")[0]
subject = subject.split('"')[0]
if has_something: #add a separating space if the request already has a field
req += ' '
req += 'SUBJECT "{subject}"'.format(subject=subject.strip().encode('utf-8'))
has_something = True
if msgid:
if has_something: #add a separating space if the request already has a field
req += ' '
req += 'HEADER MESSAGE-ID {msgid}'.format(msgid=msgid.strip())
has_something = True
if received:
if has_something:
req += ' '
req += 'HEADER X-GMAIL-RECEIVED {received}'.format(received=received.strip())
has_something = True
req += ")"
print("Req = %s\n" % (req))
imap_ids = gmvaulter.src.search({ 'type' : 'imap', 'req': req, 'charset': 'utf-8'})
print("imap_ids = %s\n" % (imap_ids))
if len(imap_ids) != 1:
the_self.fail("expected exactly one imap_id but got %s for request %s" % (imap_ids, req))
imap_id = imap_ids[0]
# get online_metadata
online_metadata = gmvaulter.src.fetch(imap_id, \
imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA)
print("online_metadata = %s\n" % (online_metadata))
print("disk_metadata = %s\n" % (disk_metadata))
header_fields = online_metadata[imap_id]['BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]']
subject, msgid, received = gmvault_db.GmailStorer.parse_header_fields(header_fields)
#compare metadata
the_self.assertEquals(subject, disk_metadata.get('subject', None))
the_self.assertEquals(msgid, disk_metadata.get('msg_id', None))
the_self.assertEquals(received, disk_metadata.get('x_gmail_received', None))
# check internal date it is plus or minus 1 hour
online_date = online_metadata[imap_id].get('INTERNALDATE', None)
disk_date = disk_metadata.get('internal_date', None)
if online_date != disk_date:
min_date = disk_date - datetime.timedelta(hours=1)
max_date = disk_date + datetime.timedelta(hours=1)
if min_date <= online_date <= max_date:
print("online_date (%s) and disk_date (%s) differs but "\
"within one hour. This is OK (timezone pb) *****" % (online_date, disk_date))
else:
the_self.fail("online_date (%s) and disk_date (%s) are different" % (online_date, disk_date))
#check labels
disk_labels = disk_metadata.get('labels', None)
#add extra labels
for x_lab in extra_labels:
disk_labels.append(x_lab)
online_labels = imap_utils.decode_labels(online_metadata[imap_id].get('X-GM-LABELS', None))
#clean potential labels with multiple spaces
disk_labels = [ gmvault_utils.remove_consecutive_spaces_and_strip(label) for label in disk_labels ]
online_labels = [ gmvault_utils.remove_consecutive_spaces_and_strip(label) for label in online_labels ]
if not disk_labels: #no disk_labels check that there are no online_labels
the_self.assertTrue(not online_labels)
print("disk_labels = %s\n" % (disk_labels))
print("online_labels = %s\n" % (online_labels))
the_self.assertEquals(len(disk_labels), len(online_labels))
for label in disk_labels:
#change label Migrated (lower and uppercase) to gmv-migrated because reserved by Gmail
if label.lower() == "migrated":
label = "gmv-migrated"
elif label.lower() == r"\muted":
label = "gmv-muted"
if label not in online_labels:
the_self.fail("label %s should be in online_labels %s as"\
" it is in disk_labels %s" % (label, online_labels, disk_labels))
# check flags
disk_flags = disk_metadata.get('flags', None)
online_flags = online_metadata[imap_id].get('FLAGS', None)
if not disk_flags: #no disk flags
the_self.assertTrue(not online_flags)
the_self.assertEquals(len(disk_flags), len(online_flags))
for flag in disk_flags:
if flag not in online_flags:
the_self.fail("flag %s should be in "\
"online_flags %s as it is in disk_flags %s" \
% (flag, online_flags, disk_flags))
def find_identical_emails(gmvaulter_a): #pylint: disable=R0914
"""
Find emails that are identical
"""
# check all ids one by one
gmvaulter_a.src.select_folder('ALLMAIL')
# check the number of id on disk
imap_ids_a = gmvaulter_a.src.search({ 'type' : 'imap', 'req' : 'ALL'})
batch_size = 1000
batch_fetcher_a = gmvault.IMAPBatchFetcher(gmvaulter_a.src, imap_ids_a, \
gmvaulter_a.error_report, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = batch_size)
print("Got %d emails in gmvault_a(%s).\n" % (len(imap_ids_a), gmvaulter_a.login))
identicals = {}
in_db = {}
total_processed = 0
imap_ids = gmvaulter_a.src.search({ 'type' : 'imap', \
'req' : '(HEADER MESSAGE-ID 1929235391.1106286872672.JavaMail.wserver@disvds016)'})
print("Len(imap_ids): %d, imap_ids = %s" % (len(imap_ids), imap_ids))
# get all gm_id for fetcher_b
for gm_ids in batch_fetcher_a:
cpt = 0
#print("gm_ids = %s\n" % (gm_ids))
print("Process a new batch (%d). Total processed:%d.\n" % (batch_size, total_processed))
for one_id in gm_ids:
if cpt % 50 == 0:
print("look for %s" % (one_id))
header_fields = gm_ids[one_id]['BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]']
subject, msgid, received = gmvault_db.GmailStorer.parse_header_fields(header_fields)
labels = gm_ids[one_id]['X-GM-LABELS']
date_internal = gm_ids[one_id]['INTERNALDATE']
if not in_db.get(msgid, None):
in_db[msgid] = [{'subject': subject, 'received': received, \
'gmid': gm_ids[one_id]['X-GM-MSGID'], \
'date': date_internal , 'labels': labels}]
else:
in_db[msgid].append({'subject': subject, 'received': received, \
'gmid': gm_ids[one_id]['X-GM-MSGID'], \
'date': date_internal , 'labels': labels})
print("identical found msgid %s : %s" \
% (msgid, {'subject': subject, \
'received': received, \
'gmid': gm_ids[one_id]['X-GM-MSGID'],\
'date': date_internal , 'labels': labels}))
cpt += 1
total_processed += batch_size
#create list of identicals
for msgid in in_db:
if len(in_db[msgid]) > 1:
identicals[msgid] = in_db[msgid]
#print identicals
print("Found %d identicals" % (len(identicals)))
for msgid in identicals:
print("== MSGID ==: %s" % (msgid))
for vals in identicals[msgid]:
print("===========> gmid: %s ### date: %s ### subject: %s ### "\
"labels: %s ### received: %s" \
% (vals.get('gmid',None), vals.get('date', None),\
vals.get('subject',None), vals.get('labels', None), \
vals.get('received',None)))
#print("vals:%s" % (vals))
print("\n")
#print("Identical emails:\n%s" % (identicals))
def diff_online_mailboxes(gmvaulter_a, gmvaulter_b): #pylint: disable=R0912, R0914
"""
Diff 2 mailboxes
"""
# check all ids one by one
gmvaulter_a.src.select_folder('ALLMAIL')
gmvaulter_b.src.select_folder('ALLMAIL')
# check the number of id on disk
imap_ids_a = gmvaulter_a.src.search({ 'type' : 'imap', 'req' : 'ALL'})
imap_ids_b = gmvaulter_b.src.search({ 'type' : 'imap', 'req' : 'ALL'})
batch_size = 700
batch_fetcher_a = gmvault.IMAPBatchFetcher(gmvaulter_a.src, imap_ids_a, gmvaulter_a.error_report, \
imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = batch_size)
batch_fetcher_b = gmvault.IMAPBatchFetcher(gmvaulter_b.src, imap_ids_b, gmvaulter_b.error_report, \
imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = batch_size)
print("Got %d emails in gmvault_a(%s).\n" % (len(imap_ids_a), gmvaulter_a.login))
print("Got %d emails in gmvault_b(%s).\n" % (len(imap_ids_b), gmvaulter_b.login))
if len(imap_ids_a) != len(imap_ids_b):
print("Oh Oh, gmvault_a has %s emails and gmvault_b has %s emails\n" \
% (len(imap_ids_a), len(imap_ids_b)))
else:
print("Both databases has %d emails." % (len(imap_ids_a)))
diff_result = { "in_a" : {},
"in_b" : {},
}
gm_ids_b = {}
total_processed = 0
# get all gm_id for fetcher_b
for gm_ids in batch_fetcher_b:
#print("gm_ids = %s\n" % (gm_ids))
print("Process a new batch (%d). Total processed:%d.\n" % (batch_size, total_processed))
for one_id in gm_ids:
gm_id = gm_ids[one_id]['X-GM-MSGID']
header_fields = gm_ids[one_id]['BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]']
subject, msgid, received = gmvault_db.GmailStorer.parse_header_fields(header_fields)
the_hash = hashlib.md5()
if received:
the_hash.update(received)
if subject:
the_hash.update(subject)
if msgid:
the_hash.update(msgid)
id = base64.encodestring(the_hash.digest())
gm_ids_b[id] = [gm_id, subject, msgid]
total_processed += batch_size
#dumb search, no optimisation
#iterate over imap_ids_a and flag emails that are in a but not in b
#remove emails from gm_ids_b every time they are found
for data_infos in batch_fetcher_a:
for gm_info in data_infos:
gm_id = data_infos[gm_info]['X-GM-MSGID']
header_fields = data_infos[gm_info]['BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]']
subject, msgid, received = gmvault_db.GmailStorer.parse_header_fields(header_fields)
the_hash = hashlib.md5()
if received:
the_hash.update(received)
if subject:
the_hash.update(subject)
if msgid:
the_hash.update(msgid)
id = base64.encodestring(the_hash.digest())
if id not in gm_ids_b:
diff_result["in_a"][received] = [gm_id, subject, msgid]
else:
del gm_ids_b[id]
for recv_id in gm_ids_b:
diff_result["in_b"][recv_id] = gm_ids_b[recv_id]
# print report
if (len(diff_result["in_a"]) > 0 or len(diff_result["in_b"]) > 0):
print("emails only in gmv_a:\n")
print_diff_result(diff_result["in_a"])
print("\n")
print("emails only in gmv_b:%s\n")
print_diff_result(diff_result["in_b"])
else:
print("Mailbox %s and %s are identical.\n" % (gmvaulter_a.login, gmvaulter_b.login))
def print_diff_result(diff_result):
""" print the diff_result structure
"""
for key in diff_result:
vals = diff_result[key]
print("mailid:%s#####subject:%s#####%s." % (vals[2], vals[1], vals[0]))
def assert_login_is_protected(login):
"""
Ensure that the login is not a personal mailbox
"""
if login != 'gsync.mtester@gmail.com':
raise Exception("Beware login should be gsync.mtester@gmail.com and it is %s" % (login))
def clean_mailbox(login , credential):
"""
Delete all emails, destroy all labels
"""
gimap = imap_utils.GIMAPFetcher('imap.gmail.com', 993, login, credential, readonly_folder = False)
print("login = %s" % (login))
assert_login_is_protected(login)
gimap.connect()
gimap.erase_mailbox()
def obfuscate_string(a_str):
""" use base64 to obfuscate a string """
return base64.b64encode(a_str)
def deobfuscate_string(a_str):
""" deobfuscate a string """
return base64.b64decode(a_str)
def read_password_file(a_path):
"""
Read login:password from a file in my home directory
"""
with open(a_path) as f:
line = f.readline()
login, passwd = line.split(":")
return deobfuscate_string(login.strip()), deobfuscate_string(passwd.strip())
def get_oauth_cred(email, cred_path):
"""
Read the oauth token and secret credential.
Looks by default in ~/.gmvault for the file email.oauth.
"""
user_oauth_file_path = cred_path
token = None
secret = None
if os.path.exists(user_oauth_file_path):
print("Get XOAuth credential from %s.\n" % user_oauth_file_path)
try:
with open(user_oauth_file_path) as oauth_file:
oauth_result = oauth_file.read()
if oauth_result:
oauth_result = oauth_result.split('::')
if len(oauth_result) == 2:
token = oauth_result[0]
secret = oauth_result[1]
except Exception, _: #pylint: disable-msg=W0703
print("Cannot read oauth credentials from %s. Force oauth credentials renewal." % user_oauth_file_path)
print("=== Exception traceback ===")
print(gmvault_utils.get_exception_traceback())
print("=== End of Exception traceback ===\n")
if token: token = token.strip() #pylint: disable-msg=C0321
if secret: secret = secret.strip() #pylint: disable-msg=C0321
return {'type': 'xoauth',
'value': cred_utils.generate_xoauth_req(token, secret, email,
'normal'),
'option': None}
def delete_db_dir(a_db_dir):
"""
delete the db directory
"""
gmvault_utils.delete_all_under(a_db_dir, delete_top_dir = True)
size: 17,588 | language: Python | extension: .py | total_lines: 348 | avg_line_length: 38.885057 | max_line_length: 125 | alphanum_fraction: 0.582957 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,301 | file_name: blowfish.py | file_path: gaubert_gmvault/src/gmv/blowfish.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# blowfish.py
# Copyright (C) 2002 Michael Gilfix <mgilfix@eecs.tufts.edu>
#
# This module is open source; you can redistribute it and/or
# modify it under the terms of the GPL or Artistic License.
# These licenses are available at http://www.opensource.org
#
# This software must be used and distributed in accordance
# with the law. The author claims no liability for its
# misuse.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# This software was modified by Ivan Voras: CTR cipher mode of
# operation was added, together with testing and example code.
# These changes are (c) 2007./08. Ivan Voras <ivoras@gmail.com>
# These changes can be used, modified and distributed under the
# GPL or Artistic License, the same as the original module.
# All disclaimers of warranty from the original module also
# apply to these changes.
# Further modifications by Neil Tallim <flan@uguu.ca> to make use
# of more modern Python practices and features, improving
# performance and, in this maintainer's opinion, readability.
#
# New changes implemented (and copyrighted, I suppose),
# June 13, 2010, subject to the terms of the original module.
"""
Blowfish Encryption
This module is a pure python implementation of Bruce Schneier's
encryption scheme 'Blowfish'. Blowfish is a 16-round Feistel Network
cipher and offers substantial speed gains over DES.
The key is a string of length anywhere between 64 and 448 bits, or
equivalently 8 and 56 bytes. The encryption and decryption functions operate
on 64-bit blocks, or 8-byte strings.
"""
import array
import struct
class Blowfish:
"""
Implements the encryption and decryption functionality of the Blowfish
cipher, as well as CTR processing for arbitrary-length strings.
"""
# Key restrictions
KEY_MIN_LEN = 8 #64 bits
KEY_MAX_LEN = 56 #448 bits
# Cipher directions
ENCRYPT = 0
DECRYPT = 1
# For _round()
_MODULUS = 2L ** 32
# CTR constants
_BLOCK_SIZE = 8
def __init__(self, key):
"""
Creates an instance of blowfish using 'key' as the encryption key.
Key is a string of bytes, used to seed calculations.
Once the instance of the object is created, the key is no longer necessary.
"""
if not self.KEY_MIN_LEN <= len(key) <= self.KEY_MAX_LEN:
raise ValueError("Attempted to initialize Blowfish cipher with key of invalid length: %(len)i" % {
'len': len(key),
})
self._p_boxes = array.array('I', [
0x243F6A88, 0x85A308D3, 0x13198A2E, 0x03707344,
0xA4093822, 0x299F31D0, 0x082EFA98, 0xEC4E6C89,
0x452821E6, 0x38D01377, 0xBE5466CF, 0x34E90C6C,
0xC0AC29B7, 0xC97C50DD, 0x3F84D5B5, 0xB5470917,
0x9216D5D9, 0x8979FB1B
])
self._s_boxes = (
array.array('I', [
0xD1310BA6, 0x98DFB5AC, 0x2FFD72DB, 0xD01ADFB7,
0xB8E1AFED, 0x6A267E96, 0xBA7C9045, 0xF12C7F99,
0x24A19947, 0xB3916CF7, 0x0801F2E2, 0x858EFC16,
0x636920D8, 0x71574E69, 0xA458FEA3, 0xF4933D7E,
0x0D95748F, 0x728EB658, 0x718BCD58, 0x82154AEE,
0x7B54A41D, 0xC25A59B5, 0x9C30D539, 0x2AF26013,
0xC5D1B023, 0x286085F0, 0xCA417918, 0xB8DB38EF,
0x8E79DCB0, 0x603A180E, 0x6C9E0E8B, 0xB01E8A3E,
0xD71577C1, 0xBD314B27, 0x78AF2FDA, 0x55605C60,
0xE65525F3, 0xAA55AB94, 0x57489862, 0x63E81440,
0x55CA396A, 0x2AAB10B6, 0xB4CC5C34, 0x1141E8CE,
0xA15486AF, 0x7C72E993, 0xB3EE1411, 0x636FBC2A,
0x2BA9C55D, 0x741831F6, 0xCE5C3E16, 0x9B87931E,
0xAFD6BA33, 0x6C24CF5C, 0x7A325381, 0x28958677,
0x3B8F4898, 0x6B4BB9AF, 0xC4BFE81B, 0x66282193,
0x61D809CC, 0xFB21A991, 0x487CAC60, 0x5DEC8032,
0xEF845D5D, 0xE98575B1, 0xDC262302, 0xEB651B88,
0x23893E81, 0xD396ACC5, 0x0F6D6FF3, 0x83F44239,
0x2E0B4482, 0xA4842004, 0x69C8F04A, 0x9E1F9B5E,
0x21C66842, 0xF6E96C9A, 0x670C9C61, 0xABD388F0,
0x6A51A0D2, 0xD8542F68, 0x960FA728, 0xAB5133A3,
0x6EEF0B6C, 0x137A3BE4, 0xBA3BF050, 0x7EFB2A98,
0xA1F1651D, 0x39AF0176, 0x66CA593E, 0x82430E88,
0x8CEE8619, 0x456F9FB4, 0x7D84A5C3, 0x3B8B5EBE,
0xE06F75D8, 0x85C12073, 0x401A449F, 0x56C16AA6,
0x4ED3AA62, 0x363F7706, 0x1BFEDF72, 0x429B023D,
0x37D0D724, 0xD00A1248, 0xDB0FEAD3, 0x49F1C09B,
0x075372C9, 0x80991B7B, 0x25D479D8, 0xF6E8DEF7,
0xE3FE501A, 0xB6794C3B, 0x976CE0BD, 0x04C006BA,
0xC1A94FB6, 0x409F60C4, 0x5E5C9EC2, 0x196A2463,
0x68FB6FAF, 0x3E6C53B5, 0x1339B2EB, 0x3B52EC6F,
0x6DFC511F, 0x9B30952C, 0xCC814544, 0xAF5EBD09,
0xBEE3D004, 0xDE334AFD, 0x660F2807, 0x192E4BB3,
0xC0CBA857, 0x45C8740F, 0xD20B5F39, 0xB9D3FBDB,
0x5579C0BD, 0x1A60320A, 0xD6A100C6, 0x402C7279,
0x679F25FE, 0xFB1FA3CC, 0x8EA5E9F8, 0xDB3222F8,
0x3C7516DF, 0xFD616B15, 0x2F501EC8, 0xAD0552AB,
0x323DB5FA, 0xFD238760, 0x53317B48, 0x3E00DF82,
0x9E5C57BB, 0xCA6F8CA0, 0x1A87562E, 0xDF1769DB,
0xD542A8F6, 0x287EFFC3, 0xAC6732C6, 0x8C4F5573,
0x695B27B0, 0xBBCA58C8, 0xE1FFA35D, 0xB8F011A0,
0x10FA3D98, 0xFD2183B8, 0x4AFCB56C, 0x2DD1D35B,
0x9A53E479, 0xB6F84565, 0xD28E49BC, 0x4BFB9790,
0xE1DDF2DA, 0xA4CB7E33, 0x62FB1341, 0xCEE4C6E8,
0xEF20CADA, 0x36774C01, 0xD07E9EFE, 0x2BF11FB4,
0x95DBDA4D, 0xAE909198, 0xEAAD8E71, 0x6B93D5A0,
0xD08ED1D0, 0xAFC725E0, 0x8E3C5B2F, 0x8E7594B7,
0x8FF6E2FB, 0xF2122B64, 0x8888B812, 0x900DF01C,
0x4FAD5EA0, 0x688FC31C, 0xD1CFF191, 0xB3A8C1AD,
0x2F2F2218, 0xBE0E1777, 0xEA752DFE, 0x8B021FA1,
0xE5A0CC0F, 0xB56F74E8, 0x18ACF3D6, 0xCE89E299,
0xB4A84FE0, 0xFD13E0B7, 0x7CC43B81, 0xD2ADA8D9,
0x165FA266, 0x80957705, 0x93CC7314, 0x211A1477,
0xE6AD2065, 0x77B5FA86, 0xC75442F5, 0xFB9D35CF,
0xEBCDAF0C, 0x7B3E89A0, 0xD6411BD3, 0xAE1E7E49,
0x00250E2D, 0x2071B35E, 0x226800BB, 0x57B8E0AF,
0x2464369B, 0xF009B91E, 0x5563911D, 0x59DFA6AA,
0x78C14389, 0xD95A537F, 0x207D5BA2, 0x02E5B9C5,
0x83260376, 0x6295CFA9, 0x11C81968, 0x4E734A41,
0xB3472DCA, 0x7B14A94A, 0x1B510052, 0x9A532915,
0xD60F573F, 0xBC9BC6E4, 0x2B60A476, 0x81E67400,
0x08BA6FB5, 0x571BE91F, 0xF296EC6B, 0x2A0DD915,
0xB6636521, 0xE7B9F9B6, 0xFF34052E, 0xC5855664,
0x53B02D5D, 0xA99F8FA1, 0x08BA4799, 0x6E85076A
]),
array.array('I', [
0x4B7A70E9, 0xB5B32944, 0xDB75092E, 0xC4192623,
0xAD6EA6B0, 0x49A7DF7D, 0x9CEE60B8, 0x8FEDB266,
0xECAA8C71, 0x699A17FF, 0x5664526C, 0xC2B19EE1,
0x193602A5, 0x75094C29, 0xA0591340, 0xE4183A3E,
0x3F54989A, 0x5B429D65, 0x6B8FE4D6, 0x99F73FD6,
0xA1D29C07, 0xEFE830F5, 0x4D2D38E6, 0xF0255DC1,
0x4CDD2086, 0x8470EB26, 0x6382E9C6, 0x021ECC5E,
0x09686B3F, 0x3EBAEFC9, 0x3C971814, 0x6B6A70A1,
0x687F3584, 0x52A0E286, 0xB79C5305, 0xAA500737,
0x3E07841C, 0x7FDEAE5C, 0x8E7D44EC, 0x5716F2B8,
0xB03ADA37, 0xF0500C0D, 0xF01C1F04, 0x0200B3FF,
0xAE0CF51A, 0x3CB574B2, 0x25837A58, 0xDC0921BD,
0xD19113F9, 0x7CA92FF6, 0x94324773, 0x22F54701,
0x3AE5E581, 0x37C2DADC, 0xC8B57634, 0x9AF3DDA7,
0xA9446146, 0x0FD0030E, 0xECC8C73E, 0xA4751E41,
0xE238CD99, 0x3BEA0E2F, 0x3280BBA1, 0x183EB331,
0x4E548B38, 0x4F6DB908, 0x6F420D03, 0xF60A04BF,
0x2CB81290, 0x24977C79, 0x5679B072, 0xBCAF89AF,
0xDE9A771F, 0xD9930810, 0xB38BAE12, 0xDCCF3F2E,
0x5512721F, 0x2E6B7124, 0x501ADDE6, 0x9F84CD87,
0x7A584718, 0x7408DA17, 0xBC9F9ABC, 0xE94B7D8C,
0xEC7AEC3A, 0xDB851DFA, 0x63094366, 0xC464C3D2,
0xEF1C1847, 0x3215D908, 0xDD433B37, 0x24C2BA16,
0x12A14D43, 0x2A65C451, 0x50940002, 0x133AE4DD,
0x71DFF89E, 0x10314E55, 0x81AC77D6, 0x5F11199B,
0x043556F1, 0xD7A3C76B, 0x3C11183B, 0x5924A509,
0xF28FE6ED, 0x97F1FBFA, 0x9EBABF2C, 0x1E153C6E,
0x86E34570, 0xEAE96FB1, 0x860E5E0A, 0x5A3E2AB3,
0x771FE71C, 0x4E3D06FA, 0x2965DCB9, 0x99E71D0F,
0x803E89D6, 0x5266C825, 0x2E4CC978, 0x9C10B36A,
0xC6150EBA, 0x94E2EA78, 0xA5FC3C53, 0x1E0A2DF4,
0xF2F74EA7, 0x361D2B3D, 0x1939260F, 0x19C27960,
0x5223A708, 0xF71312B6, 0xEBADFE6E, 0xEAC31F66,
0xE3BC4595, 0xA67BC883, 0xB17F37D1, 0x018CFF28,
0xC332DDEF, 0xBE6C5AA5, 0x65582185, 0x68AB9802,
0xEECEA50F, 0xDB2F953B, 0x2AEF7DAD, 0x5B6E2F84,
0x1521B628, 0x29076170, 0xECDD4775, 0x619F1510,
0x13CCA830, 0xEB61BD96, 0x0334FE1E, 0xAA0363CF,
0xB5735C90, 0x4C70A239, 0xD59E9E0B, 0xCBAADE14,
0xEECC86BC, 0x60622CA7, 0x9CAB5CAB, 0xB2F3846E,
0x648B1EAF, 0x19BDF0CA, 0xA02369B9, 0x655ABB50,
0x40685A32, 0x3C2AB4B3, 0x319EE9D5, 0xC021B8F7,
0x9B540B19, 0x875FA099, 0x95F7997E, 0x623D7DA8,
0xF837889A, 0x97E32D77, 0x11ED935F, 0x16681281,
0x0E358829, 0xC7E61FD6, 0x96DEDFA1, 0x7858BA99,
0x57F584A5, 0x1B227263, 0x9B83C3FF, 0x1AC24696,
0xCDB30AEB, 0x532E3054, 0x8FD948E4, 0x6DBC3128,
0x58EBF2EF, 0x34C6FFEA, 0xFE28ED61, 0xEE7C3C73,
0x5D4A14D9, 0xE864B7E3, 0x42105D14, 0x203E13E0,
0x45EEE2B6, 0xA3AAABEA, 0xDB6C4F15, 0xFACB4FD0,
0xC742F442, 0xEF6ABBB5, 0x654F3B1D, 0x41CD2105,
0xD81E799E, 0x86854DC7, 0xE44B476A, 0x3D816250,
0xCF62A1F2, 0x5B8D2646, 0xFC8883A0, 0xC1C7B6A3,
0x7F1524C3, 0x69CB7492, 0x47848A0B, 0x5692B285,
0x095BBF00, 0xAD19489D, 0x1462B174, 0x23820E00,
0x58428D2A, 0x0C55F5EA, 0x1DADF43E, 0x233F7061,
0x3372F092, 0x8D937E41, 0xD65FECF1, 0x6C223BDB,
0x7CDE3759, 0xCBEE7460, 0x4085F2A7, 0xCE77326E,
0xA6078084, 0x19F8509E, 0xE8EFD855, 0x61D99735,
0xA969A7AA, 0xC50C06C2, 0x5A04ABFC, 0x800BCADC,
0x9E447A2E, 0xC3453484, 0xFDD56705, 0x0E1E9EC9,
0xDB73DBD3, 0x105588CD, 0x675FDA79, 0xE3674340,
0xC5C43465, 0x713E38D8, 0x3D28F89E, 0xF16DFF20,
0x153E21E7, 0x8FB03D4A, 0xE6E39F2B, 0xDB83ADF7
]),
array.array('I', [
0xE93D5A68, 0x948140F7, 0xF64C261C, 0x94692934,
0x411520F7, 0x7602D4F7, 0xBCF46B2E, 0xD4A20068,
0xD4082471, 0x3320F46A, 0x43B7D4B7, 0x500061AF,
0x1E39F62E, 0x97244546, 0x14214F74, 0xBF8B8840,
0x4D95FC1D, 0x96B591AF, 0x70F4DDD3, 0x66A02F45,
0xBFBC09EC, 0x03BD9785, 0x7FAC6DD0, 0x31CB8504,
0x96EB27B3, 0x55FD3941, 0xDA2547E6, 0xABCA0A9A,
0x28507825, 0x530429F4, 0x0A2C86DA, 0xE9B66DFB,
0x68DC1462, 0xD7486900, 0x680EC0A4, 0x27A18DEE,
0x4F3FFEA2, 0xE887AD8C, 0xB58CE006, 0x7AF4D6B6,
0xAACE1E7C, 0xD3375FEC, 0xCE78A399, 0x406B2A42,
0x20FE9E35, 0xD9F385B9, 0xEE39D7AB, 0x3B124E8B,
0x1DC9FAF7, 0x4B6D1856, 0x26A36631, 0xEAE397B2,
0x3A6EFA74, 0xDD5B4332, 0x6841E7F7, 0xCA7820FB,
0xFB0AF54E, 0xD8FEB397, 0x454056AC, 0xBA489527,
0x55533A3A, 0x20838D87, 0xFE6BA9B7, 0xD096954B,
0x55A867BC, 0xA1159A58, 0xCCA92963, 0x99E1DB33,
0xA62A4A56, 0x3F3125F9, 0x5EF47E1C, 0x9029317C,
0xFDF8E802, 0x04272F70, 0x80BB155C, 0x05282CE3,
0x95C11548, 0xE4C66D22, 0x48C1133F, 0xC70F86DC,
0x07F9C9EE, 0x41041F0F, 0x404779A4, 0x5D886E17,
0x325F51EB, 0xD59BC0D1, 0xF2BCC18F, 0x41113564,
0x257B7834, 0x602A9C60, 0xDFF8E8A3, 0x1F636C1B,
0x0E12B4C2, 0x02E1329E, 0xAF664FD1, 0xCAD18115,
0x6B2395E0, 0x333E92E1, 0x3B240B62, 0xEEBEB922,
0x85B2A20E, 0xE6BA0D99, 0xDE720C8C, 0x2DA2F728,
0xD0127845, 0x95B794FD, 0x647D0862, 0xE7CCF5F0,
0x5449A36F, 0x877D48FA, 0xC39DFD27, 0xF33E8D1E,
0x0A476341, 0x992EFF74, 0x3A6F6EAB, 0xF4F8FD37,
0xA812DC60, 0xA1EBDDF8, 0x991BE14C, 0xDB6E6B0D,
0xC67B5510, 0x6D672C37, 0x2765D43B, 0xDCD0E804,
0xF1290DC7, 0xCC00FFA3, 0xB5390F92, 0x690FED0B,
0x667B9FFB, 0xCEDB7D9C, 0xA091CF0B, 0xD9155EA3,
0xBB132F88, 0x515BAD24, 0x7B9479BF, 0x763BD6EB,
0x37392EB3, 0xCC115979, 0x8026E297, 0xF42E312D,
0x6842ADA7, 0xC66A2B3B, 0x12754CCC, 0x782EF11C,
0x6A124237, 0xB79251E7, 0x06A1BBE6, 0x4BFB6350,
0x1A6B1018, 0x11CAEDFA, 0x3D25BDD8, 0xE2E1C3C9,
0x44421659, 0x0A121386, 0xD90CEC6E, 0xD5ABEA2A,
0x64AF674E, 0xDA86A85F, 0xBEBFE988, 0x64E4C3FE,
0x9DBC8057, 0xF0F7C086, 0x60787BF8, 0x6003604D,
0xD1FD8346, 0xF6381FB0, 0x7745AE04, 0xD736FCCC,
0x83426B33, 0xF01EAB71, 0xB0804187, 0x3C005E5F,
0x77A057BE, 0xBDE8AE24, 0x55464299, 0xBF582E61,
0x4E58F48F, 0xF2DDFDA2, 0xF474EF38, 0x8789BDC2,
0x5366F9C3, 0xC8B38E74, 0xB475F255, 0x46FCD9B9,
0x7AEB2661, 0x8B1DDF84, 0x846A0E79, 0x915F95E2,
0x466E598E, 0x20B45770, 0x8CD55591, 0xC902DE4C,
0xB90BACE1, 0xBB8205D0, 0x11A86248, 0x7574A99E,
0xB77F19B6, 0xE0A9DC09, 0x662D09A1, 0xC4324633,
0xE85A1F02, 0x09F0BE8C, 0x4A99A025, 0x1D6EFE10,
0x1AB93D1D, 0x0BA5A4DF, 0xA186F20F, 0x2868F169,
0xDCB7DA83, 0x573906FE, 0xA1E2CE9B, 0x4FCD7F52,
0x50115E01, 0xA70683FA, 0xA002B5C4, 0x0DE6D027,
0x9AF88C27, 0x773F8641, 0xC3604C06, 0x61A806B5,
0xF0177A28, 0xC0F586E0, 0x006058AA, 0x30DC7D62,
0x11E69ED7, 0x2338EA63, 0x53C2DD94, 0xC2C21634,
0xBBCBEE56, 0x90BCB6DE, 0xEBFC7DA1, 0xCE591D76,
0x6F05E409, 0x4B7C0188, 0x39720A3D, 0x7C927C24,
0x86E3725F, 0x724D9DB9, 0x1AC15BB4, 0xD39EB8FC,
0xED545578, 0x08FCA5B5, 0xD83D7CD3, 0x4DAD0FC4,
0x1E50EF5E, 0xB161E6F8, 0xA28514D9, 0x6C51133C,
0x6FD5C7E7, 0x56E14EC4, 0x362ABFCE, 0xDDC6C837,
0xD79A3234, 0x92638212, 0x670EFA8E, 0x406000E0
]),
array.array('I', [
0x3A39CE37, 0xD3FAF5CF, 0xABC27737, 0x5AC52D1B,
0x5CB0679E, 0x4FA33742, 0xD3822740, 0x99BC9BBE,
0xD5118E9D, 0xBF0F7315, 0xD62D1C7E, 0xC700C47B,
0xB78C1B6B, 0x21A19045, 0xB26EB1BE, 0x6A366EB4,
0x5748AB2F, 0xBC946E79, 0xC6A376D2, 0x6549C2C8,
0x530FF8EE, 0x468DDE7D, 0xD5730A1D, 0x4CD04DC6,
0x2939BBDB, 0xA9BA4650, 0xAC9526E8, 0xBE5EE304,
0xA1FAD5F0, 0x6A2D519A, 0x63EF8CE2, 0x9A86EE22,
0xC089C2B8, 0x43242EF6, 0xA51E03AA, 0x9CF2D0A4,
0x83C061BA, 0x9BE96A4D, 0x8FE51550, 0xBA645BD6,
0x2826A2F9, 0xA73A3AE1, 0x4BA99586, 0xEF5562E9,
0xC72FEFD3, 0xF752F7DA, 0x3F046F69, 0x77FA0A59,
0x80E4A915, 0x87B08601, 0x9B09E6AD, 0x3B3EE593,
0xE990FD5A, 0x9E34D797, 0x2CF0B7D9, 0x022B8B51,
0x96D5AC3A, 0x017DA67D, 0xD1CF3ED6, 0x7C7D2D28,
0x1F9F25CF, 0xADF2B89B, 0x5AD6B472, 0x5A88F54C,
0xE029AC71, 0xE019A5E6, 0x47B0ACFD, 0xED93FA9B,
0xE8D3C48D, 0x283B57CC, 0xF8D56629, 0x79132E28,
0x785F0191, 0xED756055, 0xF7960E44, 0xE3D35E8C,
0x15056DD4, 0x88F46DBA, 0x03A16125, 0x0564F0BD,
0xC3EB9E15, 0x3C9057A2, 0x97271AEC, 0xA93A072A,
0x1B3F6D9B, 0x1E6321F5, 0xF59C66FB, 0x26DCF319,
0x7533D928, 0xB155FDF5, 0x03563482, 0x8ABA3CBB,
0x28517711, 0xC20AD9F8, 0xABCC5167, 0xCCAD925F,
0x4DE81751, 0x3830DC8E, 0x379D5862, 0x9320F991,
0xEA7A90C2, 0xFB3E7BCE, 0x5121CE64, 0x774FBE32,
0xA8B6E37E, 0xC3293D46, 0x48DE5369, 0x6413E680,
0xA2AE0810, 0xDD6DB224, 0x69852DFD, 0x09072166,
0xB39A460A, 0x6445C0DD, 0x586CDECF, 0x1C20C8AE,
0x5BBEF7DD, 0x1B588D40, 0xCCD2017F, 0x6BB4E3BB,
0xDDA26A7E, 0x3A59FF45, 0x3E350A44, 0xBCB4CDD5,
0x72EACEA8, 0xFA6484BB, 0x8D6612AE, 0xBF3C6F47,
0xD29BE463, 0x542F5D9E, 0xAEC2771B, 0xF64E6370,
0x740E0D8D, 0xE75B1357, 0xF8721671, 0xAF537D5D,
0x4040CB08, 0x4EB4E2CC, 0x34D2466A, 0x0115AF84,
0xE1B00428, 0x95983A1D, 0x06B89FB4, 0xCE6EA048,
0x6F3F3B82, 0x3520AB82, 0x011A1D4B, 0x277227F8,
0x611560B1, 0xE7933FDC, 0xBB3A792B, 0x344525BD,
0xA08839E1, 0x51CE794B, 0x2F32C9B7, 0xA01FBAC9,
0xE01CC87E, 0xBCC7D1F6, 0xCF0111C3, 0xA1E8AAC7,
0x1A908749, 0xD44FBD9A, 0xD0DADECB, 0xD50ADA38,
0x0339C32A, 0xC6913667, 0x8DF9317C, 0xE0B12B4F,
0xF79E59B7, 0x43F5BB3A, 0xF2D519FF, 0x27D9459C,
0xBF97222C, 0x15E6FC2A, 0x0F91FC71, 0x9B941525,
0xFAE59361, 0xCEB69CEB, 0xC2A86459, 0x12BAA8D1,
0xB6C1075E, 0xE3056A0C, 0x10D25065, 0xCB03A442,
0xE0EC6E0E, 0x1698DB3B, 0x4C98A0BE, 0x3278E964,
0x9F1F9532, 0xE0D392DF, 0xD3A0342B, 0x8971F21E,
0x1B0A7441, 0x4BA3348C, 0xC5BE7120, 0xC37632D8,
0xDF359F8D, 0x9B992F2E, 0xE60B6F47, 0x0FE3F11D,
0xE54CDA54, 0x1EDAD891, 0xCE6279CF, 0xCD3E7E6F,
0x1618B166, 0xFD2C1D05, 0x848FD2C5, 0xF6FB2299,
0xF523F357, 0xA6327623, 0x93A83531, 0x56CCCD02,
0xACF08162, 0x5A75EBB5, 0x6E163697, 0x88D273CC,
0xDE966292, 0x81B949D0, 0x4C50901B, 0x71C65614,
0xE6C6C7BD, 0x327A140A, 0x45E1D006, 0xC3F27B9A,
0xC9AA53FD, 0x62A80F00, 0xBB25BFE2, 0x35BDD2F6,
0x71126905, 0xB2040222, 0xB6CBCF7C, 0xCD769C2B,
0x53113EC0, 0x1640E3D3, 0x38ABBD60, 0x2547ADF0,
0xBA38209C, 0xF746CE76, 0x77AFA1C5, 0x20756060,
0x85CBFE4E, 0x8AE88DD8, 0x7AAAF9B0, 0x4CF9AA7E,
0x1948C25C, 0x02FB8A8C, 0x01C36AE4, 0xD6EBE1F9,
0x90D4F869, 0xA65CDEA0, 0x3F09252D, 0xC208E69F,
0xB74E6132, 0xCE77E25B, 0x578FDFE3, 0x3AC372E6
])
)
# Cycle through the p-boxes and round-robin XOR the
# key with the p-boxes
key_len = len(key)
index = 0
for i in xrange(len(self._p_boxes)):
self._p_boxes[i] = self._p_boxes[i] ^ (
(ord(key[index % key_len]) << 24) +
(ord(key[(index + 1) % key_len]) << 16) +
(ord(key[(index + 2) % key_len]) << 8) +
(ord(key[(index + 3) % key_len]))
)
index += 4
# For the chaining process
l = r = 0
# Begin chain replacing the p-boxes
for i in xrange(0, len(self._p_boxes), 2):
(l, r) = self.cipher(l, r, self.ENCRYPT)
self._p_boxes[i] = l
self._p_boxes[i + 1] = r
# Chain replace the s-boxes
for i in xrange(len(self._s_boxes)):
for j in xrange(0, len(self._s_boxes[i]), 2):
(l, r) = self.cipher(l, r, self.ENCRYPT)
self._s_boxes[i][j] = l
self._s_boxes[i][j + 1] = r
def initCTR(self, iv=0):
"""
Initializes CTR engine for encryption or decryption.
"""
if not struct.calcsize("Q") == self._BLOCK_SIZE:
raise ValueError("Struct-type 'Q' must have a length of %(target-len)i bytes, not %(q-len)i bytes; this module cannot be used on your platform" % {
'target-len': self._BLOCK_SIZE,
'q-len': struct.calcsize("Q"),
})
self._ctr_iv = iv
self._calcCTRBuf()
def cipher(self, xl, xr, direction):
"""
Encrypts a 64-bit block of data where xl is the upper 32 bits and xr is
the lower 32-bits.
'direction' is the direction to apply the cipher, either ENCRYPT or
DECRYPT class-constants.
Returns a tuple of either encrypted or decrypted data of the left half
and right half of the 64-bit block.
"""
if direction == self.ENCRYPT:
for i in self._p_boxes[:16]:
xl = xl ^ i
xr = self._round(xl) ^ xr
(xl, xr) = (xr, xl)
(xl, xr) = (xr, xl)
xr = xr ^ self._p_boxes[16]
xl = xl ^ self._p_boxes[17]
else:
for i in reversed(self._p_boxes[2:18]):
xl = xl ^ i
xr = self._round(xl) ^ xr
(xl, xr) = (xr, xl)
(xl, xr) = (xr, xl)
xr = xr ^ self._p_boxes[1]
xl = xl ^ self._p_boxes[0]
return (xl, xr)
def encrypt(self, data):
"""
Encrypt an 8-byte (64-bit) block of text where 'data' is an 8 byte
string.
Returns an 8-byte encrypted string.
"""
if not len(data) == 8:
raise ValueError("Attempted to encrypt data of invalid block length: %(len)i" % {
'len': len(data),
})
# Use big endianness since that's what everyone else uses
xl = (ord(data[3])) | (ord(data[2]) << 8) | (ord(data[1]) << 16) | (ord(data[0]) << 24)
xr = (ord(data[7])) | (ord(data[6]) << 8) | (ord(data[5]) << 16) | (ord(data[4]) << 24)
(cl, cr) = self.cipher(xl, xr, self.ENCRYPT)
chars = ''.join ([
chr((cl >> 24) & 0xFF), chr((cl >> 16) & 0xFF), chr((cl >> 8) & 0xFF), chr(cl & 0xFF),
chr((cr >> 24) & 0xFF), chr((cr >> 16) & 0xFF), chr((cr >> 8) & 0xFF), chr(cr & 0xFF)
])
return chars
def decrypt(self, data):
"""
Decrypt an 8 byte (64-bit) encrypted block of text, where 'data' is the
8-byte encrypted string.
Returns an 8-byte string of plaintext.
"""
if not len(data) == 8:
raise ValueError("Attempted to encrypt data of invalid block length: %(len)i" % {
'len': len(data),
})
# Use big endianness since that's what everyone else uses
cl = (ord(data[3])) | (ord(data[2]) << 8) | (ord(data[1]) << 16) | (ord(data[0]) << 24)
cr = (ord(data[7])) | (ord(data[6]) << 8) | (ord(data[5]) << 16) | (ord(data[4]) << 24)
(xl, xr) = self.cipher (cl, cr, self.DECRYPT)
return ''.join ([
chr((xl >> 24) & 0xFF), chr((xl >> 16) & 0xFF), chr((xl >> 8) & 0xFF), chr(xl & 0xFF),
chr((xr >> 24) & 0xFF), chr((xr >> 16) & 0xFF), chr((xr >> 8) & 0xFF), chr(xr & 0xFF)
])
def encryptCTR(self, data):
"""
Encrypts an arbitrary string and returns the encrypted string.
This method can be called successively for multiple string blocks.
"""
if not type(data) is str:
raise TypeError("Only 8-bit strings are supported")
return ''.join([chr(ord(ch) ^ self._nextCTRByte()) for ch in data])
def decryptCTR(self, data):
"""
Decrypts a string encrypted with encryptCTR() and returns the original
string.
"""
return self.encryptCTR(data)
def _calcCTRBuf(self):
"""
Calculates one block of CTR keystream.
"""
self._ctr_cks = self.encrypt(struct.pack("Q", self._ctr_iv)) # keystream block
self._ctr_iv += 1
self._ctr_pos = 0
def _nextCTRByte(self):
"""
Returns one byte of CTR keystream.
"""
b = ord(self._ctr_cks[self._ctr_pos])
self._ctr_pos += 1
if self._ctr_pos >= len(self._ctr_cks):
self._calcCTRBuf()
return b
def _round(self, xl):
"""
Performs an obscuring function on the 32-bit block of data, 'xl', which
is the left half of the 64-bit block of data.
Returns the 32-bit result as a long integer.
"""
# Perform all ops as longs, then mask out all but the low 32 bits to
# obtain the integer
f = long(self._s_boxes[0][(xl & 0xFF000000) >> 24])
f += long(self._s_boxes[1][(xl & 0x00FF0000) >> 16])
f %= self._MODULUS
f ^= long(self._s_boxes[2][(xl & 0x0000FF00) >> 8])
f += long(self._s_boxes[3][(xl & 0x000000FF)])
f %= self._MODULUS
return f & 0xFFFFFFFF
# Sample usage
##############
if __name__ == '__main__':
import time
def _demo(heading, source, encrypted, decrypted):
"""demo method """
print heading
print "\tSource: %(source)s" % {
'source': source,
}
print "\tEncrypted: %(encrypted)s" % {
'encrypted': encrypted,
}
print "\tDecrypted: %(decrypted)s" % {
'decrypted': decrypted,
}
print
key = 'This is a test key'
cipher = Blowfish(key)
# Encryption processing
(xl, xr) = (123456L, 654321L)
(cl, cr) = cipher.cipher(xl, xr, cipher.ENCRYPT)
(dl, dr) = cipher.cipher(cl, cr, cipher.DECRYPT)
_demo("Testing encryption", (xl, xr), (cl, cr), (dl, dr))
# Block processing
text = 'testtest'
crypted = cipher.encrypt(text)
decrypted = cipher.decrypt(crypted)
_demo("Testing block encrypt", text, repr(crypted), decrypted)
# CTR processing
cipher.initCTR()
text = "The quick brown fox jumps over the lazy dog"
crypted = cipher.encryptCTR(text)
cipher.initCTR()
decrypted = cipher.decryptCTR(crypted)
_demo("Testing CTR logic", text, repr(crypted), decrypted)
# Test speed
print "Testing speed"
test_strings = [''.join(("The quick brown fox jumps over the lazy dog", str(i),)) for i in xrange(1000)]
n = 0
t1 = time.time()
while True:
for test_string in test_strings:
cipher.encryptCTR(test_string)
n += 1000
t2 = time.time()
if t2 - t1 >= 5.0:
break
print "%(count)i encryptions in %(time)0.1f seconds: %(throughput)0.1f enc/s" % {
'count': n,
'time': t2 - t1,
'throughput': n / (t2 - t1),
}
size: 27,465 | language: Python | extension: .py | total_lines: 536 | avg_line_length: 38.802239 | max_line_length: 159 | alphanum_fraction: 0.610649 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,302 | file_name: __init__.py | file_path: gaubert_gmvault/src/gmv/__init__.py
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail dot com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
size: 835 | language: Python | extension: .py | total_lines: 14 | avg_line_length: 54.928571 | max_line_length: 89 | alphanum_fraction: 0.75153 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,303 | file_name: collections_utils.py | file_path: gaubert_gmvault/src/gmv/collections_utils.py
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail dot com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import collections
## {{{ http://code.activestate.com/recipes/576669/ (r18)
class OrderedDict(dict, collections.MutableMapping):
'''OrderedDict Class'''
# Methods with direct access to underlying attributes
def __init__(self, *args, **kwds):
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
if not hasattr(self, '_keys'):
self._keys = []
self.update(*args, **kwds)
def clear(self):
del self._keys[:]
dict.clear(self)
def __setitem__(self, key, value):
if key not in self:
self._keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
self._keys.remove(key)
def __iter__(self):
return iter(self._keys)
def __reversed__(self):
return reversed(self._keys)
def popitem(self):
if not self:
raise KeyError
key = self._keys.pop()
value = dict.pop(self, key)
return key, value
def __reduce__(self):
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
inst_dict.pop('_keys', None)
return (self.__class__, (items,), inst_dict)
# Methods with indirect access via the above methods
setdefault = collections.MutableMapping.setdefault
update = collections.MutableMapping.update
pop = collections.MutableMapping.pop
keys = collections.MutableMapping.keys
values = collections.MutableMapping.values
items = collections.MutableMapping.items
def __repr__(self):
pairs = ', '.join(map('%r: %r'.__mod__, self.items()))
return '%s({%s})' % (self.__class__.__name__, pairs)
def copy(self):
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''fromkeys'''
the_d = cls()
for key in iterable:
the_d[key] = value
return the_d
## end of http://code.activestate.com/recipes/576669/ }}}
class Map(object):
""" Map wraps a dictionary. It is essentially an abstract class from which
specific multimaps are subclassed. """
def __init__(self):
self._dict = {}
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(self._dict))
__str__ = __repr__
def __getitem__(self, key):
return self._dict[key]
def __setitem__(self, key, value):
self._dict[key] = value
def __delitem__(self, key):
del self._dict[key]
def __len__(self):
return len(self._dict)
def remove(self, key, value): #pylint: disable=W0613
'''remove key from Map'''
del self._dict[key]
def keys(self):
'''returns list of keys'''
return self._dict.keys()
def dict(self):
""" Allows access to internal dictionary, if necessary. Caution: multimaps
will break if keys are not associated with proper container."""
return self._dict
class ListMultimap(Map):
""" ListMultimap is based on lists and allows multiple instances of same value. """
def __init__(self):
super(ListMultimap, self).__init__()
self._dict = collections.defaultdict(list)
def __setitem__(self, key, value):
self._dict[key].append(value)
def __len__(self):
return len(self._dict)
def remove(self, key, value):
'''Remove key'''
self._dict[key].remove(value)
class SetMultimap(Map):
""" SetMultimap is based on sets and prevents multiple instances of same value. """
def __init__(self):
super(SetMultimap, self).__init__()
self._dict = collections.defaultdict(set)
def __setitem__(self, key, value):
self._dict[key].add(value)
def __len__(self):
return len(self._dict)
def remove(self, key, value):
'''remove key'''
self._dict[key].remove(value)
class DictMultimap(Map):
""" DictMultimap is based on dicts and allows fast tests for membership. """
def __init__(self):
super(DictMultimap, self).__init__()
self._dict = collections.defaultdict(dict)
def __setitem__(self, key, value):
self._dict[key][value] = True
def __len__(self):
return len(self._dict)
def remove(self, key, value):
""" remove key"""
del self._dict[key][value]
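# Minimal usage sketch (an illustrative addition, not part of the original
# recipe): each multimap maps a key to a container of values, and
# remove(key, value) deletes one value from that container.
if __name__ == '__main__':
    lmm = ListMultimap()
    lmm['k'] = 1
    lmm['k'] = 1           # lists keep duplicate values
    smm = SetMultimap()
    smm['k'] = 1
    smm['k'] = 1           # sets collapse duplicates
    print(lmm.dict())      # defaultdict(<type 'list'>, {'k': [1, 1]})
    print(smm.dict())      # defaultdict(<type 'set'>, {'k': set([1])})
    smm.remove('k', 1)
    print(len(smm))        # 1: the key itself remains, with an empty set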
size: 5,347 | language: Python | extension: .py | total_lines: 132 | avg_line_length: 32.863636 | max_line_length: 89 | alphanum_fraction: 0.62002 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,304 | file_name: gmvault_export.py | file_path: gaubert_gmvault/src/gmv/gmvault_export.py
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <2011-2012> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Export function of Gmvault created by dave@vasilevsky.ca
'''
import os
import re
import mailbox
import imapclient.imap_utf7 as imap_utf7
import gmv.imap_utils as imap_utils
import gmv.log_utils as log_utils
import gmv.gmvault_utils as gmvault_utils
import gmv.gmvault_db as gmvault_db
LOG = log_utils.LoggerFactory.get_logger('gmvault_export')
class GMVaultExporter(object):
"""
Class handling the creation of exports in standard formats
such as maildir, mbox
"""
PROGRESS_INTERVAL = 200
CHATS_FOLDER = 'Chats'
ARCHIVED_FOLDER = 'Archived' # Mails only in 'All Mail'
GM_ALL = re.sub(r'^\\', '', imap_utils.GIMAPFetcher.GENERIC_GMAIL_ALL)
GM_INBOX = 'Inbox'
GM_SEP = '/'
GM_SEEN = '\\Seen'
GM_FLAGGED = '\\Flagged'
def __init__(self, db_dir, a_mailbox, labels = None):
"""
constructor
"""
self.storer = gmvault_db.GmailStorer(db_dir)
self.mailbox = a_mailbox
self.labels = labels
def want_label(self, label):
""" helper indicating is a label is needed"""
if self.labels:
return label in self.labels
return label != self.GM_ALL
def export(self):
"""core method for starting the export """
self.export_ids('emails', self.storer.get_all_existing_gmail_ids(), \
default_folder = self.GM_ALL, use_labels = True)
self.export_ids('chats', self.storer.get_all_chats_gmail_ids(), \
default_folder = self.CHATS_FOLDER, use_labels = False)
def printable_label_list(self, labels):
"""helper to print a list of labels"""
labels = [l.encode('ascii', 'backslashreplace') for l in labels]
return u'; '.join(labels)
def export_ids(self, kind, ids, default_folder, use_labels):
""" export organised by ids """
exported_labels = "default labels"
if self.labels:
exported_labels = "labels " + self.printable_label_list(self.labels)
LOG.critical("Start %s export for %s." % (kind, exported_labels))
timer = gmvault_utils.Timer()
timer.start()
done = 0
for a_id in ids:
meta, msg = self.storer.unbury_email(a_id)
folders = [default_folder]
if use_labels:
add_labels = meta[gmvault_db.GmailStorer.LABELS_K]
if not add_labels:
add_labels = [GMVaultExporter.ARCHIVED_FOLDER]
folders.extend(add_labels)
folders = [re.sub(r'^\\', '', f) for f in folders]
folders = [f for f in folders if self.want_label(f)]
LOG.debug("Processing id %s in labels %s." % \
(a_id, self.printable_label_list(folders)))
for folder in folders:
self.mailbox.add(msg, folder, meta[gmvault_db.GmailStorer.FLAGS_K])
done += 1
left = len(ids) - done
if done % self.PROGRESS_INTERVAL == 0 and left > 0:
elapsed = timer.elapsed()
LOG.critical("== Processed %d %s in %s, %d left (time estimate %s). ==\n" % \
(done, kind, timer.seconds_to_human_time(elapsed), \
left, timer.estimate_time_left(done, elapsed, left)))
LOG.critical("Export completed in %s." % (timer.elapsed_human_time(),))
class Mailbox(object):
""" Mailbox abstract class"""
def add(self, msg, folder, flags):
raise NotImplementedError('implement in subclass')
def close(self):
pass
class Maildir(Mailbox):
""" Class delaing with the Maildir format """
def __init__(self, path, separator = '/'):
self.path = path
self.subdirs = {}
self.separator = separator
if not self.root_is_maildir() and not os.path.exists(self.path):
os.makedirs(self.path)
@staticmethod
def separate(folder, sep):
""" separate method """
return folder.replace(GMVaultExporter.GM_SEP, sep)
def subdir_name(self, folder):
"""get subdir_name """
return self.separate(folder, self.separator)
def root_is_maildir(self):
"""check if root is maildir"""
return False
def subdir(self, folder):
""" return a subdir """
if folder in self.subdirs:
return self.subdirs[folder]
if folder:
parts = folder.split(GMVaultExporter.GM_SEP)
parent = GMVaultExporter.GM_SEP.join(parts[:-1])
self.subdir(parent)
path = self.subdir_name(folder)
path = imap_utf7.encode(path)
else:
if not self.root_is_maildir():
return
path = ''
abspath = os.path.join(self.path, path)
sub = mailbox.Maildir(abspath, create = True)
self.subdirs[folder] = sub
return sub
def add(self, msg, folder, flags):
""" add message in a given subdir """
mmsg = mailbox.MaildirMessage(msg)
if GMVaultExporter.GM_SEEN in flags:
mmsg.set_subdir('cur')
mmsg.add_flag('S')
if mmsg.get_subdir() == 'cur' and GMVaultExporter.GM_FLAGGED in flags:
mmsg.add_flag('F')
self.subdir(folder).add(mmsg)
class OfflineIMAP(Maildir):
""" Class dealing with offlineIMAP specificities """
DEFAULT_SEPARATOR = '.'
def __init__(self, path, separator = DEFAULT_SEPARATOR):
super(OfflineIMAP, self).__init__(path, separator = separator)
class Dovecot(Maildir):
""" Class dealing with Dovecot specificities """
# See http://wiki2.dovecot.org/Namespaces
class Layout(object):
def join(self, parts):
return self.SEPARATOR.join(parts)
class FSLayout(Layout):
SEPARATOR = '/'
class MaildirPlusPlusLayout(Layout):
SEPARATOR = '.'
def join(self, parts):
return '.' + super(Dovecot.MaildirPlusPlusLayout, self).join(parts)
DEFAULT_NS_SEP = '.'
DEFAULT_LISTESCAPE = '\\'
# The namespace separator cannot be escaped with listescape.
# Replace it with a two-character escape code.
DEFAULT_SEP_ESCAPE = "*'"
def __init__(self, path,
layout = MaildirPlusPlusLayout(),
ns_sep = DEFAULT_NS_SEP,
listescape = DEFAULT_LISTESCAPE,
sep_escape = DEFAULT_SEP_ESCAPE):
super(Dovecot, self).__init__(path, separator = layout.SEPARATOR)
self.layout = layout
self.ns_sep = ns_sep
self.listescape = listescape
self.sep_escape = sep_escape
# Escape one character
def _listescape(self, s, char = None, pattern = None):
pattern = pattern or re.escape(char)
esc = "%s%02x" % (self.listescape, ord(char))
return re.sub(pattern, lambda m: esc, s)
def _munge_name(self, s):
# Escape namespace separator: . => *', * => **
esc = self.sep_escape[0]
s = re.sub(re.escape(esc), esc * 2, s)
s = re.sub(re.escape(self.ns_sep), self.sep_escape, s)
if self.listescape:
# See http://wiki2.dovecot.org/Plugins/Listescape
if self.layout.SEPARATOR == '.':
s = self._listescape(s, '.')
s = self._listescape(s, '/')
s = self._listescape(s, '~', r'^~')
return s
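# Illustrative examples with the defaults above (sep_escape = "*'",
# listescape = '\\', ns_sep = '.', Maildir++ layout):
#   "a.b" -> "a*'b"   (namespace separator replaced by the escape code)
#   "a*b" -> "a**b"   (the escape character itself is doubled)
#   "a/b" -> "a\2fb"  (remaining separators are listescape-encoded)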
def subdir_name(self, folder):
if folder == GMVaultExporter.GM_INBOX:
return ''
parts = folder.split(GMVaultExporter.GM_SEP)
parts = [self._munge_name(n) for n in parts]
return self.layout.join(parts)
def root_is_maildir(self):
return True
class MBox(Mailbox):
""" Class dealing with MBox specificities """
def __init__(self, folder):
self.folder = folder
self.open = dict()
def close(self):
for _, m in self.open.items():
m.close()
def subdir(self, label):
segments = label.split(GMVaultExporter.GM_SEP)
# Safety first: No unusable directory portions
segments = [s for s in segments if s != '..' and s != '.']
real_label = GMVaultExporter.GM_SEP.join(segments)
if real_label in self.open:
return self.open[real_label]
cur_path = self.folder
label_segments = []
for s in segments:
label_segments.append(s)
cur_label = GMVaultExporter.GM_SEP.join(label_segments)
if cur_label not in self.open:
# Create an mbox for intermediate folders, to satisfy
# Thunderbird import
if not os.path.exists(cur_path):
os.makedirs(cur_path)
mbox_path = os.path.join(cur_path, s)
self.open[cur_label] = mailbox.mbox(mbox_path)
# Use .sbd folders a la Thunderbird, to allow nested folders
cur_path = os.path.join(cur_path, s + '.sbd')
return self.open[real_label]
def add(self, msg, folder, flags):
mmsg = mailbox.mboxMessage(msg)
if GMVaultExporter.GM_SEEN in flags:
mmsg.add_flag('R')
if GMVaultExporter.GM_FLAGGED in flags:
mmsg.add_flag('F')
self.subdir(folder).add(mmsg)
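# Minimal usage sketch (illustrative; the paths are placeholders, while the
# signatures match the classes defined above):
#
#   exporter = GMVaultExporter('/path/to/gmvault-db', Maildir('/tmp/export'))
#   exporter.export()
#   exporter.mailbox.close()
#
# OfflineIMAP, Dovecot and MBox are drop-in replacements for Maildir: they
# change only the on-disk layout, and GMVaultExporter drives each of them
# through the same Mailbox.add()/close() interface.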
size: 10,048 | language: Python | extension: .py | total_lines: 237 | avg_line_length: 33.345992 | max_line_length: 93 | alphanum_fraction: 0.606557 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,305 | file_name: progress_test.py | file_path: gaubert_gmvault/src/gmv/progress_test.py
import time
import sys
def progress_2():
"""
"""
percents = 0
to_write = "Progress: [%s ]\r" % (percents)
sys.stdout.write(to_write)
sys.stdout.flush()
steps = 100
for i in xrange(steps):
time.sleep(0.1)
percents += 1
#sys.stdout.write("\b" * (len(to_write)))
to_write = "Progress: [%s percents]\r" % (percents)
sys.stdout.write(to_write)
sys.stdout.flush()
def progress_1():
"""
progress_1
"""
toolbar_width = 100
# setup toolbar
sys.stdout.write("[%s]" % (" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width+1)) # return to start of line, after '['
for i in xrange(toolbar_width):
time.sleep(0.1) # do real work here
# update the bar
sys.stdout.write("-")
sys.stdout.flush()
sys.stdout.write("\n")
if __name__ == '__main__':
progress_2()
size: 820 | language: Python | extension: .py | total_lines: 34 | avg_line_length: 21.588235 | max_line_length: 80 | alphanum_fraction: 0.646907 | repo_name: gaubert/gmvault | repo_stars: 3,572 | repo_forks: 285 | repo_open_issues: 144 | repo_license: AGPL-3.0 | repo_extraction_date: 9/5/2024, 5:11:34 PM (Europe/Amsterdam)

id: 12,306 | file_name: mod_imap.py | file_path: gaubert_gmvault/src/gmv/mod_imap.py
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail dot com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Contains the class monkey patching IMAPClient and imaplib
'''
import zlib
import datetime
import re
import socket
import ssl
import cStringIO
import os
import imaplib #for the exception
import imapclient
#enable imap debugging if GMV_IMAP_DEBUG is set
if os.getenv("GMV_IMAP_DEBUG"):
imaplib.Debug = 4 #enable debugging
#to enable imap debugging and see all command
#imaplib.Debug = 4 #enable debugging
INTERNALDATE_RE = re.compile(r'.*INTERNALDATE "'
r'(?P<day>[ 0123][0-9])-(?P<mon>[A-Z][a-z][a-z])-(?P<year>[0-9][0-9][0-9][0-9])'
r' (?P<hour>[0-9][0-9]):(?P<min>[0-9][0-9]):(?P<sec>[0-9][0-9])'
r' (?P<zonen>[-+])(?P<zoneh>[0-9][0-9])(?P<zonem>[0-9][0-9])'
r'"')
MON2NUM = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
#need to monkey patch _convert_INTERNALDATE to work with imaplib2
#modification of IMAPClient
def mod_convert_INTERNALDATE(date_string, normalise_times=True):#pylint: disable=C0103
"""
monkey patched convert_INTERNALDATE
"""
mon = INTERNALDATE_RE.match('INTERNALDATE "%s"' % date_string)
if not mon:
raise ValueError("couldn't parse date %r" % date_string)
zoneh = int(mon.group('zoneh'))
zonem = (zoneh * 60) + int(mon.group('zonem'))
if mon.group('zonen') == '-':
zonem = -zonem
timez = imapclient.fixed_offset.FixedOffset(zonem)
year = int(mon.group('year'))
the_mon = MON2NUM[mon.group('mon')]
day = int(mon.group('day'))
hour = int(mon.group('hour'))
minute = int(mon.group('min'))
sec = int(mon.group('sec'))
the_dt = datetime.datetime(year, the_mon, day, hour, minute, sec, 0, timez)
if normalise_times:
# Normalise to host system's timezone
return the_dt.astimezone(imapclient.fixed_offset.FixedOffset.for_system()).replace(tzinfo=None)
return the_dt
#monkey patching is done here
imapclient.response_parser._convert_INTERNALDATE = mod_convert_INTERNALDATE #pylint: disable=W0212
#monkey patching add compress in COMMANDS of imap
imaplib.Commands['COMPRESS'] = ('AUTH', 'SELECTED')
def datetime_to_imap(dt):
"""Convert a datetime instance to a IMAP datetime string.
If timezone information is missing the current system
timezone is used.
"""
if not dt.tzinfo:
dt = dt.replace(tzinfo=imapclient.fixed_offset.FixedOffset.for_system())
return dt.strftime("%d-%b-%Y %H:%M:%S %z")
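# Example (illustrative): a naive datetime is stamped with the system
# timezone, so on a UTC host:
#   datetime_to_imap(datetime.datetime(2012, 1, 2, 3, 4, 5))
#   -> '02-Jan-2012 03:04:05 +0000'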
def to_unicode(s):
if isinstance(s, imapclient.six.binary_type):
return s.decode('ascii')
return s
def to_bytes(s):
if isinstance(s, imapclient.six.text_type):
return s.encode('ascii')
return s
class IMAP4COMPSSL(imaplib.IMAP4_SSL): #pylint:disable=R0904
"""
Add support for compression inspired by http://www.janeelix.com/piers/python/py2html.cgi/piers/python/imaplib2
"""
SOCK_TIMEOUT = 70 # set a socket timeout of 70 sec to avoid blocking forever in ssl.read
def __init__(self, host = '', port = imaplib.IMAP4_SSL_PORT, keyfile = None, certfile = None):
"""
constructor
"""
self.compressor = None
self.decompressor = None
imaplib.IMAP4_SSL.__init__(self, host, port, keyfile, certfile)
def activate_compression(self):
"""
activate_compressing()
Enable deflate compression on the socket (RFC 4978).
"""
# rfc 1951 - pure DEFLATE, so use -15 for both windows
self.decompressor = zlib.decompressobj(-15)
self.compressor = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION, zlib.DEFLATED, -15)
def open(self, host = '', port = imaplib.IMAP4_SSL_PORT):
"""Setup connection to remote server on "host:port".
(default: localhost:standard IMAP4 SSL port).
This connection will be used by the routines:
read, readline, send, shutdown.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port), self.SOCK_TIMEOUT) #add so_timeout
#self.sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) #try to set TCP NO DELAY to increase performances
self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile)
#self.sslobj = ssl.wrap_socket(self.sock, self.keyfile, self.certfile, suppress_ragged_eofs = False)
# This is the last correction added to avoid memory fragmentation in imaplib
# makefile creates a file object that makes use of cStringIO to avoid mem fragmentation
# it could be used without the compression
# (maybe make 2 set of methods without compression and with compression)
#self.file = self.sslobj.makefile('rb')
def new_read(self, size):
"""
Read 'size' bytes from remote.
Call _intern_read that takes care of the compression
"""
chunks = cStringIO.StringIO() #use cStringIO.StringIO to avoid too much fragmentation
read = 0
while read < size:
try:
data = self._intern_read(min(size-read, 16384)) #never ask more than 16384 because imaplib can do it
except ssl.SSLError, err:
print("************* SSLError received %s" % (err))
raise self.abort('Gmvault ssl socket error: EOF. Connection lost, reconnect.')
read += len(data)
chunks.write(data)
return chunks.getvalue() #return the cStringIO content
def read(self, size):
"""
Read 'size' bytes from remote.
Call _intern_read that takes care of the compression
"""
        chunks = cStringIO.StringIO() #use cStringIO.StringIO to avoid too much memory fragmentation
read = 0
while read < size:
            data = self._intern_read(min(size-read, 16384)) #never ask for more than 16384 bytes at a time, as imaplib does
if not data:
#to avoid infinite looping due to empty string returned
raise self.abort('Gmvault ssl socket error: EOF. Connection lost, reconnect.')
read += len(data)
chunks.write(data)
return chunks.getvalue() #return the cStringIO content
def _intern_read(self, size):
"""
Read at most 'size' bytes from remote.
Takes care of the compression
"""
if self.decompressor is None:
return self.sslobj.read(size)
if self.decompressor.unconsumed_tail:
data = self.decompressor.unconsumed_tail
else:
data = self.sslobj.read(8192) #Fixed buffer size. maybe change to 16384
return self.decompressor.decompress(data, size)
def readline(self):
"""Read line from remote."""
line = cStringIO.StringIO() #use cStringIO to avoid memory fragmentation
while 1:
#make use of read that takes care of the compression
#it could be simplified without compression
char = self.read(1)
line.write(char)
if char in ("\n", ""):
return line.getvalue()
def shutdown(self):
"""Close I/O established in "open"."""
#self.file.close() #if file created
self.sock.close()
def send(self, data):
"""send(data)
Send 'data' to remote."""
if self.compressor is not None:
data = self.compressor.compress(data)
data += self.compressor.flush(zlib.Z_SYNC_FLUSH)
self.sslobj.sendall(data)
def seq_to_parenlist(flags):
"""Convert a sequence of strings into parenthised list string for
use with IMAP commands.
"""
if isinstance(flags, str):
flags = (flags,)
elif not isinstance(flags, (tuple, list)):
raise ValueError('invalid flags list: %r' % flags)
return '(%s)' % ' '.join(flags)
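# Hedged example of the helper above (flag values are illustrative only):
#   >>> seq_to_parenlist(['\\Seen', '\\Flagged'])
#   '(\\Seen \\Flagged)'
#   >>> seq_to_parenlist('\\Seen')
#   '(\\Seen)'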
class MonkeyIMAPClient(imapclient.IMAPClient): #pylint:disable=R0903,R0904
"""
Need to extend the IMAPClient to do more things such as compression
Compression inspired by http://www.janeelix.com/piers/python/py2html.cgi/piers/python/imaplib2
"""
def __init__(self, host, port=None, use_uid=True, need_ssl=False):
"""
constructor
"""
super(MonkeyIMAPClient, self).__init__(host, port, use_uid, need_ssl)
def oauth2_login(self, oauth2_cred):
"""
Connect using oauth2
:param oauth2_cred:
:return:
"""
typ, data = self._imap.authenticate('XOAUTH2', lambda x: oauth2_cred)
self._checkok('authenticate', typ, data)
return data[0]
def search(self, criteria): #pylint: disable=W0221
"""
        Perform an imap search or gmail search
"""
if criteria.get('type','') == 'imap':
#encoding criteria in utf-8
req = criteria['req'].encode('utf-8')
charset = 'utf-8'
return super(MonkeyIMAPClient, self).search(req, charset)
elif criteria.get('type','') == 'gmail':
return self.gmail_search(criteria.get('req',''))
else:
raise Exception("Unknown search type %s" % (criteria.get('type','no request type passed')))
def gmail_search(self, criteria):
"""
        perform a search with gmail search criteria.
        eg, subject:Hello World
"""
criteria = criteria.replace('\\', '\\\\')
criteria = criteria.replace('"', '\\"')
        #working but the server cannot understand it when non ascii chars are used
#args = ['CHARSET', 'utf-8', 'X-GM-RAW', '"%s"' % (criteria)]
#typ, data = self._imap.uid('SEARCH', *args)
#working Literal search
self._imap.literal = '"%s"' % (criteria)
self._imap.literal = imaplib.MapCRLF.sub(imaplib.CRLF, self._imap.literal)
self._imap.literal = self._imap.literal.encode("utf-8")
#use uid to keep the imap ids consistent
args = ['CHARSET', 'utf-8', 'X-GM-RAW']
typ, data = self._imap.uid('SEARCH', *args) #pylint: disable=W0142
self._checkok('search', typ, data)
if data == [None]: # no untagged responses...
return [ ]
return [ long(i) for i in data[0].split() ]
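    # Hedged usage sketch for the literal-based search above (the criteria value
    # is hypothetical): the criteria string is sent as an IMAP literal so that
    # non-ascii characters survive the round trip.
    #   imap_ids = client.search({'type': 'gmail', 'req': u'subject:"Hello World"'})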
def append(self, folder, msg, flags=(), msg_time=None):
"""Append a message to *folder*.
        *msg* should be a string containing the full message including
headers.
*flags* should be a sequence of message flags to set. If not
specified no flags will be set.
*msg_time* is an optional datetime instance specifying the
date and time to set on the message. The server will set a
time if it isn't specified. If *msg_time* contains timezone
information (tzinfo), this will be honoured. Otherwise the
        local machine's time zone is sent to the server.
Returns the APPEND response as returned by the server.
"""
if msg_time:
time_val = '"%s"' % datetime_to_imap(msg_time)
time_val = to_bytes(time_val)
else:
time_val = None
return self._command_and_check('append',
self._normalise_folder(folder),
imapclient.imapclient.seq_to_parenstr(flags),
time_val,
to_bytes(msg),
unpack=True)
def enable_compression(self):
"""
enable_compression()
Ask the server to start compressing the connection.
Should be called from user of this class after instantiation, as in:
if 'COMPRESS=DEFLATE' in imapobj.capabilities:
imapobj.enable_compression()
"""
ret_code, _ = self._imap._simple_command('COMPRESS', 'DEFLATE') #pylint: disable=W0212
if ret_code == 'OK':
self._imap.activate_compression()
else:
#no errors for the moment
pass
| 13,095 | Python | .py | 283 | 37.003534 | 118 | 0.623828 | gaubert/gmvault | 3,572 | 285 | 144 | AGPL-3.0 | 9/5/2024, 5:11:34 PM (Europe/Amsterdam) |
| 12,307 | imap_utils.py | gaubert_gmvault/src/gmv/imap_utils.py |
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Module containing the IMAPFetcher object which is the Wrapper around the modified IMAPClient object
'''
import math
import time
import socket
import re
import functools
import ssl
import imaplib
import gmv.gmvault_const as gmvault_const
import gmv.log_utils as log_utils
import gmv.credential_utils as credential_utils
import gmv.gmvault_utils as gmvault_utils
import gmv.mod_imap as mimap
LOG = log_utils.LoggerFactory.get_logger('imap_utils')
class PushEmailError(Exception):
"""
PushEmail Error
"""
def __init__(self, a_msg, quarantined = False):
"""
Constructor
"""
super(PushEmailError, self).__init__(a_msg)
self._in_quarantine = quarantined
def quarantined(self):
""" Get email to quarantine """
return self._in_quarantine
class LabelError(Exception):
"""
LabelError. Exception send when there is an error when adding labels to message
"""
def __init__(self, a_msg, ignore = False):
"""
Constructor
"""
super(LabelError, self).__init__(a_msg)
self._ignore = ignore
def ignore(self):
""" ignore """
return self._ignore
#retry decorator with nb of tries and sleep_time and backoff
def retry(a_nb_tries=3, a_sleep_time=1, a_backoff=1): #pylint:disable=R0912
"""
    Decorator for retrying a command when it fails with an imap or socket error.
    Should be used exclusively on imap exchanges.
    Strategy: always retry on any imaplib or socket error. Wait a few seconds before retrying.
    backoff sets the factor by which a_sleep_time lengthens after each failure. backoff must be
    greater than or equal to 1 (1 means a constant sleep time, i.e. no real backoff).
"""
if a_backoff < 1:
raise ValueError("a_backoff must be greater or equal to 1")
a_nb_tries = math.floor(a_nb_tries)
if a_nb_tries < 0:
raise ValueError("a_nb_tries must be 0 or greater")
if a_sleep_time <= 0:
raise ValueError("a_sleep_time must be greater than 0")
def reconnect(the_self, rec_nb_tries, total_nb_tries, rec_error, rec_sleep_time = [1]): #pylint: disable=W0102
"""
Reconnect procedure. Sleep and try to reconnect
"""
# go in retry mode if less than a_nb_tries
while rec_nb_tries[0] < total_nb_tries:
LOG.critical("Disconnecting from Gmail Server and sleeping ...")
the_self.disconnect()
# add X sec of wait
time.sleep(rec_sleep_time[0])
rec_sleep_time[0] *= a_backoff #increase sleep time for next time
rec_nb_tries[0] += 1
#increase total nb of reconns
the_self.total_nb_reconns += 1
# go in retry mode: reconnect.
# retry reconnect as long as we have tries left
try:
LOG.critical("Reconnecting to the from Gmail Server.")
#reconnect to the current folder
the_self.connect(go_to_current_folder = True )
return
except Exception, ignored:
# catch all errors and try as long as we have tries left
LOG.exception(ignored)
else:
#cascade error
raise rec_error
def inner_retry(the_func): #pylint:disable=C0111,R0912
def wrapper(*args, **kwargs): #pylint:disable=C0111,R0912
nb_tries = [0] # make it mutable in reconnect
m_sleep_time = [a_sleep_time] #make it mutable in reconnect
while True:
try:
return the_func(*args, **kwargs)
except PushEmailError, p_err:
LOG.debug("error message = %s. traceback:%s" % (p_err, gmvault_utils.get_exception_traceback()))
if nb_tries[0] < a_nb_tries:
LOG.critical("Cannot reach the Gmail server. Wait %s second(s) and retrying." % (m_sleep_time[0]))
else:
LOG.critical("Stop retrying, tried too many times ...")
reconnect(args[0], nb_tries, a_nb_tries, p_err, m_sleep_time)
except imaplib.IMAP4.abort, err: #abort is recoverable and error is not
LOG.debug("IMAP (abort) error message = %s. traceback:%s" % (err, gmvault_utils.get_exception_traceback()))
if nb_tries[0] < a_nb_tries:
LOG.critical("Received an IMAP abort error. Wait %s second(s) and retrying." % (m_sleep_time[0]))
else:
LOG.critical("Stop retrying, tried too many times ...")
# problem with this email, put it in quarantine
reconnect(args[0], nb_tries, a_nb_tries, err, m_sleep_time)
except socket.error, sock_err:
LOG.debug("error message = %s. traceback:%s" % (sock_err, gmvault_utils.get_exception_traceback()))
if nb_tries[0] < a_nb_tries:
LOG.critical("Cannot reach the Gmail server. Wait %s second(s) and retrying." % (m_sleep_time[0]))
else:
LOG.critical("Stop retrying, tried too many times ...")
reconnect(args[0], nb_tries, a_nb_tries, sock_err, m_sleep_time)
except ssl.SSLError, ssl_err:
LOG.debug("error message = %s. traceback:%s" % (ssl_err, gmvault_utils.get_exception_traceback()))
if nb_tries[0] < a_nb_tries:
LOG.critical("Cannot reach the Gmail server. Wait %s second(s) and retrying." % (m_sleep_time[0]))
else:
LOG.critical("Stop retrying, tried too many times ...")
                    reconnect(args[0], nb_tries, a_nb_tries, ssl_err, m_sleep_time)
except imaplib.IMAP4.error, err:
#just trace it back for the moment
LOG.debug("IMAP (normal) error message = %s. traceback:%s" % (err, gmvault_utils.get_exception_traceback()))
if nb_tries[0] < a_nb_tries:
LOG.critical("Error when reaching Gmail server. Wait %s second(s) and retry up to 2 times." \
% (m_sleep_time[0]))
else:
LOG.critical("Stop retrying, tried too many times ...")
#raise err
# retry 2 times before to quit
reconnect(args[0], nb_tries, 2, err, m_sleep_time)
return functools.wraps(the_func)(wrapper)
#return wrapper
return inner_retry
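# Hedged usage sketch of the decorator above (the class and method names are
# hypothetical): with @retry(3, 1, 2) the sleep time grows geometrically, so the
# three reconnection attempts wait roughly 1s, 2s and 4s before the error is
# finally re-raised.
#   class SomeIMAPUser(object):
#       @retry(a_nb_tries=3, a_sleep_time=1, a_backoff=2)
#       def fetch_something(self):
#           ...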
class GIMAPFetcher(object): #pylint:disable=R0902,R0904
'''
IMAP Class reading the information
'''
GMAIL_EXTENSION = 'X-GM-EXT-1' # GMAIL capability
GMAIL_ALL = u'[Gmail]/All Mail' #GMAIL All Mail mailbox
GENERIC_GMAIL_ALL = u'\\AllMail' # unlocalised GMAIL ALL
GENERIC_DRAFTS = u'\\Drafts' # unlocalised DRAFTS
GENERIC_GMAIL_CHATS = gmvault_const.GMAIL_UNLOCAL_CHATS # unlocalised Chats names
FOLDER_NAMES = ['ALLMAIL', 'CHATS', 'DRAFTS']
GMAIL_ID = 'X-GM-MSGID' #GMAIL ID attribute
GMAIL_THREAD_ID = 'X-GM-THRID'
GMAIL_LABELS = 'X-GM-LABELS'
IMAP_INTERNALDATE = 'INTERNALDATE'
IMAP_FLAGS = 'FLAGS'
IMAP_ALL = {'type':'imap', 'req':'ALL'}
EMAIL_BODY = 'BODY[]'
GMAIL_SPECIAL_DIRS = ['\\Inbox', '\\Starred', '\\Sent', '\\Draft', '\\Important']
#GMAIL_SPECIAL_DIRS_LOWER = ['\\inbox', '\\starred', '\\sent', '\\draft', '\\important']
GMAIL_SPECIAL_DIRS_LOWER = ['\\inbox', '\\starred', '\\sent', '\\draft', '\\important', '\\trash']
IMAP_BODY_PEEK = 'BODY.PEEK[]' #get body without setting msg as seen
#get the body info without setting msg as seen
IMAP_HEADER_PEEK_FIELDS = 'BODY.PEEK[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]'
#key used to find these fields in the IMAP Response
IMAP_HEADER_FIELDS_KEY = 'BODY[HEADER.FIELDS (MESSAGE-ID SUBJECT X-GMAIL-RECEIVED)]'
#GET_IM_UID_RE
    APPENDUID = r'^\[APPENDUID [0-9]+ ([0-9]+)\] \(Success\)$'
APPENDUID_RE = re.compile(APPENDUID)
GET_ALL_INFO = [ GMAIL_ID, GMAIL_THREAD_ID, GMAIL_LABELS, IMAP_INTERNALDATE, \
IMAP_BODY_PEEK, IMAP_FLAGS, IMAP_HEADER_PEEK_FIELDS]
GET_ALL_BUT_DATA = [ GMAIL_ID, GMAIL_THREAD_ID, GMAIL_LABELS, IMAP_INTERNALDATE, \
IMAP_FLAGS, IMAP_HEADER_PEEK_FIELDS]
GET_DATA_ONLY = [ GMAIL_ID, IMAP_BODY_PEEK]
GET_GMAIL_ID = [ GMAIL_ID ]
GET_GMAIL_ID_DATE = [ GMAIL_ID, IMAP_INTERNALDATE]
def __init__(self, host, port, login, credential, readonly_folder = True): #pylint:disable=R0913
'''
Constructor
'''
self.host = host
self.port = port
self.login = login
self.once_connected = False
self.credential = credential
self.ssl = True
self.use_uid = True
self.readonly_folder = readonly_folder
self.localized_folders = { 'ALLMAIL': { 'loc_dir' : None, 'friendly_name' : 'allmail'},
'CHATS' : { 'loc_dir' : None, 'friendly_name' : 'chats'},
'DRAFTS' : { 'loc_dir' : None, 'friendly_name' : 'drafts'} }
# memoize the current folder (All Mail or Chats) for reconnection management
self.current_folder = None
self.server = None
self.go_to_all_folder = True
self.total_nb_reconns = 0
# True when CHATS or other folder error msg has been already printed
self.printed_folder_error_msg = { 'ALLMAIL' : False, 'CHATS': False , 'DRAFTS':False }
#update GENERIC_GMAIL_CHATS. Should be done at the class level
self.GENERIC_GMAIL_CHATS.extend(gmvault_utils.get_conf_defaults().get_list('Localisation', 'chat_folder', []))
def spawn_connection(self):
"""
spawn a connection with the same parameters
"""
conn = GIMAPFetcher(self.host, self.port, self.login, self.credential, self.readonly_folder)
conn.connect()
return conn
def connect(self, go_to_current_folder = False):
"""
connect to the IMAP server
"""
# create imap object
self.server = mimap.MonkeyIMAPClient(self.host, port = self.port, use_uid= self.use_uid, need_ssl= self.ssl)
# connect with password or xoauth
if self.credential['type'] == 'passwd':
self.server.login(self.login, self.credential['value'])
elif self.credential['type'] == 'oauth2':
#connect with oauth2
if self.once_connected:
self.credential = credential_utils.CredentialHelper.get_oauth2_credential(self.login, renew_cred = False)
LOG.debug("credential['value'] = %s" % (self.credential['value']))
#try to login
self.server.oauth2_login(self.credential['value'])
else:
raise Exception("Unknown authentication method %s. Please use xoauth or passwd authentication " \
% (self.credential['type']))
#set connected to True to handle reconnection in case of failure
self.once_connected = True
# check gmailness
self.check_gmailness()
# find allmail chats and drafts folders
self.find_folder_names()
if go_to_current_folder and self.current_folder:
self.server.select_folder(self.current_folder, readonly = self.readonly_folder)
#enable compression
if gmvault_utils.get_conf_defaults().get_boolean('General', 'enable_imap_compression', True):
self.enable_compression()
LOG.debug("After Enabling compression.")
else:
LOG.debug("Do not enable imap compression.")
def disconnect(self):
"""
disconnect to avoid too many simultaneous connection problem
"""
if self.server:
try:
self.server.logout()
except Exception, ignored: #ignored exception but still log it in log file if activated
LOG.exception(ignored)
self.server = None
def reconnect(self):
"""
disconnect and connect again
"""
self.disconnect()
self.connect()
def enable_compression(self):
"""
Try to enable the compression
"""
#self.server.enable_compression()
pass
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def find_folder_names(self):
"""
depending on your account the all mail folder can be named
[GMAIL]/ALL Mail or [GoogleMail]/All Mail.
Find and set the right one
"""
#use xlist because of localized dir names
folders = self.server.xlist_folders()
the_dir = None
for (flags, _, the_dir) in folders:
#non localised GMAIL_ALL
if GIMAPFetcher.GENERIC_GMAIL_ALL in flags:
#it could be a localized Dir name
self.localized_folders['ALLMAIL']['loc_dir'] = the_dir
elif the_dir in GIMAPFetcher.GENERIC_GMAIL_CHATS :
#it could be a localized Dir name
self.localized_folders['CHATS']['loc_dir'] = the_dir
elif GIMAPFetcher.GENERIC_DRAFTS in flags:
self.localized_folders['DRAFTS']['loc_dir'] = the_dir
if not self.localized_folders['ALLMAIL']['loc_dir']: # all mail error
raise Exception("Cannot find global 'All Mail' folder (maybe localized and translated into your language) ! "\
"Check whether 'Show in IMAP for 'All Mail' is enabled in Gmail (Go to Settings->Labels->All Mail)")
elif not self.localized_folders['CHATS']['loc_dir'] and \
gmvault_utils.get_conf_defaults().getboolean("General","errors_if_chat_not_visible", False):
raise Exception("Cannot find global 'Chats' folder ! Check whether 'Show in IMAP for 'Chats' "\
"is enabled in Gmail (Go to Settings->Labels->All Mail)")
elif not self.localized_folders['DRAFTS']['loc_dir']:
raise Exception("Cannot find global 'Drafts' folder.")
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def find_all_mail_folder(self):
"""
depending on your account the all mail folder can be named
[GMAIL]/ALL Mail or [GoogleMail]/All Mail.
Find and set the right one
"""
#use xlist because of localized dir names
folders = self.server.xlist_folders()
the_dir = None
for (flags, _, the_dir) in folders:
#non localised GMAIL_ALL
if GIMAPFetcher.GENERIC_GMAIL_ALL in flags:
#it could be a localized Dir name
self.localized_folders['ALLMAIL']['loc_dir'] = the_dir
return the_dir
if not self.localized_folders['ALLMAIL']['loc_dir']:
#Error
raise Exception("Cannot find global 'All Mail' folder (maybe localized and translated into your language) !"\
" Check whether 'Show in IMAP for 'All Mail' is enabled in Gmail (Go to Settings->Labels->All Mail)")
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def find_chats_folder(self):
"""
depending on your account the chats folder can be named
[GMAIL]/Chats or [GoogleMail]/Chats, [GMAIL]/tous les chats ...
Find and set the right one
        Note: Cannot use the flags as Chats is not a system label. Thanks Google
"""
#use xlist because of localized dir names
folders = self.server.xlist_folders()
LOG.debug("Folders = %s\n" % (folders))
the_dir = None
for (_, _, the_dir) in folders:
#look for GMAIL Chats
if the_dir in GIMAPFetcher.GENERIC_GMAIL_CHATS :
#it could be a localized Dir name
self.localized_folders['CHATS']['loc_dir'] = the_dir
return the_dir
#Error did not find Chats dir
if gmvault_utils.get_conf_defaults().getboolean("General", "errors_if_chat_not_visible", False):
raise Exception("Cannot find global 'Chats' folder ! Check whether 'Show in IMAP for 'Chats' "\
"is enabled in Gmail (Go to Settings->Labels->All Mail)")
return None
def is_visible(self, a_folder_name):
"""
        check if a folder is visible; warn once on the console if it is not
"""
dummy = self.localized_folders.get(a_folder_name)
if dummy and (dummy.get('loc_dir', None) is not None):
return True
        if not self.printed_folder_error_msg.get(a_folder_name, None):
            LOG.critical("Cannot find '%s' folder on Gmail Server. If you wish to back it up,"\
                         " look at the documentation to see how to configure your Gmail account.\n" % (a_folder_name))
            self.printed_folder_error_msg[a_folder_name] = True
return False
def get_folder_name(self, a_folder_name):
"""return real folder name from generic ones"""
if a_folder_name not in self.FOLDER_NAMES:
raise Exception("%s is not a predefined folder names. Please use one" % (a_folder_name) )
folder = self.localized_folders.get(a_folder_name, {'loc_dir' : 'GMVNONAME'})['loc_dir']
return folder
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def select_folder(self, a_folder_name, use_predef_names = True):
"""
Select one of the existing folder
"""
if use_predef_names:
if a_folder_name not in self.FOLDER_NAMES:
raise Exception("%s is not a predefined folder names. Please use one" % (a_folder_name) )
folder = self.localized_folders.get(a_folder_name, {'loc_dir' : 'GMVNONAME'})['loc_dir']
if self.current_folder != folder:
self.server.select_folder(folder, readonly = self.readonly_folder)
self.current_folder = folder
elif self.current_folder != a_folder_name:
self.server.select_folder(a_folder_name, readonly = self.readonly_folder)
self.current_folder = a_folder_name
return self.current_folder
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def list_all_folders(self):
"""
        Return all folders, mainly for debugging purposes
"""
return self.server.xlist_folders()
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def get_capabilities(self):
"""
return the server capabilities
"""
if not self.server:
raise Exception("GIMAPFetcher not connect to the GMAIL server")
return self.server.capabilities()
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def check_gmailness(self):
"""
Check that the server is a gmail server
"""
if not GIMAPFetcher.GMAIL_EXTENSION in self.get_capabilities():
raise Exception("GIMAPFetcher is not connected to a IMAP GMAIL server. Please check host (%s) and port (%s)" \
% (self.host, self.port))
return True
@retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def search(self, a_criteria):
"""
Return all found ids corresponding to the search
"""
return self.server.search(a_criteria)
    @retry(3,1,2) # try 3 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 4 sec
def fetch(self, a_ids, a_attributes):
"""
Return all attributes associated to each message
"""
return self.server.fetch(a_ids, a_attributes)
@classmethod
def _build_labels_str(cls, a_labels):
"""
Create IMAP label string from list of given labels.
Convert the labels to utf7
a_labels: List of labels
"""
# add GMAIL LABELS
labels_str = None
if a_labels and len(a_labels) > 0:
labels_str = '('
for label in a_labels:
label = gmvault_utils.remove_consecutive_spaces_and_strip(label)
#add not in self.GMAIL_SPECIAL_DIRS_LOWER
if label.lower() in cls.GMAIL_SPECIAL_DIRS_LOWER:
labels_str += '%s ' % (label)
else:
label = label.replace('"', '\\"') #replace quote with escaped quotes
labels_str += '\"%s\" ' % (label)
labels_str = '%s%s' % (labels_str[:-1],')')
return labels_str
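    # Hedged example of the label string built above (labels are illustrative):
    # special Gmail dirs are kept unquoted while user labels get quoted.
    #   >>> GIMAPFetcher._build_labels_str([u'Work/Reports', u'\\Inbox'])
    #   '("Work/Reports" \\Inbox)'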
@classmethod
def _get_dir_from_labels(cls, label):
"""
Get the dirs to create from the labels
label: label name with / in it
"""
dirs = []
i = 0
for lab in label.split('/'):
lab = gmvault_utils.remove_consecutive_spaces_and_strip(lab)
if i == 0:
dirs.append(lab)
else:
dirs.append('%s/%s' % (dirs[i-1], lab))
i += 1
return dirs
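    # Hedged example of the hierarchy expansion above (the label is illustrative):
    # each nesting level yields the chain of parent directories to create.
    #   >>> GIMAPFetcher._get_dir_from_labels(u'clients/acme/2012')
    #   [u'clients', u'clients/acme', u'clients/acme/2012']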
def create_gmail_labels(self, labels, existing_folders):
"""
Create folders and subfolders on Gmail in order
to recreate the label hierarchy before to upload emails
        Note that adding labels with +X-GM-LABELS creates only flat labels
        but not nested ones. This is why this trick must be used to
        recreate the label hierarchy.
labels: list of labels to create
"""
#1.5-beta moved that out of the loop to minimize the number of calls
#to that method. (Could go further and memoize it)
#get existing directories (or label parts)
# get in lower case because Gmail labels are case insensitive
listed_folders = set([ directory.lower() for (_, _, directory) in self.list_all_folders() ])
existing_folders = listed_folders.union(existing_folders)
reserved_labels_map = gmvault_utils.get_conf_defaults().get_dict("Restore", "reserved_labels_map", \
{ u'migrated' : u'gmv-migrated', u'\muted' : u'gmv-muted' })
LOG.debug("Labels to create: [%s]" % (labels))
for lab in labels:
#LOG.info("Reserved labels = %s\n" % (reserved_labels))
#LOG.info("lab.lower = %s\n" % (lab.lower()))
if lab.lower() in reserved_labels_map.keys(): #exclude creation of migrated label
n_lab = reserved_labels_map.get(lab.lower(), "gmv-default-label")
LOG.info("Warning ! label '%s' (lower or uppercase) is reserved by Gmail and cannot be used."\
"Use %s instead" % (lab, n_lab))
lab = n_lab
LOG.info("translated lab = %s\n" % (lab))
#split all labels
labs = self._get_dir_from_labels(lab)
for directory in labs:
low_directory = directory.lower() #get lower case directory but store original label
if (low_directory not in existing_folders) and (low_directory not in self.GMAIL_SPECIAL_DIRS_LOWER):
try:
if self.server.create_folder(directory) != 'Success':
raise Exception("Cannot create label %s: the directory %s cannot be created." % (lab, directory))
else:
LOG.debug("============== ####### Created Labels (%s)." % (directory))
except imaplib.IMAP4.error, error:
#log error in log file if it exists
LOG.debug(gmvault_utils.get_exception_traceback())
if str(error).startswith("create failed: '[ALREADYEXISTS] Duplicate folder"):
LOG.critical("Warning: label %s already exists on Gmail and Gmvault tried to create it."\
" Ignore this issue." % (directory) )
else:
raise error
#add created folder in folders
existing_folders.add(low_directory)
#return all existing folders
return existing_folders
@retry(3,1,2)
def apply_labels_to(self, imap_ids, labels):
"""
        apply the given labels to a set of emails
"""
# go to All Mail folder
LOG.debug("Applying labels %s" % (labels))
the_timer = gmvault_utils.Timer()
the_timer.start()
#utf7 the labels as they should be
labels = [ utf7_encode(label) for label in labels ]
labels_str = self._build_labels_str(labels) # create labels str
if labels_str:
#has labels so update email
the_timer.start()
LOG.debug("Before to store labels %s" % (labels_str))
id_list = ",".join(map(str, imap_ids))
#+X-GM-LABELS.SILENT to have not returned data
try:
ret_code, data = self.server._imap.uid('STORE', id_list, '+X-GM-LABELS.SILENT', labels_str) #pylint: disable=W0212
except imaplib.IMAP4.error, original_err:
LOG.info("Error in apply_labels_to. See exception traceback")
LOG.debug(gmvault_utils.get_exception_traceback())
# try to add labels to each individual ids
faulty_ids = []
for the_id in imap_ids:
try:
ret_code, data = self.server._imap.uid('STORE', the_id, '+X-GM-LABELS.SILENT', labels_str) #pylint: disable=W0212
except imaplib.IMAP4.error, store_err:
LOG.debug("Error when trying to apply labels %s to emails with imap_id %s. Error:%s" % (labels_str, the_id, store_err))
faulty_ids.append(the_id)
#raise an error to ignore faulty emails
raise LabelError("Cannot add Labels %s to emails with uids %s. Error:%s" % (labels_str, faulty_ids, original_err), ignore = True)
#ret_code, data = self.server._imap.uid('COPY', id_list, labels[0])
LOG.debug("After storing labels %s. Operation time = %s s.\nret = %s\ndata=%s" \
% (labels_str, the_timer.elapsed_ms(),ret_code, data))
# check if it is ok otherwise exception
if ret_code != 'OK':
#update individuals emails
faulty_ids = []
for the_id in imap_ids:
try:
ret_code, data = self.server._imap.uid('STORE', the_id, '+X-GM-LABELS.SILENT', labels_str) #pylint: disable=W0212
except imaplib.IMAP4.error, store_err:
LOG.debug("Error when trying to apply labels %s to emails with imap_id %s. Error:%s" % (labels_str, the_id, store_err))
faulty_ids.append(the_id)
raise LabelError("Cannot add Labels %s to emails with uids %s. Error:%s" % (labels_str, faulty_ids, data), ignore = True)
else:
LOG.debug("Stored Labels %s for gm_ids %s" % (labels_str, imap_ids))
def delete_gmail_labels(self, labels, force_delete = False):
"""
        Delete passed labels. Beware: experimental, and labels must be ordered
"""
for label in reversed(labels):
labs = self._get_dir_from_labels(label)
for directory in reversed(labs):
if force_delete or ( (directory.lower() not in self.GMAIL_SPECIAL_DIRS_LOWER) \
and self.server.folder_exists(directory) ): #call server exists each time
try:
self.server.delete_folder(directory)
except imaplib.IMAP4.error, _:
LOG.debug(gmvault_utils.get_exception_traceback())
def erase_mailbox(self):
"""
        This is for testing purposes and cannot be used with my own mailbox
"""
if self.login == "guillaume.aubert@gmail.com":
raise Exception("Error cannot activate erase_mailbox with %s" % (self.login))
LOG.info("Erase mailbox for account %s." % (self.login))
LOG.info("Delete folders")
#delete folders
folders = self.server.xlist_folders()
LOG.debug("Folders = %s.\n" %(folders))
trash_folder_name = None
for (flags, _, the_dir) in folders:
if (u'\\Starred' in flags) or (u'\\Spam' in flags) or (u'\\Sent' in flags) \
or (u'\\Important' in flags) or (the_dir == u'[Google Mail]/Chats') \
or (the_dir == u'[Google Mail]') or (u'\\Trash' in flags) or \
(u'\\Inbox' in flags) or (GIMAPFetcher.GENERIC_GMAIL_ALL in flags) or \
(GIMAPFetcher.GENERIC_DRAFTS in flags) or (GIMAPFetcher.GENERIC_GMAIL_CHATS in flags):
LOG.info("Ignore folder %s" % (the_dir))
if (u'\\Trash' in flags): #keep trash folder name
trash_folder_name = the_dir
else:
LOG.info("Delete folder %s" % (the_dir))
self.server.delete_folder(the_dir)
self.select_folder('ALLMAIL')
#self.server.store("1:*",'+X-GM-LABELS', '\\Trash')
#self.server._imap.uid('STORE', id_list, '+X-GM-LABELS.SILENT', '\\Trash')
#self.server.add_gmail_labels(self, messages, labels)
LOG.info("Move emails to Trash.")
# get all imap ids in ALLMAIL
imap_ids = self.search(GIMAPFetcher.IMAP_ALL)
#flag all message as deleted
#print(self.server.delete_messages(imap_ids))
if len(imap_ids) > 0:
self.apply_labels_to(imap_ids, ['\\Trash'])
LOG.info("Got all imap_ids flagged to Trash : %s." % (imap_ids))
else:
LOG.info("No messages to erase.")
LOG.info("Delete emails from Trash.")
        if trash_folder_name is None:
raise Exception("No trash folder ???")
self.select_folder(trash_folder_name, False)
# get all imap ids in ALLMAIL
imap_ids = self.search(GIMAPFetcher.IMAP_ALL)
if len(imap_ids) > 0:
res = self.server.delete_messages(imap_ids)
LOG.debug("Delete messages result = %s" % (res))
LOG.info("Expunge everything.")
self.server.expunge()
@retry(4,1,2) # try 4 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 8 sec
def push_data(self, a_folder, a_body, a_flags, a_internal_time):
"""
Push the data
"""
# protection against myself
if self.login == 'guillaume.aubert@gmail.com':
raise Exception("Cannot push to this account")
the_timer = gmvault_utils.Timer()
the_timer.start()
LOG.debug("Before to Append email contents")
#import sys #to print the msg in stdout
#import codecs
#sys.stdout = codecs.getwriter('utf-8')(sys.__stdout__)
#msg = "a_folder = %s, a_flags = %s" % (a_folder.encode('utf-8'), a_flags)
#msg = "a_folder = %s" % (a_folder.encode('utf-8'))
#msg = msg.encode('utf-8')
#print(msg)
res = None
try:
#a_body = self._clean_email_body(a_body)
res = self.server.append(a_folder, a_body, a_flags, a_internal_time)
except imaplib.IMAP4.abort, err:
            # handle issue when there are invalid characters (this is due to the presence of null characters)
if str(err).find("APPEND => Invalid character in literal") >= 0:
LOG.critical("Invalid character detected. Try to clean the email and reconnect.")
a_body = self._clean_email_body(a_body)
self.reconnect()
res = self.server.append(a_folder, a_body, a_flags, a_internal_time)
LOG.debug("Appended data with flags %s and internal time %s. Operation time = %s.\nres = %s\n" \
% (a_flags, a_internal_time, the_timer.elapsed_ms(), res))
# check res otherwise Exception
if '(Success)' not in res:
raise PushEmailError("GIMAPFetcher cannot restore email in %s account." %(self.login))
match = GIMAPFetcher.APPENDUID_RE.match(res)
if match:
result_uid = int(match.group(1))
LOG.debug("result_uid = %s" %(result_uid))
else:
# do not quarantine it because it seems to be done by Google Mail to forbid data uploading.
raise PushEmailError("No email id returned by IMAP APPEND command. Quarantine this email.", quarantined = True)
return result_uid
def _clean_email_body(self, a_body):
"""
Clean the body of the email
"""
        #for the moment just try to remove the null character by brute force. In the future we will have to parse the email and clean it
return a_body.replace("\0", '')
@retry(4,1,2) # try 4 times to reconnect with a sleep time of 1 sec and a backoff of 2. The fourth time will wait 8 sec
def deprecated_push_email(self, a_body, a_flags, a_internal_time, a_labels):
"""
Push a complete email body
"""
#protection against myself
if self.login == 'guillaume.aubert@gmail.com':
raise Exception("Cannot push to this account")
the_t = gmvault_utils.Timer()
the_t.start()
LOG.debug("Before to Append email contents")
try:
res = self.server.append(u'[Google Mail]/All Mail', a_body, a_flags, a_internal_time)
except imaplib.IMAP4.abort, err:
            # handle issue when there are invalid characters (this is due to the presence of null characters)
if str(err).find("APPEND => Invalid character in literal") >= 0:
a_body = self._clean_email_body(a_body)
res = self.server.append(u'[Google Mail]/All Mail', a_body, a_flags, a_internal_time)
LOG.debug("Appended data with flags %s and internal time %s. Operation time = %s.\nres = %s\n" \
% (a_flags, a_internal_time, the_t.elapsed_ms(), res))
# check res otherwise Exception
if '(Success)' not in res:
raise PushEmailError("GIMAPFetcher cannot restore email in %s account." %(self.login))
match = GIMAPFetcher.APPENDUID_RE.match(res)
if match:
result_uid = int(match.group(1))
LOG.debug("result_uid = %s" %(result_uid))
else:
# do not quarantine it because it seems to be done by Google Mail to forbid data uploading.
raise PushEmailError("No email id returned by IMAP APPEND command. Quarantine this email.", quarantined = True)
labels_str = self._build_labels_str(a_labels)
if labels_str:
#has labels so update email
the_t.start()
LOG.debug("Before to store labels %s" % (labels_str))
self.server.select_folder(u'[Google Mail]/All Mail', readonly = self.readonly_folder) # go to current folder
LOG.debug("Changing folders. elapsed %s s\n" % (the_t.elapsed_ms()))
the_t.start()
ret_code, data = self.server._imap.uid('STORE', result_uid, '+X-GM-LABELS', labels_str) #pylint: disable=W0212
#ret_code = self.server._store('+X-GM-LABELS', [result_uid],labels_str)
LOG.debug("After storing labels %s. Operation time = %s s.\nret = %s\ndata=%s" \
% (labels_str, the_t.elapsed_ms(),ret_code, data))
LOG.debug("Stored Labels %s in gm_id %s" % (labels_str, result_uid))
self.server.select_folder(u'[Google Mail]/Drafts', readonly = self.readonly_folder) # go to current folder
# check if it is ok otherwise exception
if ret_code != 'OK':
raise PushEmailError("Cannot add Labels %s to email with uid %d. Error:%s" % (labels_str, result_uid, data))
return result_uid
def decode_labels(labels):
"""
Decode labels when they are received as utf7 entities or numbers
"""
new_labels = []
for label in labels:
if isinstance(label, (int, long, float, complex)):
label = str(label)
new_labels.append(utf7_decode(label))
return new_labels
# utf7 conversion functions
def utf7_encode(s): #pylint: disable=C0103
"""encode in utf7"""
if isinstance(s, str) and sum(n for n in (ord(c) for c in s) if n > 127):
raise ValueError("%r contains characters not valid in a str folder name. "
"Convert to unicode first?" % s)
r = [] #pylint: disable=C0103
_in = []
for c in s: #pylint: disable=C0103
if ord(c) in (range(0x20, 0x26) + range(0x27, 0x7f)):
if _in:
r.extend(['&', utf7_modified_base64(''.join(_in)), '-'])
del _in[:]
r.append(str(c))
elif c == '&':
if _in:
r.extend(['&', utf7_modified_base64(''.join(_in)), '-'])
del _in[:]
r.append('&-')
else:
_in.append(c)
if _in:
r.extend(['&', utf7_modified_base64(''.join(_in)), '-'])
return ''.join(r)
def utf7_decode(s): #pylint: disable=C0103
"""decode utf7"""
r = [] #pylint: disable=C0103
decode = []
for c in s: #pylint: disable=C0103
if c == '&' and not decode:
decode.append('&')
elif c == '-' and decode:
if len(decode) == 1:
r.append('&')
else:
r.append(utf7_modified_unbase64(''.join(decode[1:])))
decode = []
elif decode:
decode.append(c)
else:
r.append(c)
if decode:
r.append(utf7_modified_unbase64(''.join(decode[1:])))
out = ''.join(r)
if not isinstance(out, unicode):
out = unicode(out, 'latin-1')
return out
def utf7_modified_base64(s): #pylint: disable=C0103
"""utf7 base64"""
s_utf7 = s.encode('utf-7')
return s_utf7[1:-1].replace('/', ',')
def utf7_modified_unbase64(s): #pylint: disable=C0103
""" utf7 unbase64"""
s_utf7 = '+' + s.replace(',', '/') + '-'
return s_utf7.decode('utf-7')
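# Hedged round-trip example for the modified UTF-7 helpers above (the label is
# illustrative): non-ascii characters are folded into '&...-' base64 runs.
#   >>> utf7_encode(u'\xe9lan')
#   '&AOk-lan'
#   >>> utf7_decode('&AOk-lan')
#   u'\xe9lan'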
| 41,539 | Python | .py | 768 | 40.608073 | 146 | 0.584282 | gaubert/gmvault | 3,572 | 285 | 144 | AGPL-3.0 | 9/5/2024, 5:11:34 PM (Europe/Amsterdam) |
| 12,308 | log_utils.py | gaubert_gmvault/src/gmv/log_utils.py |
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import os
import logbook
#different types of LoggerFactory
STANDALONE = "STANDALONE"
class StdoutHandler(logbook.StreamHandler):
"""A handler that writes to what is currently at stdout. At the first
    glance this appears to just be a :class:`StreamHandler` with the stream
set to :data:`sys.stdout` but there is a difference: if the handler is
created globally and :data:`sys.stdout` changes later, this handler will
point to the current `stdout`, whereas a stream handler would still
point to the old one.
"""
def __init__(self, level=logbook.base.NOTSET, format_string=None, a_filter = None, bubble=False): #pylint: disable=W0212
super(StdoutHandler, self).__init__(logbook.base._missing, level, \
format_string, None, a_filter, bubble )
@property
def stream(self): #pylint: disable=W0212
"""
Return the stream where to write
"""
return sys.stdout
#default log file
DEFAULT_LOG = "%s/gmvault.log" % (os.getenv("HOME", "."))
class LogbookLoggerFactory(object):
"""
Factory for creating the right logbook handler
"""
def __init__(self):
pass
def setup_cli_app_handler(self, activate_log_file=False, console_level= 'CRITICAL', \
file_path=DEFAULT_LOG, log_file_level = 'DEBUG'):
"""
Setup a handler for communicating with the user and still log everything in a logfile
"""
null_handler = logbook.NullHandler()
out_handler = StdoutHandler(format_string='{record.message}', level = console_level , bubble = False)
# first stack null handler to not have anything else logged
null_handler.push_application()
# add output Handler
out_handler.push_application()
# add file Handler
if activate_log_file:
file_handler = logbook.FileHandler(file_path, mode='w', format_string=\
'[{record.time:%Y-%m-%d %H:%M}]:{record.level_name}:{record.channel}:{record.message}',\
level = log_file_level, bubble = True)
file_handler.push_application()
def setup_simple_file_handler(self, file_path):
"""
Push a file handler logging only the message (no timestamp)
"""
null_handler = logbook.NullHandler()
handler = logbook.FileHandler(file_path, format_string='{record.message}', level = 2, bubble = False)
# first stack null handler to not have anything else logged
null_handler.push_application()
        # add file handler
handler.push_application()
def setup_simple_stdout_handler(self):
"""
        Push a stdout handler logging only the message (no timestamp)
"""
null_handler = logbook.NullHandler()
handler = StdoutHandler(format_string='{record.message}', level = 2, bubble = False)
# first stack null handler to not have anything else logged
null_handler.push_application()
        # add stdout handler
handler.push_application()
def setup_simple_stderr_handler(self):
"""
Push a stderr handler logging only the message (no timestamp)
"""
null_handler = logbook.NullHandler()
handler = logbook.StderrHandler(format_string='{record.message}', level = 2, bubble = False)
# first stack null handler to not have anything else logged
null_handler.push_application()
# add Stderr Handler
handler.push_application()
def get_logger(self, name):
"""
Return a logbook logger
"""
return logbook.Logger(name)
class LoggerFactory(object):
'''
My Logger Factory
'''
_factory = LogbookLoggerFactory()
_created = False
@classmethod
def get_factory(cls, the_type):
"""
Get logger factory
"""
if cls._created:
return cls._factory
if the_type == STANDALONE:
cls._factory = LogbookLoggerFactory()
cls._created = True
else:
raise Exception("LoggerFactory type %s is unknown." % (the_type))
return cls._factory
@classmethod
def get_logger(cls, name):
"""
Simply return a logger
"""
return cls._factory.get_logger(name)
@classmethod
def setup_simple_stderr_handler(cls, the_type):
"""
Push a stderr handler logging only the message (no timestamp)
"""
cls.get_factory(the_type).setup_simple_stderr_handler()
@classmethod
def setup_simple_stdout_handler(cls, the_type):
"""
        Push a stdout handler logging only the message (no timestamp)
"""
cls.get_factory(the_type).setup_simple_stdout_handler()
@classmethod
def setup_simple_file_handler(cls, the_type, file_path):
"""
Push a file handler logging only the message (no timestamp)
"""
cls.get_factory(the_type).setup_simple_file_handler(file_path)
@classmethod
def setup_cli_app_handler(cls, the_type, activate_log_file=False, \
console_level= 'CRITICAL', file_path=DEFAULT_LOG,\
log_file_level = 'DEBUG'):
"""
init logging engine
"""
cls.get_factory(the_type).setup_cli_app_handler(activate_log_file, \
console_level, \
file_path, log_file_level)
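# Hedged usage sketch of the factory above (handler levels and file path are
# illustrative): a CLI application typically pushes the handlers once at startup
# and then requests named loggers everywhere else.
#   LoggerFactory.setup_cli_app_handler(STANDALONE, activate_log_file=True,
#                                       console_level='CRITICAL', file_path=DEFAULT_LOG)
#   LOG = LoggerFactory.get_logger('my_module')
#   LOG.critical("shown on the console and stored in the log file")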
| 6,692 | Python | .py | 149 | 34.187919 | 124 | 0.626648 | gaubert/gmvault | 3,572 | 285 | 144 | AGPL-3.0 | 9/5/2024, 5:11:34 PM (Europe/Amsterdam) |
| 12,309 | gmvault.py | gaubert_gmvault/src/gmv/gmvault.py |
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json
import time
import datetime
import os
import itertools
import imaplib
import gmv.log_utils as log_utils
import gmv.collections_utils as collections_utils
import gmv.gmvault_utils as gmvault_utils
import gmv.imap_utils as imap_utils
import gmv.gmvault_db as gmvault_db
LOG = log_utils.LoggerFactory.get_logger('gmvault')
def handle_restore_imap_error(the_exception, gm_id, db_gmail_ids_info, gmvaulter):
"""
function to handle restore IMAPError and OSError([Errno 2] No such file or directory) in restore functions
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
if str(the_exception).find("=> Gmvault ssl socket error: EOF") >= 0:
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
gmvaulter.src.reconnect() #reconnect
else:
raise the_exception
elif isinstance(the_exception, IOError) and str(the_exception).find('[Errno 2] No such file or directory:') >=0:
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
gmvaulter.src.reconnect() #reconnect
elif isinstance(the_exception, imaplib.IMAP4.error):
LOG.error("Catched IMAP Error %s" % (str(the_exception)))
LOG.exception(the_exception)
#When the email cannot be read from Database because it was empty when returned by gmail imap
#quarantine it.
if str(the_exception) == "APPEND command error: BAD ['Invalid Arguments: Unable to parse message']":
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
elif isinstance(the_exception, imap_utils.PushEmailError):
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
if the_exception.quarantined():
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
else:
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
raise the_exception
def handle_sync_imap_error(the_exception, the_id, error_report, src):
"""
function to handle IMAPError in gmvault
type = chat or email
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# imap abort error
# ignore it
# will have to do something with these ignored messages
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
try:
#try to get the gmail_id
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
LOG.critical("Error when trying to get gmail id for message with imap id %s." % (the_id))
LOG.critical("Disconnect, wait for 10 sec then reconnect.")
src.disconnect()
#could not fetch the gm_id so disconnect and sleep
#sleep 10 sec
time.sleep(10)
LOG.critical("Reconnecting ...")
src.connect()
if curr:
gmail_id = curr[the_id].get(imap_utils.GIMAPFetcher.GMAIL_ID)
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Forced to ignore message with imap id %s, (gmail id %s)." \
% (the_id, (gmail_id if gmail_id else "cannot be read")))
elif isinstance(the_exception, imaplib.IMAP4.error):
# check if this is a cannot be fetched error
# I do not like to do string guessing within an exception but I do not have any choice here
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
        #ignore emails that have raised a fetch error
if str(the_exception).find("'Some messages could not be FETCHed (Failure)'") >= 0:
try:
#try to get the gmail_id
LOG.critical("One more attempt. Trying to fetch the Gmail ID for %s" % (the_id) )
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
if curr:
gmail_id = curr[the_id].get(imap_utils.GIMAPFetcher.GMAIL_ID)
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Ignore message with imap id %s, (gmail id %s)" % (the_id, (gmail_id if gmail_id else "cannot be read")))
else:
raise the_exception #rethrow error
else:
raise the_exception
class IMAPBatchFetcher(object):
"""
Fetch IMAP data in batch
"""
def __init__(self, src, imap_ids, error_report, request, default_batch_size = 100):
"""
constructor
"""
self.src = src
self.imap_ids = imap_ids
self.def_batch_size = default_batch_size
self.request = request
self.error_report = error_report
self.to_fetch = list(imap_ids)
def individual_fetch(self, imap_ids):
"""
        Fetch the ids one by one to isolate the imap_id creating the issue.
        Return the data related to the imap_ids
"""
new_data = {}
for the_id in imap_ids:
try:
single_data = self.src.fetch(the_id, self.request)
new_data.update(single_data)
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
return new_data
def __iter__(self):
return self
def next(self):
"""
Return the next batch of elements
"""
new_data = {}
batch = self.to_fetch[:self.def_batch_size]
if len(batch) <= 0:
raise StopIteration
try:
new_data = self.src.fetch(batch, self.request)
self.to_fetch = self.to_fetch[self.def_batch_size:]
return new_data
except imaplib.IMAP4.error, _:
new_data = self.individual_fetch(batch)
return new_data
def reset(self):
"""
Restart from the beginning
"""
self.to_fetch = self.imap_ids
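# Hedged usage sketch of the batch fetcher above (src, imap_ids, error_report and
# process are illustrative): iterating yields one dict of fetched attributes per
# batch, falling back to one-by-one fetches when a whole batch fails.
#   fetcher = IMAPBatchFetcher(src, imap_ids, error_report,
#                              imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA,
#                              default_batch_size=100)
#   for batch in fetcher:
#       process(batch)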
class GMVaulter(object):
"""
Main object operating over gmail
"""
NB_GRP_OF_ITEMS = 1400
EMAIL_RESTORE_PROGRESS = 'email_last_id.restore'
CHAT_RESTORE_PROGRESS = 'chat_last_id.restore'
EMAIL_SYNC_PROGRESS = 'email_last_id.sync'
CHAT_SYNC_PROGRESS = 'chat_last_id.sync'
OP_EMAIL_RESTORE = "EM_RESTORE"
OP_EMAIL_SYNC = "EM_SYNC"
OP_CHAT_RESTORE = "CH_RESTORE"
OP_CHAT_SYNC = "CH_SYNC"
OP_TO_FILENAME = { OP_EMAIL_RESTORE : EMAIL_RESTORE_PROGRESS,
OP_EMAIL_SYNC : EMAIL_SYNC_PROGRESS,
OP_CHAT_RESTORE : CHAT_RESTORE_PROGRESS,
OP_CHAT_SYNC : CHAT_SYNC_PROGRESS
}
def __init__(self, db_root_dir, host, port, login, \
credential, read_only_access = True, use_encryption = False): #pylint:disable-msg=R0913,R0914
"""
constructor
"""
self.db_root_dir = db_root_dir
#create dir if it doesn't exist
gmvault_utils.makedirs(self.db_root_dir)
#keep track of login email
self.login = login
# create source and try to connect
self.src = imap_utils.GIMAPFetcher(host, port, login, credential, \
readonly_folder = read_only_access)
self.src.connect()
LOG.debug("Connected")
self.use_encryption = use_encryption
#to report gmail imap problems
self.error_report = { 'empty' : [] ,
'cannot_be_fetched' : [],
'emails_in_quarantine' : [],
'reconnections' : 0,
'key_error' : []}
#instantiate gstorer
self.gstorer = gmvault_db.GmailStorer(self.db_root_dir, self.use_encryption)
        #timer used to measure the time spent in the different operations
self.timer = gmvault_utils.Timer()
@classmethod
def get_imap_request_btw_2_dates(cls, begin_date, end_date):
"""
Return the imap request for those 2 dates
"""
imap_req = 'Since %s Before %s' % (gmvault_utils.datetime2imapdate(begin_date), gmvault_utils.datetime2imapdate(end_date))
return imap_req
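    # Hedged example of the request built above, assuming
    # gmvault_utils.datetime2imapdate renders dates as e.g. '01-Jan-2012':
    #   >>> GMVaulter.get_imap_request_btw_2_dates(datetime.date(2012, 1, 1), datetime.date(2012, 2, 1))
    #   'Since 01-Jan-2012 Before 01-Feb-2012'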
def get_operation_report(self):
"""
Return the error report
"""
the_str = "\n================================================================\n"\
"%s operation performed in %s.\n" \
"Number of reconnections: %d.\nNumber of emails quarantined: %d.\n" \
"Number of emails that could not be fetched: %d.\n" \
"Number of emails that were returned empty by gmail: %d.\n"\
"Number of emails without label information returned by gmail: %d.\n"\
"================================================================" \
% (self.error_report['operation'], \
self.error_report['operation_time'], \
self.error_report['reconnections'], \
len(self.error_report['emails_in_quarantine']), \
len(self.error_report['cannot_be_fetched']), \
len(self.error_report['empty']), \
len(self.error_report['key_error'])
)
LOG.debug("error_report complete structure = %s" % (self.error_report))
return the_str
@classmethod
def _get_next_date(cls, a_current_date, start_month_beginning = False):
"""
return the next date necessary to build the imap req
"""
if start_month_beginning:
dummy_date = a_current_date.replace(day=1)
else:
dummy_date = a_current_date
# the next date = current date + 1 month
return dummy_date + datetime.timedelta(days=31)
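    # Hedged example of the date stepping above (dates are illustrative):
    # with start_month_beginning=True the current date is first snapped back
    # to the 1st, so adding 31 days always lands in the following month.
    #   >>> GMVaulter._get_next_date(datetime.date(2012, 1, 15), start_month_beginning=True)
    #   datetime.date(2012, 2, 1)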
@classmethod
def check_email_on_disk(cls, a_gstorer, a_id, a_dir = None):
"""
Factory method to create the object if it exists
"""
try:
a_dir = a_gstorer.get_directory_from_id(a_id, a_dir)
if a_dir:
return a_gstorer.unbury_metadata(a_id, a_dir)
except ValueError, json_error:
LOG.exception("Cannot read file %s. Try to fetch the data again" % ('%s.meta' % (a_id)), json_error )
return None
@classmethod
def _metadata_needs_update(cls, curr_metadata, new_metadata, chat_metadata = False):
"""
        Check whether the metadata stored on disk needs to be updated (flags or labels changed)
"""
if curr_metadata[gmvault_db.GmailStorer.ID_K] != new_metadata['X-GM-MSGID']:
raise Exception("Gmail id has changed for %s" % (curr_metadata['id']))
#check flags
prev_set = set(new_metadata['FLAGS'])
for flag in curr_metadata['flags']:
if flag not in prev_set:
return True
else:
prev_set.remove(flag)
if len(prev_set) > 0:
return True
#check labels
prev_labels = set(new_metadata['X-GM-LABELS'])
if chat_metadata: #add gmvault-chats labels
prev_labels.add(gmvault_db.GmailStorer.CHAT_GM_LABEL)
for label in curr_metadata['labels']:
if label not in prev_labels:
return True
else:
prev_labels.remove(label)
if len(prev_labels) > 0:
return True
return False
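    # Hedged example of the comparison above (the dicts are illustrative and
    # truncated to the fields the check looks at; it assumes GmailStorer.ID_K
    # resolves to 'gm_id'): a flag present remotely but absent locally triggers
    # an update.
    #   curr = {'gm_id': 1, 'flags': ['\\Seen'], 'labels': []}
    #   new  = {'X-GM-MSGID': 1, 'FLAGS': ('\\Seen', '\\Flagged'), 'X-GM-LABELS': ()}
    #   GMVaulter._metadata_needs_update(curr, new)  # -> True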
def _check_email_db_ownership(self, ownership_control):
"""
Check email database ownership.
        If ownership control is activated then fail if a new additional owner is added.
        Else, if there is no ownership control, allow one more user and save it in the list of owners.
        Return the number of owners; this will be used to activate or not the db cleaning.
        Activating a db cleaning on a multi-owner db would be a catastrophe as it would delete all
        the emails from the other users.
"""
#check that the gmvault-db is not associated with another user
db_owners = self.gstorer.get_db_owners()
if ownership_control:
if len(db_owners) > 0 and self.login not in db_owners: #db owner should not be different unless bypass activated
raise Exception("The email database %s is already associated with one or many logins: %s."\
" Use option (-m, --multiple-db-owner) if you want to link it with %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
else:
if len(db_owners) == 0:
LOG.critical("Establish %s as the owner of the Gmvault db %s." % (self.login, self.db_root_dir))
elif len(db_owners) > 0 and self.login not in db_owners:
LOG.critical("The email database %s is hosting emails from %s. It will now also store emails from %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
#try to save db_owner in the list of owners
self.gstorer.store_db_owner(self.login)
def _sync_chats(self, imap_req, compress, restart):
"""
sync emails
"""
chat_dir = None
timer = gmvault_utils.Timer() #start local timer for chat
timer.start()
LOG.debug("Before selection")
if self.src.is_visible('CHATS'):
chat_dir = self.src.select_folder('CHATS')
LOG.debug("Selection is finished")
if chat_dir:
imap_ids = self._common_sync(timer, "chat", imap_req, compress, restart)
else:
imap_ids = []
LOG.critical("\nchats synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def _common_sync(self, a_timer, a_type, imap_req, compress, restart):
"""
common syncing method for both emails and chats.
"""
# get all imap ids in All Mail
imap_ids = self.src.search(imap_req)
last_id_file = self.OP_EMAIL_SYNC if a_type == "email" else self.OP_CHAT_SYNC
# check if there is a restart
if restart:
LOG.critical("Restart mode activated for emails. Need to find information in Gmail, be patient ...")
imap_ids = self.get_gmails_ids_left_to_sync(last_id_file, imap_ids, imap_req)
total_nb_msgs_to_process = len(imap_ids) # total number of emails to get
LOG.critical("%d %ss to be fetched." % (total_nb_msgs_to_process, a_type))
nb_msgs_processed = 0
to_fetch = set(imap_ids)
batch_fetcher = IMAPBatchFetcher(self.src, imap_ids, self.error_report, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = \
gmvault_utils.get_conf_defaults().getint("General","nb_messages_per_batch",500))
#choose different bury methods if it is an email or a chat
if a_type == "email":
bury_metadata_fn = self.gstorer.bury_metadata
bury_data_fn = self.gstorer.bury_email
chat_metadata = False
elif a_type == "chat":
bury_metadata_fn = self.gstorer.bury_chat_metadata
bury_data_fn = self.gstorer.bury_chat
chat_metadata = True
else:
raise Exception("Error a_type %s in _common_sync is unknown" % (a_type))
#LAST Thing to do remove all found ids from imap_ids and if ids left add missing in report
for new_data in batch_fetcher:
for the_id in new_data:
if new_data.get(the_id, None):
LOG.debug("\nProcess imap id %s" % ( the_id ))
gid = new_data[the_id].get(imap_utils.GIMAPFetcher.GMAIL_ID, None)
eml_date = new_data[the_id].get(imap_utils.GIMAPFetcher.IMAP_INTERNALDATE, None)
if gid is None or eml_date is None:
LOG.info("Ignore email with id %s. No %s nor %s found in %s." % (the_id, imap_utils.GIMAPFetcher.GMAIL_ID, imap_utils.GIMAPFetcher.IMAP_INTERNALDATE, new_data[the_id]))
self.error_report['empty'].append((the_id, gid if gid else None))
continue #ignore this email and process the next one
if a_type == "email":
the_dir = gmvault_utils.get_ym_from_datetime(eml_date)
elif a_type == "chat":
the_dir = self.gstorer.get_sub_chats_dir()
else:
raise Exception("Error a_type %s in _common_sync is unknown" % (a_type))
LOG.critical("Process %s num %d (imap_id:%s) from %s." % (a_type, nb_msgs_processed, the_id, the_dir))
#decode the labels that are received as utf7 => unicode
try:
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_LABELS] = \
imap_utils.decode_labels(new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_LABELS])
except KeyError, ke:
LOG.info("KeyError, reason: %s. new_data[%s]=%s" % (str(ke), the_id, new_data.get(the_id)))
# try to fetch it individually and replace current info if it fails then raise error.
id_info = None
try:
id_info = batch_fetcher.individual_fetch(the_id)
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_LABELS] = \
imap_utils.decode_labels(id_info[imap_utils.GIMAPFetcher.GMAIL_LABELS])
except Exception, err:
LOG.debug("Error when trying to fetch again information for email id %s. id_info = %s. exception:(%s)" \
% (the_id, id_info, str(err)))
LOG.info("Missing labels information for email id %s. Ignore it\n" % (the_id))
self.error_report['key_error'].append((the_id, new_data.get(the_id)))
continue
LOG.debug("metadata info collected: %s\n" % (new_data[the_id]))
#pass the dir and the ID
curr_metadata = GMVaulter.check_email_on_disk( self.gstorer , \
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID], \
the_dir)
#if on disk check that the data is not different
if curr_metadata:
LOG.debug("metadata for %s already exists. Check if different." % (gid))
if self._metadata_needs_update(curr_metadata, new_data[the_id], chat_metadata):
LOG.debug("%s with imap id %s and gmail id %s has changed. Updated it." % (a_type, the_id, gid))
#restore everything at the moment
gid = bury_metadata_fn(new_data[the_id], local_dir = the_dir)
#update local index id gid => index per directory to be thought out
else:
LOG.debug("On disk metadata for %s is up to date." % (gid))
else:
try:
#get the data
LOG.debug("Get Data for %s." % (gid))
email_data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_DATA_ONLY )
new_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY] = \
email_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY]
LOG.debug("Storing on disk data for %s" % (gid))
# store data on disk within year month dir
gid = bury_data_fn(new_data[the_id], local_dir = the_dir, compress = compress)
#update local index id gid => index per directory to be thought out
LOG.debug("Create and store email with imap id %s, gmail id %s." % (the_id, gid))
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
nb_msgs_processed += 1
#indicate every 50 messages the number of messages left to process
left_emails = (total_nb_msgs_to_process - nb_msgs_processed)
if (nb_msgs_processed % 50) == 0 and (left_emails > 0):
elapsed = a_timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be stored (time estimate %s).==\n" % \
(nb_msgs_processed, \
a_timer.seconds_to_human_time(elapsed), left_emails, \
a_timer.estimate_time_left(nb_msgs_processed, elapsed, left_emails)))
# save the last id every 10 processed emails
if (nb_msgs_processed % 10) == 0:
if gid:
self.save_lastid(last_id_file, gid, eml_date, imap_req)
else:
LOG.info("Could not process message with id %s. Ignore it\n" % (the_id))
self.error_report['empty'].append((the_id, gid if gid else None))
to_fetch -= set(new_data.keys()) #remove all found keys from to_fetch set
for the_id in to_fetch:
# case when gmail IMAP server returns OK without any data whatsoever
# eg. imap uid 142221L ignore it
LOG.info("Could not process imap with id %s. Ignore it\n" % (the_id))
self.error_report['empty'].append((the_id, None))
return imap_ids
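# A small sketch (hypothetical data) of the bookkeeping contract used above:
# the batch fetcher yields {imap_id: metadata} chunks and every id that never
# shows up in any chunk stays in to_fetch and ends up in error_report['empty'].
def _example_batch_bookkeeping():
    to_fetch = set([1, 2, 3])
    batches = [{1: {'X-GM-MSGID': 11}}, {3: {'X-GM-MSGID': 33}}]
    for new_data in batches:
        to_fetch -= set(new_data.keys())
    return to_fetch # set([2]): never returned by the server, reported as empty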
def _sync_emails(self, imap_req, compress, restart):
"""
sync emails
"""
timer = gmvault_utils.Timer()
timer.start()
#select all mail folder using the constant name defined in GIMAPFetcher
self.src.select_folder('ALLMAIL')
imap_ids = self._common_sync(timer, "email", imap_req, compress, restart)
LOG.critical("\nEmails synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def sync(self, imap_req, compress_on_disk = True, \
db_cleaning = False, ownership_checking = True, \
restart = False, emails_only = False, chats_only = False):
"""
sync mode
"""
#check ownership to have one email per db unless user wants different
#save the owner if new
self._check_email_db_ownership(ownership_checking)
if not compress_on_disk:
LOG.critical("Disable compression when storing emails.")
if self.use_encryption:
LOG.critical("Encryption activated. All emails will be encrypted before to be stored.")
LOG.critical("Please take care of the encryption key stored in (%s) or all"\
" your stored emails will become unreadable." \
% (gmvault_db.GmailStorer.get_encryption_key_path(self.db_root_dir)))
self.error_report['operation'] = 'Sync'
self.timer.start() #start syncing emails
now = datetime.datetime.now()
LOG.critical("Start synchronization (%s).\n" % (now.strftime('%Y-%m-%dT%Hh%Mm%Ss')))
if not chats_only:
# backup emails
LOG.critical("Start emails synchronization.")
self._sync_emails(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("Skip emails synchronization.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats synchronization.")
self._sync_chats(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("\nSkip chats synchronization.\n")
#delete from the local db the emails suppressed in Gmail since last sync
self.check_clean_db(db_cleaning)
LOG.debug("Sync operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
self.error_report["operation_time"] = self.timer.seconds_to_human_time(self.timer.elapsed())
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def _delete_sync(self, imap_ids, db_gmail_ids, db_gmail_ids_info, msg_type):
"""
Delete emails or chats from the database if necessary
imap_ids : all remote imap_ids to check
db_gmail_ids_info : info read from metadata
msg_type : email or chat
"""
# optimize nb of items
nb_items = self.NB_GRP_OF_ITEMS if len(imap_ids) >= self.NB_GRP_OF_ITEMS else len(imap_ids)
LOG.critical("Call Gmail to check the stored %ss against the Gmail %ss ids and see which ones have been deleted.\n\n"\
"This might take a few minutes ...\n" % (msg_type, msg_type))
#calculate the list elements to delete
#query nb_items items in one query to minimise number of imap queries
for group_imap_id in itertools.izip_longest(fillvalue=None, *[iter(imap_ids)]*nb_items):
# if None in list remove it
if None in group_imap_id:
group_imap_id = [ im_id for im_id in group_imap_id if im_id != None ]
data = self.src.fetch(group_imap_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
# syntax for 2.7 set comprehension { data[key][imap_utils.GIMAPFetcher.GMAIL_ID] for key in data }
# need to create a list for 2.6
db_gmail_ids.difference_update([data[key].get(imap_utils.GIMAPFetcher.GMAIL_ID) for key in data if data[key].get(imap_utils.GIMAPFetcher.GMAIL_ID)])
if len(db_gmail_ids) == 0:
break
LOG.critical("Will delete %s %s(s) from gmvault db.\n" % (len(db_gmail_ids), msg_type) )
for gm_id in db_gmail_ids:
LOG.critical("gm_id %s not in the Gmail server. Delete it." % (gm_id))
self.gstorer.delete_emails([(gm_id, db_gmail_ids_info[gm_id])], msg_type)
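# The grouping idiom used above zips nb_items copies of one shared iterator;
# a minimal sketch of its behaviour (the None padding of the last group is
# filtered out before the fetch):
def _example_grouper():
    ids = [1, 2, 3, 4, 5]
    return list(itertools.izip_longest(fillvalue=None, *[iter(ids)] * 2))
    # -> [(1, 2), (3, 4), (5, None)]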
def search_on_date(self, a_eml_date):
"""
get eml_date and format it to search
"""
imap_date = gmvault_utils.datetime2imapdate(a_eml_date)
imap_req = "SINCE %s" % (imap_date)
imap_ids = self.src.search({'type':'imap', 'req': imap_req})
return imap_ids
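# A quick sketch of the request built above, assuming a plain datetime:
def _example_search_on_date_req():
    a_eml_date = datetime.datetime(2012, 3, 1)
    return "SINCE %s" % (gmvault_utils.datetime2imapdate(a_eml_date))
    # -> 'SINCE 01-Mar-2012'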
def get_gmails_ids_left_to_sync(self, op_type, imap_ids, imap_req):#pylint:disable-msg=W0613
"""
Get the ids that still needs to be sync
Return a list of ids
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. "\
"This should not happen, send the error to the software developers." % (op_type))
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id.sync file %s doesn't exist.\nSync the full list of backed up emails." %(filepath))
return imap_ids
json_obj = json.load(open(filepath, 'r'))
last_id = json_obj['last_id']
last_id_index = -1
new_gmail_ids = imap_ids
try:
#get imap_id from stored gmail_id
dummy = self.src.search({'type':'imap', 'req':'X-GM-MSGID %s' % (last_id)})
imap_id = dummy[0]
last_id_index = imap_ids.index(imap_id)
LOG.critical("Restart from gmail id %s (imap id %s)." % (last_id, imap_id))
new_gmail_ids = imap_ids[last_id_index:]
except Exception, _: #ignore any exception and try to get all ids in case of problems. pylint:disable=W0703
#element not in keys return current set of keys
LOG.critical("Error: Cannot restore from last restore gmail id. It is not in Gmail."\
" Sync the complete list of gmail ids requested from Gmail.")
return new_gmail_ids
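# Sketch of the restart slice computed above (hypothetical ids): the sync
# restart is inclusive, so the last saved id is fetched again on purpose.
def _example_sync_restart_slice():
    imap_ids = [10, 11, 12, 13]
    last_imap_id = 12 # imap id resolved from the stored gmail id
    return imap_ids[imap_ids.index(last_imap_id):] # [12, 13]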
def check_clean_db(self, db_cleaning):
"""
Check and clean the database (remove file that are not anymore in Gmail)
"""
owners = self.gstorer.get_db_owners()
if not db_cleaning: #decouple the 2 conditions for activating cleaning
LOG.debug("db_cleaning is off so ignore removing deleted emails from disk.")
return
elif len(owners) > 1:
LOG.critical("The Gmvault db hosts emails from the following accounts: %s.\n"\
% (", ".join(owners)))
LOG.critical("Deactivate database cleaning on a multi-owners Gmvault db.")
return
else:
LOG.critical("Look for emails/chats that are in the Gmvault db but not in Gmail servers anymore.\n")
#get gmail_ids from db
LOG.critical("Read all gmail ids from the Gmvault db. It might take a bit of time ...\n")
timer = gmvault_utils.Timer() # needed for enhancing the user information
timer.start()
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids()
LOG.critical("Found %s email(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
#create a set of keys
db_gmail_ids = set(db_gmail_ids_info.keys())
# get all imap ids in All Mail
self.src.select_folder('ALLMAIL') #go to all mail
imap_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL) #search all
LOG.debug("Got %s emails imap_id(s) from the Gmail Server." % (len(imap_ids)))
#delete from the local db the emails suppressed in Gmail since last sync
self._delete_sync(imap_ids, db_gmail_ids, db_gmail_ids_info, 'email')
# get all chats ids
if self.src.is_visible('CHATS'):
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Found %s chat(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
self.src.select_folder('CHATS') #go to chats
chat_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL)
db_chat_ids = set(db_gmail_ids_info.keys())
LOG.debug("Got %s chat imap_ids from the Gmail Server." % (len(chat_ids)))
#delete from the local db the chats suppressed in Gmail since last sync
self._delete_sync(chat_ids, db_chat_ids, db_gmail_ids_info , 'chat')
else:
LOG.critical("Chats IMAP Directory not visible on Gmail. Ignore deletion of chats.")
LOG.critical("\nDeletion checkup done in %s." % (timer.elapsed_human_time()))
def remote_sync(self):
"""
Sync with a remote source (IMAP mirror or cloud storage area)
"""
#sync remotely
pass
def save_lastid(self, op_type, gm_id, eml_date=None, imap_req=None):#pylint:disable-msg=W0613
"""
Save the passed gmid in last_id.restore
For the moment reopen the file every time
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. "
"This should not happen, send the error to the "
"software developers." % op_type)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login,
filename)
with open(filepath, 'w') as f:
#json.dump({
# 'last_id' : gm_id,
# 'date' : gmvault_utils.datetime2e(eml_date) if eml_date else None,
# 'req' : imap_req
# }, the_fd)
json.dump({
'last_id': gm_id,
}, f)
def get_gmails_ids_left_to_restore(self, op_type, db_gmail_ids_info):
"""
Get the ids that still needs to be restored
Return a dict key = gm_id, val = directory
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. This should "
"not happen, send the error to the software "
"developers." % op_type)
#filepath = '%s/%s_%s' % (gmvault_utils.get_home_dir_path(), self.login, filename)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id restore file %s doesn't exist.\nRestore the full list of backed up emails." %(filepath))
return db_gmail_ids_info
with open(filepath, 'r') as f:
json_obj = json.load(f)
last_id = json_obj['last_id']
last_id_index = -1
try:
keys = db_gmail_ids_info.keys()
last_id_index = keys.index(last_id)
LOG.critical("Restart from gmail id %s." % last_id)
except ValueError, _:
#element not in keys return current set of keys
LOG.error("Cannot restore from last restore gmail id. It is not in the disk database.")
new_gmail_ids_info = collections_utils.OrderedDict()
if last_id_index != -1:
for key in db_gmail_ids_info.keys()[last_id_index+1:]:
new_gmail_ids_info[key] = db_gmail_ids_info[key]
else:
new_gmail_ids_info = db_gmail_ids_info
return new_gmail_ids_info
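# Same idea for restore (hypothetical ids) but exclusive this time: the
# restart resumes at the element following the last restored gmail id.
def _example_restore_restart_slice():
    keys = ['gid_a', 'gid_b', 'gid_c']
    last_id = 'gid_b'
    return keys[keys.index(last_id) + 1:] # ['gid_c']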
def restore(self, pivot_dir = None, extra_labels = [], \
restart = False, emails_only = False, chats_only = False): #pylint:disable=W0102
"""
Restore emails in a gmail account
"""
self.error_report['operation'] = 'Sync'
self.timer.start() #start restoring
now = datetime.datetime.now()
LOG.critical("Start restoration (%s).\n" % (now.strftime('%Y-%m-%dT%Hh%Mm%Ss')))
if not chats_only:
# backup emails
LOG.critical("Start emails restoration.\n")
if pivot_dir:
LOG.critical("Quick mode activated. Will only restore all emails since %s.\n" % (pivot_dir))
self.restore_emails(pivot_dir, extra_labels, restart)
else:
LOG.critical("Skip emails restoration.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats restoration.\n")
self.restore_chats(extra_labels, restart)
else:
LOG.critical("Skip chats restoration.\n")
LOG.debug("Restore operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
self.error_report["operation_time"] = self.timer.seconds_to_human_time(self.timer.elapsed())
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def restore_chats(self, extra_labels = [], restart = False): #pylint:disable=W0102
"""
restore chats
"""
LOG.critical("Restore chats in gmail account %s." % (self.login) )
LOG.critical("Read chats info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Total number of chats to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_CHAT_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all chats id left to restore. Still %s chats to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
reserved_labels_map = gmvault_utils.get_conf_defaults().get_dict("Restore", "reserved_labels_map", \
{ u'migrated' : u'gmv-migrated', u'\muted' : u'gmv-muted' })
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General", "restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General", "nb_messages_per_restore_batch", 100)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = [im_id for im_id in group_imap_ids if im_id != None]
labels_to_create = set(extra_labels) #create label set, add xtra labels in set
LOG.critical("Processing next batch of %s chats.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
try:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing chat content with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
LOG.debug("label = %s\n" % (label))
if label.lower() in reserved_labels_map.keys(): #exclude creation of migrated label
n_label = reserved_labels_map.get(label.lower(), "gmv-default-label")
LOG.info("Apply label '%s' instead of '%s' (lower or uppercase)"\
" because it is a Gmail reserved label." % (n_label, label))
label = n_label
labels_to_apply[str(label)] = imap_id #add in multimap
# get list of labels to create (do a union with labels to create)
#labels_to_create.update([ label for label in labels if label not in existing_labels])
labels_to_create.update([ label for label in labels_to_apply.keys() \
if label not in existing_labels])
for ex_label in extra_labels:
labels_to_apply[ex_label] = imap_id
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for chats ids %s." % (group_imap_ids))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of chats.")
try:
LOG.debug("Changing directory. Going into ALLMAIL")
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
if isinstance(err, imap_utils.LabelError) and err.ignore() == True:
LOG.critical("Ignore labelling: %s" % (err))
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
elif isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error ignore labelling and continue the restore
LOG.critical("Ignore labelling")
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
#indicate every 10 messages the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d chats in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
# save id every nb_items restored emails
# add the last treated gm_id
self.save_lastid(self.OP_CHAT_RESTORE, last_id)
return self.error_report
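# A sketch of the label multimap used above, assuming SetMultimap accumulates
# every imap uid assigned to the same key (which is how apply_labels_to
# consumes it: one STORE per label for the whole batch).
def _example_labels_multimap():
    labels_to_apply = collections_utils.SetMultimap()
    labels_to_apply['work'] = 101
    labels_to_apply['work'] = 102
    return labels_to_apply['work'] # both uids, labelled with one single call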
def restore_emails(self, pivot_dir = None, extra_labels = [], restart = False):
"""
restore emails in a gmail account, using batching to group the restore.
If you are not in the "All Mail" folder, pushing emails is extremely fast,
but labels cannot be reapplied from there because the returned uid depends on the folder.
On the other hand, labels can be restored in batches, which saves a lot of time.
The idea is therefore to push a batch of emails one by one outside of "All Mail",
recording the uid of each, then build a dict of label => uid_list and, after changing dir, send a single store command per label.
"""
LOG.critical("Restore emails in gmail account %s." % (self.login) )
LOG.critical("Read email info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids(pivot_dir)
LOG.critical("Total number of elements to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_EMAIL_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all emails id left to restore. Still %s emails to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
reserved_labels_map = gmvault_utils.get_conf_defaults().get_dict("Restore", "reserved_labels_map", { u'migrated' : u'gmv-migrated', u'\muted' : u'gmv-muted' })
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General", "restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General", "nb_messages_per_restore_batch", 80)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = [im_id for im_id in group_imap_ids if im_id != None]
labels_to_create = set(extra_labels) #create label set and add extra labels to apply to all emails
LOG.critical("Processing next batch of %s emails.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
try:
LOG.debug("Unbury email with gm_id %s." % (gm_id))
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing email body with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
if label != "\\Starred":
#LOG.debug("label = %s\n" % (label.encode('utf-8')))
LOG.debug("label = %s\n" % (label))
if label.lower() in reserved_labels_map.keys(): #exclude creation of migrated label
n_label = reserved_labels_map.get(label.lower(), "gmv-default-label")
LOG.info("Apply label '%s' instead of '%s' (lower or uppercase)"\
" because it is a Gmail reserved label." % (n_label, label))
label = n_label
labels_to_apply[label] = imap_id #add item in multimap
# get list of labels to create (do a union with labels to create)
#labels_to_create.update([ label for label in labels if label not in existing_labels])
labels_to_create.update([ label for label in labels_to_apply.keys() \
if label not in existing_labels])
for ex_label in extra_labels:
labels_to_apply[ex_label] = imap_id
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for emails with ids %s." % (group_imap_ids))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of emails.")
try:
LOG.debug("Changing directory. Going into ALLMAIL")
the_timer = gmvault_utils.Timer()
the_timer.start()
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
LOG.debug("Changed dir. Operation time = %s ms" % (the_timer.elapsed_ms()))
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
if isinstance(err, imap_utils.LabelError) and err.ignore() == True:
LOG.critical("Ignore labelling: %s" % (err))
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
elif isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error ignore labelling and continue the restore
LOG.critical("Ignore labelling")
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
#indicate every 10 messages the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be restored "\
"(time estimate %s). ==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
# save the last id after each restored batch
# add the last treated gm_id
self.save_lastid(self.OP_EMAIL_RESTORE, last_id)
return self.error_report
| 56,219
|
Python
|
.py
| 922
| 43.976139
| 192
| 0.57514
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,310
|
gmvault_utils.py
|
gaubert_gmvault/src/gmv/gmvault_utils.py
|
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os
import re
import datetime
import time
import calendar
import fnmatch
import functools
import StringIO
import sys
import traceback
import random
import locale
import urllib
import chardet
import gmv.log_utils as log_utils
import gmv.conf.conf_helper
import gmv.gmvault_const as gmvault_const
LOG = log_utils.LoggerFactory.get_logger('gmvault_utils')
GMVAULT_VERSION = "1.9.1"
class memoized(object): #pylint: disable=C0103
"""Decorator that caches a function's return value each time it is called.
If called later with the same arguments, the cached value is returned, and
not re-evaluated.
"""
def __init__(self, func):
self.func = func
self.cache = {}
def __call__(self, *args):
try:
return self.cache[args]
except KeyError:
value = self.func(*args)
self.cache[args] = value
return value
except TypeError:
# uncachable -- for instance, passing a list as an argument.
# Better to not cache than to blow up entirely.
return self.func(*args)
def __repr__(self):
"""Return the function's docstring."""
return self.func.__doc__
def __get__(self, obj, objtype):
"""Support instance methods."""
return functools.partial(self.__call__, obj)
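# A minimal usage sketch: the decorated function is evaluated once per
# distinct argument tuple, then served from the cache.
@memoized
def _example_memoized_square(a_value):
    return a_value * a_value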
class Curry:
""" Class used to implement the currification (functional programming technic) :
Create a function from another one by instanciating some of its parameters.
For example double = curry(operator.mul,2), res = double(4) = 8
"""
def __init__(self, fun, *args, **kwargs):
self.fun = fun
self.pending = args[:]
self.kwargs = kwargs.copy()
def __call__(self, *args, **kwargs):
if kwargs and self.kwargs:
the_kw = self.kwargs.copy()
the_kw.update(kwargs)
else:
the_kw = kwargs or self.kwargs
return self.fun(*(self.pending + args), **the_kw) #pylint: disable=W0142
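# The example from the docstring above, spelled out:
def _example_curry_double():
    import operator
    double = Curry(operator.mul, 2)
    return double(4) # 8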
LETTERS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
DIGITS = '0123456789'
def make_password(minlength=8, maxlength=16):
"""
generate a random password
"""
length = random.randint(minlength, maxlength)
letters = LETTERS + DIGITS
return ''.join([random.choice(letters) for _ in range(length)])
def get_exception_traceback():
"""
return the exception traceback (stack info and so on) in a string
Args:
None
Returns:
return a string that contains the exception traceback
Raises:
"""
the_file = StringIO.StringIO()
exception_type, exception_value, exception_traceback = sys.exc_info() #IGNORE:W0702
traceback.print_exception(exception_type, exception_value, exception_traceback, file = the_file)
return the_file.getvalue()
MULTI_SPACES_PATTERN = r"\s{2,}"
MULTI_SPACES_RE = re.compile(MULTI_SPACES_PATTERN, flags=re.U) #to support unicode
def remove_consecutive_spaces_and_strip(a_str):
"""
Replace runs of consecutive spaces with a single one and strip the result.
e.g. "two  spaces" becomes "two spaces"
"""
#return re.sub("\s{2,}", " ", a_str, flags=re.U).strip()
return MULTI_SPACES_RE.sub(u" ", a_str).strip()
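# A short sketch of the squeeze-and-strip behaviour:
def _example_squeeze_spaces():
    return remove_consecutive_spaces_and_strip(u" two   spaces ") # u'two spaces'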
TIMER_SUFFIXES = ['y', 'w', 'd', 'h', 'm', 's']
class Timer(object):
"""
Timer class to measure time.
Also provides a few time utilities.
"""
def __init__(self):
self._start = None
def start(self):
"""
start the timer
"""
self._start = time.time()
def reset(self):
"""
reset the timer to 0
"""
self._start = time.time()
def elapsed(self):
"""
return elapsed time in sec
"""
now = time.time()
return int(round(now - self._start))
def elapsed_ms(self):
"""
return elapsed time up to micro second
"""
return time.time() - self._start
def elapsed_human_time(self, suffixes=TIMER_SUFFIXES, add_s=False, separator=' '):#pylint:disable=W0102
"""
Takes an amount of seconds and turns it into a human-readable amount of time.
"""
seconds = self.elapsed()
return self.seconds_to_human_time(seconds, suffixes, add_s, separator)
@classmethod
def estimate_time_left(cls, nb_elem_done, in_sec, still_to_be_done, in_human_time = True):
"""
Naive estimate: extrapolate from the time already spent to estimate how long the remaining items will take.
"""
if in_human_time:
return cls.seconds_to_human_time(int(round(float(still_to_be_done * in_sec)/nb_elem_done)))
else:
return int(round(float(still_to_be_done * in_sec)/nb_elem_done))
@classmethod
def seconds_to_human_time(cls, seconds, suffixes=TIMER_SUFFIXES, add_s=False, separator=' '):#pylint:disable=W0102
"""
convert seconds to human time
"""
# the formatted time string to be returned
the_time = []
# the pieces of time to iterate over (days, hours, minutes, etc)
# - the first piece in each tuple is the suffix (d, h, w)
# - the second piece is the length in seconds (a day is 60s * 60m * 24h)
parts = [(suffixes[0], 60 * 60 * 24 * 7 * 52),
(suffixes[1], 60 * 60 * 24 * 7),
(suffixes[2], 60 * 60 * 24),
(suffixes[3], 60 * 60),
(suffixes[4], 60),
(suffixes[5], 1)]
if seconds < 1: #less than a second case
return "less than a second"
# for each time piece, grab the value and remaining seconds, and add it to
# the time string
for suffix, length in parts:
value = seconds / length
if value > 0:
seconds = seconds % length
the_time.append('%s%s' % (str(value),
(suffix, (suffix, suffix + 's')[value > 1])[add_s]))
if seconds < 1:
break
return separator.join(the_time)
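# A quick illustration of the conversion above:
def _example_seconds_to_human_time():
    return Timer.seconds_to_human_time(3661) # '1h 1m 1s'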
ZERO = datetime.timedelta(0)
# A UTC class.
class UTC(datetime.tzinfo):
"""UTC Timezone"""
def utcoffset(self, a_dt): #pylint: disable=W0613
''' return utcoffset '''
return ZERO
def tzname(self, a_dt): #pylint: disable=W0613
''' return tzname '''
return "UTC"
def dst(self, a_dt): #pylint: disable=W0613
''' return dst '''
return ZERO
# pylint: enable-msg=W0613
UTC_TZ = UTC()
def get_ym_from_datetime(a_datetime):
"""
return year month from datetime
"""
if a_datetime:
return a_datetime.strftime('%Y-%m')
return None
MONTH_CONV = { 1: 'Jan', 4: 'Apr', 6: 'Jun', 7: 'Jul', 10: 'Oct' , 12: 'Dec',
2: 'Feb', 5: 'May', 8: 'Aug', 9: 'Sep', 11: 'Nov',
3: 'Mar'}
REVERSE_MONTH_CONV = { 'Jan' : 1, 'Apr' : 4, 'Jun' : 6, 'Jul': 7, 'Oct': 10 , 'Dec':12,
'Feb' : 2, 'May' : 5, 'Aug' : 8, 'Sep': 9, 'Nov': 11,
'Mar' : 3}
MONTH_YEAR_PATTERN = r'(?P<year>(18|19|[2-5][0-9])\d\d)[-/.](?P<month>(0[1-9]|1[012]|[1-9]))'
MONTH_YEAR_RE = re.compile(MONTH_YEAR_PATTERN)
def compare_yymm_dir(first, second):
"""
Compare directory names in the form of Year-Month
Return 1 if first > second
0 if equal
-1 if second > first
"""
matched = MONTH_YEAR_RE.match(first)
if matched:
first_year = int(matched.group('year'))
first_month = int(matched.group('month'))
first_val = (first_year * 1000) + first_month
else:
raise Exception("Invalid Year-Month expression (%s). Please correct it to be yyyy-mm" % (first))
matched = MONTH_YEAR_RE.match(second)
if matched:
second_year = int(matched.group('year'))
second_month = int(matched.group('month'))
second_val = (second_year * 1000) + second_month
else:
raise Exception("Invalid Year-Month expression (%s). Please correct it" % (second))
if first_val > second_val:
return 1
elif first_val == second_val:
return 0
else:
return -1
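# Sketch of the comparison contract:
def _example_compare_yymm_dir():
    return compare_yymm_dir('2011-12', '2012-01') # -1: the second is more recent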
def cmp_to_key(mycmp):
"""
Taken from functools. Not in all python versions so had to redefine it
Convert a cmp= function into a key= function
"""
class Key(object): #pylint: disable=R0903
"""Key class"""
def __init__(self, obj, *args): #pylint: disable=W0613
self.obj = obj
def __lt__(self, other):
return mycmp(self.obj, other.obj) < 0
def __gt__(self, other):
return mycmp(self.obj, other.obj) > 0
def __eq__(self, other):
return mycmp(self.obj, other.obj) == 0
def __le__(self, other):
return mycmp(self.obj, other.obj) <= 0
def __ge__(self, other):
return mycmp(self.obj, other.obj) >= 0
def __ne__(self, other):
return mycmp(self.obj, other.obj) != 0
def __hash__(self):
raise TypeError('hash not implemented')
return Key
def get_all_dirs_posterior_to(a_dir, dirs):
"""
get all directories posterior or equal to a_dir, in sorted order
"""
#sort the passed dirs list and return all dirs posterior to a_dir
return [ name for name in sorted(dirs, key=cmp_to_key(compare_yymm_dir))\
if compare_yymm_dir(a_dir, name) <= 0 ]
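# A small sketch (hypothetical dir names); note that a_dir itself is kept:
def _example_dirs_posterior():
    dirs = ['2012-03', '2012-01', '2012-02']
    return get_all_dirs_posterior_to('2012-02', dirs) # ['2012-02', '2012-03']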
def get_all_dirs_under(root_dir, ignored_dirs = []):#pylint:disable=W0102
"""
Get all directory names under (1 level only) the root dir
params:
root_dir : the dir to look under
ignored_dirs: ignore the dir if it is in this list of dirnames
"""
return [ name for name in os.listdir(root_dir) \
if ( os.path.isdir(os.path.join(root_dir, name)) \
and (name not in ignored_dirs) ) ]
def datetime2imapdate(a_datetime):
"""
Transform a datetime into the date format expected in an IMAP request
"""
if a_datetime:
month = MONTH_CONV[a_datetime.month]
pattern = '%%d-%s-%%Y' % (month)
return a_datetime.strftime(pattern)
def e2datetime(a_epoch):
"""
convert epoch time in datetime
Args:
a_epoch: the epoch time to convert
Returns: a datetime
"""
#utcfromtimestamp does not work properly with decimals.
# use floor to create the datetime
# decim = decimal.Decimal('%s' % (a_epoch)).quantize(decimal.Decimal('.001'), rounding=decimal.ROUND_DOWN)
new_date = datetime.datetime.utcfromtimestamp(a_epoch)
return new_date
def get_utcnow_epoch():
return datetime2e(datetime.datetime.utcnow())
def datetime2e(a_date):
"""
convert datetime in epoch
Beware the datetime as to be in UTC otherwise you might have some surprises
Args:
a_date: the datetime to convert
Returns: a epoch time
"""
return calendar.timegm(a_date.timetuple())
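# Round-trip sketch: for a whole-second UTC datetime the two helpers invert
# each other (microseconds would be dropped by timetuple()).
def _example_epoch_roundtrip():
    a_date = datetime.datetime(2012, 1, 1, 12, 0, 0)
    return e2datetime(datetime2e(a_date)) == a_date # True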
def contains_any(string, char_set):
"""Check whether 'string' contains ANY of the chars in 'set'"""
return 1 in [c in string for c in char_set]
def makedirs(a_path):
""" my own version of makedir """
if os.path.isdir(a_path):
# it already exists so return
return
elif os.path.isfile(a_path):
raise OSError("a file with the same name as the desired dir, '%s', already exists."%(a_path))
os.makedirs(a_path)
def __rmgeneric(path, __func__):
""" private function that is part of delete_all_under """
try:
__func__(path)
#print 'Removed ', path
except OSError, (_, strerror): #IGNORE:W0612
print """Error removing %(path)s, %(error)s """ % {'path' : path, 'error': strerror }
def delete_all_under(path, delete_top_dir = False):
""" delete all files and directories under path """
if not os.path.isdir(path):
return
files = os.listdir(path)
for the_f in files:
fullpath = os.path.join(path, the_f)
if os.path.isfile(fullpath):
new_f = os.remove
__rmgeneric(fullpath, new_f)
elif os.path.isdir(fullpath):
delete_all_under(fullpath)
new_f = os.rmdir
__rmgeneric(fullpath, new_f)
if delete_top_dir:
os.rmdir(path)
def ordered_dirwalk(a_dir, a_file_wildcards='*', a_dir_ignore_list=(), sort_func=sorted):
"""
Walk a directory tree, using a generator.
This implementation returns only the files in all the subdirectories.
Beware, this is a generator.
Args:
a_dir: A root directory from where to list
a_file_wildcards: Filtering wildcards a la unix
a_dir_ignore_list: subdirectory names to skip while walking
"""
sub_dirs = []
for the_file in sort_func(os.listdir(a_dir)):
fullpath = os.path.join(a_dir, the_file)
if os.path.isdir(fullpath):
sub_dirs.append(fullpath) #it is a sub_dir
elif fnmatch.fnmatch(fullpath, a_file_wildcards):
yield fullpath
#iterate over sub_dirs
for sub_dir in sort_func(sub_dirs):
if os.path.basename(sub_dir) not in a_dir_ignore_list:
for p_elem in ordered_dirwalk(sub_dir, a_file_wildcards):
yield p_elem
else:
LOG.debug("Ignore subdir %s" % sub_dir)
def dirwalk(a_dir, a_wildcards='*'):
"""
return all files and dirs in a directory
"""
for root, _, files in os.walk(a_dir):
for the_file in files:
if fnmatch.fnmatch(the_file, a_wildcards):
yield os.path.join(root, the_file)
def ascii_hex(a_str):
"""
transform any string into its hex values
"""
new_str = ""
for the_char in a_str:
new_str += "%s=hex[%s]," % (the_char, hex(ord(the_char)))
return new_str
def profile_this(fn):
""" profiling decorator """
def profiled_fn(*args, **kwargs):
import cProfile
fpath = fn.__name__ + ".profile"
prof = cProfile.Profile()
ret = prof.runcall(fn, *args, **kwargs)
prof.dump_stats(fpath)
return ret
return profiled_fn
DEFAULT_ENC_LIST = ['ascii','iso-8859-1','iso-8859-2','windows-1250','windows-1252','utf-8']
class GuessEncoding(Exception): pass # Guess encoding error
def guess_encoding(byte_str, use_encoding_list=True):
"""
byte_str: byte string
use_encoding_list: whether to first try a brute-force guess with the predefined encoding list
Try to guess the encoding of byte_str.
If the encoding cannot be found, return utf-8.
"""
encoding = None
if type(byte_str) == type(unicode()):
raise GuessEncoding("Error. The passed string is a unicode string and not a byte string")
if use_encoding_list:
encoding_list = get_conf_defaults().get('Localisation', 'encoding_guess_list', DEFAULT_ENC_LIST)
for enc in encoding_list:
try:
unicode(byte_str ,enc,"strict")
encoding = enc
except:
pass
else:
break
if not encoding:
#detect encoding with chardet
enc = chardet.detect(byte_str)
if enc and enc.get("encoding") != None:
encoding = enc.get("encoding")
else:
LOG.debug("Force encoding to utf-8")
encoding = "utf-8"
return encoding
def convert_to_unicode(a_str):
"""
Convert a byte string to unicode (used for email content, not terminal strings)
:param a_str:
:return: unicode string
"""
encoding = None
#if email encoding is forced no more guessing
email_encoding = get_conf_defaults().get('Localisation', 'email_encoding', None)
try:
if email_encoding:
encoding = email_encoding
else:
LOG.debug("Guess encoding")
#guess encoding based on the beginning of the string (first 20000 characters)
encoding = guess_encoding(a_str[:20000], use_encoding_list = False)
LOG.debug("Convert to %s" % (encoding))
u_str = unicode(a_str, encoding = encoding) #convert to unicode with given encoding
except Exception, e:
LOG.debug("Exception: %s" % (e))
LOG.info("Warning: Guessed encoding = (%s). Ignore those characters" % (encoding if encoding else "Not defined"))
#try utf-8
u_str = unicode(a_str, encoding="utf-8", errors='replace')
return u_str
def convert_argv_to_unicode(a_str):
"""
Convert command line individual arguments (argv to unicode)
"""
#if str is already unicode do nothing and return the str
if type(a_str) == type(unicode()):
return a_str
#encoding can be forced from conf
terminal_encoding = get_conf_defaults().get('Localisation', 'terminal_encoding', None)
if not terminal_encoding:
terminal_encoding = locale.getpreferredencoding() #use it to find the encoding for text terminal
LOG.debug("encoding found with locale.getpreferredencoding()")
if not terminal_encoding:
loc = locale.getdefaultlocale() #try to get defaultlocale()
if loc and len(loc) == 2:
LOG.debug("encoding found with locale.getdefaultlocale()")
terminal_encoding = loc[1]
else:
LOG.debug("Cannot Terminal encoding using locale.getpreferredencoding() and locale.getdefaultlocale(), loc = %s. Use chardet to try guessing the encoding." % (loc if loc else "None"))
terminal_encoding = guess_encoding(a_str)
else:
LOG.debug("Use terminal encoding forced from the configuration file.")
try:
LOG.debug("terminal encoding = %s." % (terminal_encoding))
#decode byte string to unicode and fails in case of error
u_str = a_str.decode(terminal_encoding)
LOG.debug("unicode_escape val = %s." % (u_str.encode('unicode_escape')))
LOG.debug("raw unicode = %s." % (u_str))
except Exception, err:
LOG.error(err)
get_exception_traceback()
LOG.info("Convertion of %s from %s to a unicode failed. Will now convert to unicode using utf-8 encoding and ignoring errors (non utf-8 characters will be eaten)." % (a_str, terminal_encoding))
LOG.info("Please set properly the Terminal encoding or use the [Localisation]:terminal_encoding property to set it.")
u_str = unicode(a_str, encoding='utf-8', errors='ignore')
return u_str
@memoized
def get_home_dir_path():
"""
Get the gmvault dir
"""
gmvault_dir = os.getenv("GMVAULT_DIR", None)
# check by default in user[HOME]
if not gmvault_dir:
LOG.debug("no ENV variable $GMVAULT_DIR defined. Set by default $GMVAULT_DIR to $HOME/.gmvault (%s/.gmvault)" \
% (os.getenv("HOME",".")))
gmvault_dir = "%s/.gmvault" % (os.getenv("HOME", "."))
#create dir if not there
makedirs(gmvault_dir)
return gmvault_dir
CONF_FILE = "gmvault_defaults.conf"
@memoized
def get_conf_defaults():
"""
Return the conf object containing the defaults stored in HOME/gmvault_defaults.conf
Beware it is memoized
"""
filepath = get_conf_filepath()
if filepath:
os.environ[gmv.conf.conf_helper.Conf.ENVNAME] = filepath
the_cf = gmv.conf.conf_helper.Conf.get_instance()
LOG.debug("Load defaults from %s" % (filepath))
return the_cf
else:
return gmv.conf.conf_helper.MockConf() #return a MockConf object that will provide the defaults
#VERSION DETECTION PATTERN
VERSION_PATTERN = r'\s*conf_version=\s*(?P<version>\S*)\s*'
VERSION_RE = re.compile(VERSION_PATTERN)
#list of version conf to not overwrite with the next
VERSIONS_TO_PRESERVE = [ '1.9' ]
def _get_version_from_conf(home_conf_file):
"""
Extract the conf_version from the config file, to check whether it comes from an older version and needs to be replaced
"""
#check version
ver = None
with open(home_conf_file) as curr_fd:
for line in curr_fd:
line = line.strip()
matched = VERSION_RE.match(line)
if matched:
ver = matched.group('version')
return ver.strip()
return ver
def _create_default_conf_file(home_conf_file):
"""
Write on disk the default file
"""
LOG.critical("Create defaults in %s. Please touch this file only if you know what to do." % home_conf_file)
try:
with open(home_conf_file, "w+") as f:
f.write(gmvault_const.DEFAULT_CONF_FILE)
return home_conf_file
except Exception, err:
#catch all error and let run gmvault with defaults if needed
LOG.critical("Ignore Error when trying to create conf file for defaults in %s:\n%s.\n" % (get_home_dir_path(), err))
LOG.debug("=== Exception traceback ===")
LOG.debug(get_exception_traceback())
LOG.debug("=== End of Exception traceback ===\n")
#on failure, fall through and return None so that built-in defaults are used
@memoized
def get_conf_filepath():
"""
If default file is not present, generate it from scratch.
If it cannot be created, then return None
"""
home_conf_file = "%s/%s" % (get_home_dir_path(), CONF_FILE)
if not os.path.exists(home_conf_file):
return _create_default_conf_file(home_conf_file)
else:
# check if the conf file needs to be replaced
version = _get_version_from_conf(home_conf_file)
if version not in VERSIONS_TO_PRESERVE:
LOG.debug("%s with version %s is too old, overwrite it with the latest file." \
% (home_conf_file, version))
return _create_default_conf_file(home_conf_file)
return home_conf_file
def chunker(seq, size):
"""Returns the contents of `seq` in chunks of up to `size` items."""
return (seq[pos:pos + size] for pos in xrange(0, len(seq), size))
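# Sketch: the last chunk may be shorter than size.
def _example_chunker():
    return list(chunker([1, 2, 3, 4, 5], 2)) # [[1, 2], [3, 4], [5]]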
def escape_url(text):
"""
Escape characters as expected in OAUTH 5.1
:param text: the escaped url
:return: escaped url
"""
return urllib.quote(text, safe='~-._')
def unescape_url(text):
"""
Unescaped characters when needed (see OAUTH 5.1)
:param text:
:return: unescaped url
"""
return urllib.unquote(text)
def format_url_params(params):
"""
Formats given parameters as URL query string.
:param params: a python dict
:return: A URL query string version of the given dict.
"""
param_elements = []
for param in sorted(params.iteritems(), key=lambda x: x[0]):
param_elements.append('%s=%s' % (param[0], escape_url(param[1])))
return '&'.join(param_elements)
| 23,774
|
Python
|
.py
| 589
| 32.20034
| 200
| 0.619291
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,311
|
gmvault_const.py
|
gaubert_gmvault/src/gmv/gmvault_const.py
|
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# Gmvault constants
GMAIL_UNLOCAL_CHATS = [
u'[Gmail]/Chats', u'[Google Mail]/Chats', #en, es, ger, portuguese
u'[Gmail]/Chat', u'[Google Mail]/Chat', #it
u'[Google Mail]/Tous les chats', u'[Gmail]/Tous les chats', # french
u'[Gmail]/Чаты', u'[Google Mail]/Чаты', # russian
u'[Gmail]/チャット', u'[Google Mail]/チャット', # japanese
u'[Google Mail]/Czat', u'[Gmail]/Czat', # polish
u'[Google Mail]/聊天', u'[Gmail]/聊天' , # chinese
u'[Google Mail]/Bate-papos', u'[Gmail]/Bate-papos', #portuguese brazil
] # unlocalised Chats names
#The default conf file
DEFAULT_CONF_FILE = """#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Gmvault Configuration file containing Gmvault defaults.
# DO NOT CHANGE IT IF YOU ARE NOT AN ADVANCED USER
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
[Sync]
quick_days=10
[Restore]
# it is 10 days but currently it will always be the current month or the last 2 months
# the notion of days is not yet apparent in restore (only months).
quick_days=10
reserved_labels_map = { u'migrated' : u'gmv-migrated', u'\muted' : u'gmv-muted' }
[General]
limit_per_chat_dir=2000
errors_if_chat_not_visible=False
nb_messages_per_batch=500
nb_messages_per_restore_batch=80
restore_default_location=DRAFTS
keep_in_bin=False
enable_imap_compression=False
[Localisation]
#example with Russian
chat_folder=[ u'[Google Mail]/Чаты', u'[Gmail]/Чаты' ]
#uncomment if you need to force the terminal_encoding
#terminal_encoding='utf-8'
#encoding list used to guess heuristicly the encoding
encoding_guess_list=['ascii','iso-8859-1','iso-8859-2','windows-1250','windows-1252','utf-8']
#uncomment if you need to force the data email encoding when it is stored
#email_encoding='utf-8'
#Do not touch any parameters below as it could force an overwrite of this file
[VERSION]
conf_version=1.9.1
[GoogleOauth2]
scope=https://mail.google.com/
# The URL root for accessing Google Accounts.
google_accounts_base_url=https://accounts.google.com
# Hardcoded dummy redirect URI for non-web apps.
redirect_uri=urn:ietf:wg:oauth:2.0:oob
#identifier and secret in app mode for gmvault
gmvault_client_id=1070918343777-0eecradokiu8i77qfo8e3stbi0mkrtog.apps.googleusercontent.com
gmvault_client_secret=IVkl_pglv5cXzugpmnRNqtT7
#set environment variables for the program locally
#they will be read only once the conf file has been loaded
[ENV]
#by default it is ~/.gmvault
GMV_IMAP_DEBUG=0
"""
| 3,501
|
Python
|
.py
| 73
| 42.958904
| 93
| 0.691578
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,312
|
conf_helper.py
|
gaubert_gmvault/src/gmv/conf/conf_helper.py
|
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import sys
import os
import re
import codecs
import gmv.conf.exceptions as exceptions
import gmv.conf.utils.struct_parser as struct_parser
class ResourceError(Exception):
"""
Base class for resource exceptions
"""
def __init__(self, a_msg):
super(ResourceError, self).__init__(a_msg)
class Resource(object):
"""
Class to read a resource.
It can be read first from the command line, then from the ENV as an env variable and finally from a conf file
"""
def __init__(self, a_cli_argument=None, a_env_variable=None, a_conf_property=None):
"""
Default Constructor.
It is important to understand that there is precedence between the different ways to set the ressource:
- get from the command line if defined otherwise get from the Env variable if defined otherwise get from the conf file otherwise error
Args:
a_cli_argument : The command line argument name
a_env_variable : The env variable name used for this ressource
a_conf_property: It should be a tuple containing two elements (group,property)
"""
self._cli_arg = a_cli_argument.lower() if a_cli_argument is not None else None
self._env_var = a_env_variable.upper() if a_env_variable is not None else None
if a_conf_property is not None:
(self._conf_group, self._conf_property) = a_conf_property
else:
self._conf_group = None
self._conf_property = None
def set_cli_argument(self, a_cli_argument):
"""cli_argument setter"""
self._cli_arg = a_cli_argument.lower()
def set_env_variable(self, a_env_variable):
"""env_variable setter"""
self._env_var = a_env_variable
@classmethod
def _get_standardized_cli_argument(cls, a_tostrip):
"""
remove -- or - from the command line argument and add a -- prefix to standardize the cli argument
"""
the_str = a_tostrip
while the_str.startswith('-'):
the_str = the_str[1:]
return '--%s' % (the_str)
def _get_value_from_command_line(self):
"""
internal method for extracting the value from the command line.
All command line arguments must be lower case (unix style).
To Do: support short and long cli args.
Returns:
the Value if defined otherwise None
"""
# check precondition
if self._cli_arg == None:
return None
the_s = Resource._get_standardized_cli_argument(self._cli_arg)
# look for cliArg in sys argv
for arg in sys.argv:
if arg.lower() == the_s:
i = sys.argv.index(arg)
#print "i = %d, val = %s\n"%(i,sys.argv[i])
if len(sys.argv) <= i + 1:
# no value follows the argument on the command line so quit
print "Resource: Commandline argument %s has no value\n" % (self._cli_arg)
return None
else:
#print "i+1 = %d, val = %s\n"%(i+1,sys.argv[i+1])
return sys.argv[i+1]
def _get_value_from_env(self):
"""
internal method for extracting the value from the env.
All supported ENV variables should be in uppercase.
Returns:
the Value if defined otherwise None
"""
# precondition
if self._env_var == None:
return None
return os.environ.get(self._env_var, None)
def _get_from_conf(self):
"""
Try to read the info from the Configuration if possible
"""
if self._conf_group and self._conf_property:
if Conf.can_be_instanciated():
return Conf.get_instance().get(self._conf_group, self._conf_property)
return None
def get_value(self, a_raise_exception=True):
"""
Return the value of the Resource as a string.
- get from the command line if defined otherwise get from the Env variable if defined otherwise get from the conf file otherwise error
Arguments:
a_raise_exception: flag indicating if an exception should be raised if the value is not found
Returns:
value of the Resource as a String
Raises:
exception ResourceError if the a_raise_exception flag is activated
"""
# get a value using precedence rule 1) command-line, 2) ENV, 3) Conf
val = self._get_value_from_command_line()
if val is None:
val = self._get_value_from_env()
if val is None:
val = self._get_from_conf()
if (val is None) and a_raise_exception:
the_str = "Cannot find "
add_nor = 0
if self._cli_arg is not None:
the_str += "commandline argument %s" % (self._cli_arg)
add_nor += 1
if self._env_var is not None:
if add_nor > 0:
the_str += ", nor "
the_str += "the Env Variable %s" % (self._env_var)
add_nor += 1
if self._conf_group is not None:
if add_nor > 0:
the_str += ", nor "
the_str += "the Conf Group:[%s] and Property=%s" % (self._conf_group, self._conf_property)
add_nor += 1
if add_nor == 0:
the_str += " any defined commandline argument, nor any env variable or"\
" Conf group and properties. They are all None, fatal error"
else:
the_str += ". One of them should be defined"
raise ResourceError(the_str)
return val
def _get(self, conv):
"""
Private _get method used to convert to the right expected type (int,float or boolean).
Strongly inspired by ConfigParser.py
Returns:
value converted into the asked type
Raises:
exception ValueError if conversion issue
"""
return conv(self.get_value())
def get_value_as_int(self):
"""
Return the value as an int
Returns:
value converted into the asked type
Raises:
exception ValueError if conversion issue
"""
return self._get(int)
def get_value_as_float(self):
"""
Return the value as a float
Returns:
value converted into the asked type
Raises:
exception ValueError if conversion issue
"""
return self._get(float)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def get_value_as_boolean(self):
"""
Return the value as a boolean
Returns:
value converted into the asked type
Raises:
exception ValueError if conversion issue
"""
val = self.get_value()
if val.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % val
return self._boolean_states[val.lower()]
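# A minimal precedence sketch (hypothetical names, assuming --gmv_example is
# not on the command line): CLI beats ENV, which beats the conf file.
def _example_resource_precedence():
    os.environ['GMV_EXAMPLE_VAR'] = '42'
    res = Resource(a_cli_argument='--gmv_example', a_env_variable='GMV_EXAMPLE_VAR')
    return res.get_value_as_int() # 42, found in the ENV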
class MockConf(object):
"""
MockConf Object that returns only defaults
"""
def __init__(self, use_resource=True):
"""
default constructor
"""
pass
@classmethod
def get(cls, section, option, default=None, fail_if_missing=False): #pylint: disable=W0613
""" get one option from a section.
"""
return default
@classmethod
def print_content(cls, substitute_values = True):#pylint: disable=W0613
""" print all the options variables substituted.
        :param substitute_values: bool for substituting values
:returns: the string containing all sections and variables
"""
raise exceptions.Error("Not implemented in MockupConf")
@classmethod
def items(cls, section):#pylint: disable=W0613
""" return all items from a section. Items is a list of tuples (option,value)
Args:
section. The section where to find the option
Returns: a list of tuples (option,value)
Raises:
exception NoSectionError if the section cannot be found
"""
raise exceptions.Error("Not implemented in MockupConf")
@classmethod
def getint(cls, section, option, default=0, fail_if_missing=False):#pylint: disable=W0613
"""Return the int value of the option.
Default value is 0, None value can't be used as default value"""
return default
@classmethod
def getfloat(cls, section, option, default=0, fail_if_missing=False):#pylint: disable=W0613
"""Return the float value of the option.
Default value is 0, None value can't be used as default value"""
return default
@classmethod
def getboolean(cls, section, option, default=False, fail_if_missing=False):#pylint: disable=W0613
"""get bool value """
return default
@classmethod
def get_list(cls, section, option, default=None, fail_if_missing=False):#pylint: disable=W0613
""" get a list of string, int """
return default
@classmethod
def getlist(cls, section, option, default=None, fail_if_missing=False):#pylint: disable=W0613
""" Deprecated, use get_list instead"""
return cls.get_list(section, option, default, fail_if_missing)
@classmethod
def getdict(cls, section, option, default=None, fail_if_missing=False):#pylint: disable=W0613
""" Deprecated, use get_dict instead"""
return cls.get_dict(section, option, default, fail_if_missing)
@classmethod
def get_dict(cls, section, option, default=None, fail_if_missing=False):#pylint: disable=W0613
""" get a dict """
return default
class Conf(object):
""" Configuration Object with a several features:
* get configuration info in different types
* support for import
* support for variables in configuration file
* support for default values in all accessors
* integrated with the resources object offering to get the configuration from an env var, a commandline option or the conf
* to be done : support for blocs, list comprehension and dict comprehension, json
* to be done : define resources in the conf using the [Resource] group with A= { ENV:TESTVAR, CLI:--testvar, VAL:1.234 }
"""
# command line and env resource stuff
CLINAME = "--conf_file"
ENVNAME = "CONF_FILE"
#class member
_instance = None
_CLIGROUP = "CLI"
_ENVGROUP = "ENV"
_MAX_INCLUDE_DEPTH = 10
@classmethod
def get_instance(cls):
""" singleton method """
if cls._instance == None:
cls._instance = Conf()
return cls._instance
@classmethod
def can_be_instanciated(cls):
"""Class method used by the Resource to check that the Conf can be instantiated.
These two objects have a special contract as they are strongly coupled.
A Resource can use the Conf to check for a Resource and the Conf uses a Resource to read Conf filepath.
           :returns: True if a conf file path can be resolved and the file exists.
:except Error: Base Conf Error
"""
#No conf info passed to the resource so the Resource will not look into the conf (to avoid recursive search)
the_res = Resource(cls.CLINAME, cls.ENVNAME)
filepath = the_res.get_value(a_raise_exception=False)
if (filepath is not None) and os.path.exists(filepath):
return True
return False
def __init__(self, use_resource=True):
"""
Constructor
"""
# create resource for the conf file
self._conf_resource = Resource(Conf.CLINAME, Conf.ENVNAME)
# list of sections
self._sections = {}
self._configuration_file_path = None
# create config object
if use_resource:
self._load_config()
def _load_config(self, a_file=None):
""" _load the configuration file """
try:
            # get it from a Resource if no file is passed
if a_file is None:
a_file = self._conf_resource.get_value()
if a_file is None:
raise exceptions.Error("Conf. Error, need a configuration file path")
with codecs.open(a_file, 'r', 'utf-8') as f:
self._read(f, a_file)
# memorize conf file path
self._configuration_file_path = a_file
except Exception, exce:
print "Can't read the config file %s" % a_file
print "Current executing from dir = %s\n" % os.getcwd()
raise exce
def get_conf_file_path(self):
"""return conf_file_path"""
return self._configuration_file_path if self._configuration_file_path != None else "unknown"
def sections(self):
"""Return a list of section names, excluding [DEFAULT]"""
# self._sections will never have [DEFAULT] in it
return self._sections.keys()
@classmethod
def _get_defaults(cls, section, option, default, fail_if_missing):
""" To manage defaults.
Args:
default. The default value to return if fail_if_missing is False
fail_if_missing. Throw an exception when the option is not found and fail_if_missing is true
Returns: default if fail_if_missing is False
Raises:
exception NoOptionError if fail_if_missing is True
"""
if fail_if_missing:
            raise exceptions.NoOptionError(option, section)
else:
if default is not None:
return str(default)
else:
return None
def get(self, section, option, default=None, fail_if_missing=False):
""" get one option from a section.
            return the default if it is not found and if fail_if_missing is False, otherwise raise NoOptionError
:param section: Section where to find the option
:type section: str
:param option: Option to get
:param default: Default value to return if fail_if_missing is False
:param fail_if_missing: Will throw an exception when the option is not found and fail_if_missing is true
:returns: the option as a string
            :except NoOptionError: Raised only when fail_if_missing is set to True
"""
# all options are kept in lowercase
opt = self.optionxform(option)
if section not in self._sections:
#check if it is a ENV section
dummy = None
if section == Conf._ENVGROUP:
the_r = Resource(a_cli_argument=None, a_env_variable=opt)
dummy = the_r.get_value()
elif section == Conf._CLIGROUP:
the_r = Resource(a_cli_argument=opt, a_env_variable=None)
dummy = the_r.get_value()
#return default if dummy is None otherwise return dummy
return ((self._get_defaults(section, opt, default, fail_if_missing)) if dummy == None else dummy)
elif opt in self._sections[section]:
return self._replace_vars(self._sections[section][opt], "%s[%s]" % (section, option), - 1)
else:
return self._get_defaults(section, opt, default, fail_if_missing)
def print_content(self, substitute_values = True):
""" print all the options variables substituted.
        :param substitute_values: bool for substituting values
:returns: the string containing all sections and variables
"""
result_str = ""
for section_name in self._sections:
result_str += "[%s]\n" % (section_name)
section = self._sections[section_name]
for option in section:
if option != '__name__':
if substitute_values:
result_str += "%s = %s\n" % (option, self.get(section_name, option))
else:
result_str += "%s = %s\n" % (option, self._sections[section_name][option])
result_str += "\n"
return result_str
def items(self, section):
""" return all items from a section. Items is a list of tuples (option,value)
Args:
section. The section where to find the option
Returns: a list of tuples (option,value)
Raises:
exception NoSectionError if the section cannot be found
"""
try:
all_sec = self._sections[section]
# make a copy
a_copy = all_sec.copy()
# remove __name__ from d
if "__name__" in a_copy:
del a_copy["__name__"]
return a_copy.items()
except KeyError:
raise exceptions.NoSectionError(section)
def has_option(self, section, option):
"""Check for the existence of a given option in a given section."""
has_option = False
if self.has_section(section):
option = self.optionxform(option)
has_option = (option in self._sections[section])
return has_option
def has_section(self, section):
"""Check for the existence of a given section in the configuration."""
has_section = False
if section in self._sections:
has_section = True
return has_section
@classmethod
def _get_closing_bracket_index(cls, index, the_str, location, lineno):
""" private method used by _replace_vars to count the closing brackets.
Args:
index. The index from where to look for a closing bracket
               the_str. The string to parse
               location. The group[option] being substituted. Mainly used to create a nice exception message
               lineno. The line number. Mainly used to create a nice exception message
           Returns: the index of the found closing bracket
           Raises:
               exception SubstitutionError if the closing bracket cannot be found
"""
tolook = the_str[index + 2:]
opening_brack = 1
closing_brack_index = index + 2
i = 0
for _ch in tolook:
if _ch == ')':
if opening_brack == 1:
return closing_brack_index
else:
opening_brack -= 1
elif _ch == '(':
if tolook[i - 1] == '%':
opening_brack += 1
# inc index
closing_brack_index += 1
i += 1
raise exceptions.SubstitutionError(lineno, location, "Missing a closing bracket in %s" % (tolook))
# very permissive regex
_SUBSGROUPRE = re.compile(r"%\((?P<group>\w*)\[(?P<option>(.*))\]\)")
def _replace_vars(self, a_str, location, lineno= - 1):
""" private replacing all variables. A variable will be in the from of %(group[option]).
Multiple variables are supported, ex /foo/%(group1[opt1])/%(group2[opt2])/bar
            Nested variables are also supported, ex /foo/%(group[%(group1[opt1])]).
Note that the group part cannot be substituted, only the option can. This is because of the Regular Expression _SUBSGROUPRE that accepts only words as values.
Args:
               a_str. The string to parse
               location. The group[option] being substituted (used in error messages)
               lineno. The line number (used in error messages)
Returns: the final string with the replacements
Raises:
               exception SubstitutionError if a variable cannot be substituted
"""
toparse = a_str
index = toparse.find("%(")
# if found opening %( look for end bracket)
if index >= 0:
# look for closing brackets while counting openings one
closing_brack_index = self._get_closing_bracket_index(index, a_str, location, lineno)
#print "closing bracket %d"%(closing_brack_index)
var = toparse[index:closing_brack_index + 1]
dummy = None
matched = self._SUBSGROUPRE.match(var)
if matched == None:
raise exceptions.SubstitutionError(lineno, location, \
"Cannot match a group[option] in %s "\
"but found an opening bracket (. Malformated expression " \
% (var))
else:
# recursive calls
group = self._replace_vars(matched.group('group'), location, - 1)
option = self._replace_vars(matched.group('option'), location, - 1)
try:
# if it is in ENVGROUP then check ENV variables with a Resource object
# if it is in CLIGROUP then check CLI argument with a Resource object
# otherwise check in standard groups
if group == Conf._ENVGROUP:
res = Resource(a_cli_argument=None, a_env_variable=option)
dummy = res.get_value()
elif group == Conf._CLIGROUP:
res = Resource(a_cli_argument=option, a_env_variable=None)
dummy = res.get_value()
else:
dummy = self._sections[group][self.optionxform(option)]
except KeyError, _: #IGNORE:W0612
raise exceptions.SubstitutionError(lineno, location, "Property %s[%s] "\
"doesn't exist in this configuration file \n" \
% (group, option))
toparse = toparse.replace(var, dummy)
return self._replace_vars(toparse, location, - 1)
else:
return toparse
def _get(self, section, conv, option, default, fail_if_missing):
""" Internal getter """
return conv(self.get(section, option, default, fail_if_missing))
def getint(self, section, option, default=0, fail_if_missing=False):
"""Return the int value of the option.
Default value is 0, None value can't be used as default value"""
return self._get(section, int, option, default, fail_if_missing)
def get_int(self, section, option, default=0, fail_if_missing=False):
"""Return the int value of the option.
Default value is 0, None value can't be used as default value"""
return self._get(section, int, option, default, fail_if_missing)
def getfloat(self, section, option, default=0, fail_if_missing=False):
"""Return the float value of the option.
Default value is 0, None value can't be used as default value"""
return self._get(section, float, option, default, fail_if_missing)
def get_float(self, section, option, default=0, fail_if_missing=False):
"""Return the float value of the option.
Default value is 0, None value can't be used as default value"""
return self._get(section, float, option, default, fail_if_missing)
_boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
'0': False, 'no': False, 'false': False, 'off': False}
def getboolean(self, section, option, default=False, fail_if_missing=False):
"""getboolean value"""
val = self.get(section, option, default, fail_if_missing)
if val.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % val
return self._boolean_states[val.lower()]
def get_boolean(self, section, option, default=False, fail_if_missing=False):
"""get_boolean value"""
val = self.get(section, option, default, fail_if_missing)
if val.lower() not in self._boolean_states:
raise ValueError, 'Not a boolean: %s' % val
return self._boolean_states[val.lower()]
def get_list(self, section, option, default=None, fail_if_missing=False):
""" get a list of string, int """
val = self.get(section, option, default, fail_if_missing)
# parse it and return an error if invalid
try:
compiler = struct_parser.Compiler()
return compiler.compile_list(val)
except struct_parser.CompilerError, err:
raise exceptions.Error(err.message)
def getlist(self, section, option, default=None, fail_if_missing=False):
""" Deprecated, use get_list instead"""
return self.get_list(section, option, default, fail_if_missing)
def getdict(self, section, option, default=None, fail_if_missing=False):
""" Deprecated, use get_dict instead"""
return self.get_dict(section, option, default, fail_if_missing)
def get_dict(self, section, option, default=None, fail_if_missing=False):
""" get a dict """
val = self.get(section, option, default, fail_if_missing)
# parse it and return an error if invalid
try:
compiler = struct_parser.Compiler()
return compiler.compile_dict(val)
except struct_parser.CompilerError, err:
raise exceptions.Error(err.message)
@classmethod
def optionxform(cls, optionstr):
"""optionxform"""
return optionstr.lower()
#
# Regular expressions for parsing section headers and options.
#
SECTCRE = re.compile(
r'\[' # [
r'(?P<header>[^]]+)' # very permissive!
r'\]' # ]
)
OPTCRE = re.compile(
r'(?P<option>[^:=\s][^:=]*)' # very permissive!
r'\s*(?P<vi>[:=])\s*' # any number of space/tab,
# followed by separator
# (either : or =), followed
# by any # space/tab
r'(?P<value>.*)$' # everything up to eol
)
def _read_include(self, lineno, line, origin, depth):
"""_read_include"""
# Error if depth is MAX_INCLUDE_DEPTH
if depth >= Conf._MAX_INCLUDE_DEPTH:
raise exceptions.IncludeError("Error. Cannot do more than %d nested includes."\
" It is probably a mistake as you might have created a loop of includes" \
% (Conf._MAX_INCLUDE_DEPTH))
# remove %include from the path and we should have a path
i = line.find('%include')
#check if there is a < for including config files from a different format
#position after include
i = i + 8
# include file with a specific reading module
if line[i] == '<':
dummy = line[i+1:].strip()
f_i = dummy.find('>')
if f_i == -1:
raise exceptions.IncludeError("Error. > is missing in the include line no %s: %s."\
" It should be %%include<mode:group_name> path" \
                                              % (lineno, line), origin)
else:
group_name = None
the_format = dummy[:f_i].strip()
the_list = the_format.split(':')
if len(the_list) != 2 :
raise exceptions.IncludeError("Error. The mode and the group_name are not in the include line no %s: %s."\
" It should be %%include<mode:group_name> path" \
                                                  % (lineno, line), origin)
else:
the_format, group_name = the_list
#strip the group name
group_name = group_name.strip()
path = dummy[f_i+1:].strip()
# replace variables if there are any
path = self._replace_vars(path, line, lineno)
raise exceptions.IncludeError("External Module reading not enabled in this ConfHelper")
#self._read_with_module(group_name, format, path, origin)
else:
# normal include
path = line[i:].strip()
# replace variables if there are any
path = self._replace_vars(path, line, lineno)
# check if file exits
if not os.path.exists(path):
raise exceptions.IncludeError("the config file to include %s does not exits" % (path), origin)
else:
# add include file and populate the section hash
self._read(codecs.open(path, 'r', 'utf-8'), path, depth + 1)
#self._read(open(path, 'r'), path, depth + 1)
def _read(self, fpointer, fpname, depth=0): #pylint: disable=R0912
"""Parse a sectioned setup file.
The sections in setup file contains a title line at the top,
indicated by a name in square brackets (`[]'), plus key/value
options lines, indicated by `name: value' format lines.
Continuations are represented by an embedded newline then
leading whitespace. Blank lines, lines beginning with a '#',
and just about everything else are ignored.
Depth for avoiding looping in the includes
"""
cursect = None # None, or a dictionary
optname = None
lineno = 0
err = None # None, or an exception
while True:
line = fpointer.readline()
if not line:
break
lineno = lineno + 1
# include in this form %include
if line.startswith('%include'):
self._read_include(lineno, line, fpname, depth)
continue
# comment or blank line?
if line.strip() == '' or line[0] in '#;':
continue
if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
# no leading whitespace
continue
# continuation line?
if line[0].isspace() and cursect is not None and optname:
value = line.strip()
if value:
cursect[optname] = "%s\n%s" % (cursect[optname], value)
# a section header or option header?
else:
# is it a section header?
matched = self.SECTCRE.match(line)
if matched:
sectname = matched.group('header')
if sectname in self._sections:
cursect = self._sections[sectname]
else:
cursect = {'__name__': sectname}
self._sections[sectname] = cursect
# So sections can't start with a continuation line
optname = None
# no section header in the file?
elif cursect is None:
raise exceptions.MissingSectionHeaderError(fpname, lineno, line)
# an option line?
else:
matched = self.OPTCRE.match(line)
if matched:
optname, vio, optval = matched.group('option', 'vi', 'value')
if vio in ('=', ':') and ';' in optval:
# ';' is a comment delimiter only if it follows
# a spacing character
pos = optval.find(';')
if pos != - 1 and optval[pos - 1].isspace():
optval = optval[:pos]
optval = optval.strip()
# allow empty values
if optval == '""':
optval = ''
optname = self.optionxform(optname.rstrip())
cursect[optname] = optval
else:
# a non-fatal parsing error occurred. set up the
# exception but keep going. the exception will be
# raised at the end of the file and will contain a
# list of all bogus lines
if not err:
err = exceptions.ParsingError(fpname)
err.append(lineno, repr(line))
# if any parsing errors occurred, raise an exception
if err:
raise err.get_error()
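if __name__ == '__main__':
    # Minimal self-test sketch (uses only what this module already imports,
    # plus StringIO): parse an in-memory config and exercise substitution.
    import StringIO
    DEMO_CONF = Conf(use_resource=False)
    DEMO_CONF._read(StringIO.StringIO("[demo]\nroot=/tmp\npath=%(demo[root])/out\n"), "inline") #pylint: disable=W0212
    print DEMO_CONF.get("demo", "path") # expected: /tmp/out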
| 12,313 | conf_tests.py | gaubert_gmvault/src/gmv/conf/conf_tests.py |
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# unit tests part
import unittest
import sys
import os
import codecs
import gmv.conf.conf_helper
class TestConf(unittest.TestCase): #pylint: disable=R0904
"""
Test Class for the Conf Object
"""
@classmethod
def _get_tests_dir_path(cls):
""" get the org.ctbto.conf.tests path depending on where it is defined """
fmod_path = gmv.conf.__path__
test_dir = "%s/tests" % fmod_path[0]
return test_dir
def setUp(self): #pylint: disable=C0103
# necessary for the include with the VAR ENV substitution
os.environ["DIRCONFENV"] = TestConf._get_tests_dir_path()
self.conf = gmv.conf.conf_helper.Conf(use_resource=False)
with codecs.open('%s/%s' % (TestConf._get_tests_dir_path(),
"test.config"), 'r', 'utf-8') as f:
self.conf._read(f, "the file") #pylint: disable=W0212
def tearDown(self): #pylint: disable=C0103
if os.path.exists('/tmp/fake_conf.config'):
os.remove('/tmp/fake_conf.config')
def test_empty(self):
"""
Do nothing
"""
pass
def test_get_objects(self):
"""testGetObjects: test getter from all types """
# get simple string
astring = self.conf.get("GroupTest1", "astring")
self.assertEqual(astring,"oracle.jdbc.driver.OracleDriver")
# get an int
aint = self.conf.getint("GroupTest1", "aint")
self.assertEqual(aint, 10)
        # get a float
afloat = self.conf.getfloat("GroupTest1", "afloat")
self.assertEqual(afloat, 5.24)
# get different booleans form
abool1 = self.conf.getboolean("GroupTest1", "abool1")
self.assertEqual(abool1, True)
abool2 = self.conf.getboolean("GroupTest1", "abool2")
self.assertEqual(abool2, False)
abool3 = self.conf.getboolean("GroupTest1", "abool3")
self.assertEqual(abool3, True)
abool4 = self.conf.getboolean("GroupTest1", "abool4")
self.assertEqual(abool4 , False)
def test_get_defaults(self):
"""testGetDefaults: test defaults values """
# get all defaults
astring = self.conf.get("GroupTest", "astring", "astring")
self.assertEqual(astring, "astring")
# get an default for int
aint = self.conf.getint("GroupTest", "aint", 2)
self.assertEqual(aint, 2)
# get float
afloat = self.conf.getfloat("GroupTest", "afloat", 10.541)
self.assertEqual(afloat, 10.541)
abool1 = self.conf.getboolean("GroupTest", "abool1", True)
self.assertEqual(abool1, True)
abool2 = self.conf.getboolean("GroupTest", "abool2", False)
self.assertEqual(abool2, False)
# existing group no option
abool5 = self.conf.getboolean("GroupTest1", "abool32", False)
self.assertEqual(abool5, False)
def test_var_substitutions(self):
"""testVarSubstitutions: test variables substitutions"""
# simple substitution
apath = self.conf.get("GroupTestVars", "path")
self.assertEqual(apath,"/foo/bar//tmp/foo/bar/bar/foo")
# multiple substitution
apath = self.conf.get("GroupTestVars", "path1")
self.assertEqual(apath,"/foo//tmp/foo/bar//foo/bar//tmp/foo/bar/bar/foo/bar")
# nested substitution
nested = self.conf.get("GroupTestVars", "nested")
self.assertEqual(nested, "this is done")
def test_include(self):
"""testInclude: test includes """
val = self.conf.get("IncludedGroup", "hello")
self.assertEqual(val, 'foo')
@classmethod
def _create_fake_conf_file_in_tmp(cls):
"""Create a fake conf file in tmp"""
with open('/tmp/fake_conf.config', 'w') as f:
f.write('\n[MainDatabaseAccess]\n')
f.write('driverClassName=oracle.jdbc.driver.OracleDriver')
f.flush()
def ztest_use_conf_ENVNAME_resource(self): #pylint: disable=C0103
"""testUseConfENVNAMEResource: Use default resource ENVNAME to locate conf file"""
self._create_fake_conf_file_in_tmp()
        # need to setup the ENV containing the path to the conf file:
os.environ[gmv.conf.conf_helper.Conf.ENVNAME] = "/tmp/fake_conf.config"
self.conf = gmv.conf.conf_helper.Conf.get_instance()
the_s = self.conf.get("MainDatabaseAccess", "driverClassName")
self.assertEqual(the_s, 'oracle.jdbc.driver.OracleDriver')
def test_read_from_CLI(self): #pylint: disable=C0103
"""testReadFromCLI: do substitutions from command line resources"""
#set environment
os.environ["TESTENV"] = "/tmp/foo/foo.bar"
val = self.conf.get("GroupTest1", "fromenv")
self.assertEqual(val, '/mydir//tmp/foo/foo.bar')
#set cli arg
sys.argv.append("--LongName")
sys.argv.append("My Cli Value")
val = self.conf.get("GroupTest1", "fromcli1")
self.assertEqual(val, 'My Cli Value is embedded')
#check with a more natural cli value
val = self.conf.get("GroupTest1", "fromcli2")
self.assertEqual(val, 'My Cli Value is embedded 2')
def test_read_from_ENV(self): #pylint: disable=C0103
"""testReadFromENV: do substitutions from ENV resources"""
#set environment
os.environ["TESTENV"] = "/tmp/foo/foo.bar"
val = self.conf.get("ENV", "TESTENV")
self.assertEqual(val, "/tmp/foo/foo.bar")
#set cli arg
sys.argv.append("--LongName")
sys.argv.append("My Cli Value")
val = self.conf.get("CLI", "LongName")
self.assertEqual(val, "My Cli Value")
# get a float from env
os.environ["TESTENV"] = "1.05"
val = self.conf.getfloat("ENV", "TESTENV")
self.assertEqual(val+1, 2.05)
def test_print_content(self):
""" test print content """
#set environment
os.environ["TESTENV"] = "/tmp/foo/foo.bar"
#set cli arg
sys.argv.append("--LongName")
sys.argv.append("My Cli Value")
substitute_values = True
result = self.conf.print_content( substitute_values )
self.assertNotEqual(result, '')
def test_value_as_List(self): #pylint: disable=C0103
""" Value as List """
the_list = self.conf.getlist('GroupTestValueStruct', 'list')
self.assertEqual(the_list, ['a', 1, 3])
def test_value_as_unicodeList(self): #pylint: disable=C0103
""" Value as List """
the_list = self.conf.getlist('GroupTestValueStruct', 'unicode_list')
self.assertEqual(the_list, [ u'[Gmail]/Чаты', 'z' , 1 ])
def test_value_as_dict(self):
"""Dict as Value """
the_dict = self.conf.get_dict('GroupTestValueStruct', 'dict')
self.assertEqual(the_dict, {'a': 2, 'b': 3})
def test_complex_dict(self):
""" complex dict """
the_dict = self.conf.get_dict('GroupTestValueStruct', 'complex_dict')
self.assertEqual(the_dict, {'a': 2, 'c': {'a': 1, 'c': [1, 2, 3], 'b': [1, 2, 3, 4, 5, 6, 7]}, 'b': 3})
def test_dict_error(self):
""" error with a dict """
try:
self.conf.get_dict('GroupTestValueStruct', 'dict_error')
except Exception, err:
self.assertEquals(err.message, "Expression \"{1:2,'v b': a\" cannot be converted as a dict.")
return
self.fail('Should never reach that point')
def test_list_error(self):
""" error with a list """
try:
the_list = self.conf.get_list('GroupTestValueStruct', 'list_error')
print('the_list = %s\n' % (the_list))
except Exception, err:
self.assertEquals(err.message, 'Unsupported token (type: @, value : OP) (line=1,col=3).')
return
self.fail('Should never reach that point')
class TestResource(unittest.TestCase): #pylint: disable=R0904
"""
Test Class for the Resource object
"""
def test_resource_simple_cli(self):
"""testResourceSimpleCli: read resource from CLI"""
# set command line
sys.argv.append("--LongName")
sys.argv.append("My Cli Value")
resource = gmv.conf.conf_helper.Resource(a_cli_argument = "--LongName", a_env_variable = None)
self.assertEqual("My Cli Value", resource.get_value())
# look for LongName without --. It should be formalized by the Resource object
resource = gmv.conf.conf_helper.Resource(a_cli_argument = "LongName", a_env_variable = None)
self.assertEqual("My Cli Value", resource.get_value())
def test_resource_from_env(self):
"""testResourceFromENV: read resource from ENV"""
#ENV
os.environ["MYENVVAR"] = "My ENV Value"
resource = gmv.conf.conf_helper.Resource(a_cli_argument=None, a_env_variable="MYENVVAR")
self.assertEqual("My ENV Value", resource.get_value())
def ztest_resource_priority_rules(self):
"""testResourcePriorityRules: test priority rules"""
resource = gmv.conf.conf_helper.Resource(a_cli_argument="--LongName", a_env_variable="MYENVVAR")
self.assertEqual("My Cli Value", resource.get_value())
def test_resource_get_different_types(self): #pylint: disable=C0103
"""testResourceGetDifferentTypes: return resource in different types"""
os.environ["MYENVVAR"] = "yes"
resource = gmv.conf.conf_helper.Resource(a_cli_argument=None, a_env_variable="MYENVVAR")
self.assertEqual(resource.get_value_as_boolean(), True)
os.environ["MYENVVAR"] = "4"
resource = gmv.conf.conf_helper.Resource(a_cli_argument=None, a_env_variable="MYENVVAR")
self.assertEqual(resource.get_value_as_int()+1, 5)
os.environ["MYENVVAR"] = "4.345"
resource = gmv.conf.conf_helper.Resource(a_cli_argument=None, a_env_variable="MYENVVAR")
self.assertEqual(resource.get_value_as_float()+1, 5.345)
def tests():
""" global test method"""
#suite = unittest.TestLoader().loadTestsFromModule(gmv.conf.conf_tests)
suite = unittest.TestLoader().loadTestsFromTestCase(TestConf)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
tests()
| 12,314 | exceptions.py | gaubert_gmvault/src/gmv/conf/exceptions.py |
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# exception classes
class Error(Exception):
"""Base class for Conf exceptions."""
def __init__(self, msg=''):
self.message = msg
Exception.__init__(self, msg)
def __repr__(self):
return self.message
__str__ = __repr__
class NoOptionError(Error):
"""A requested option was not found."""
def __init__(self, option, section):
Error.__init__(self, "No option %r in section: %r" %
(option, section))
self.option = option
self.section = section
class NoSectionError(Error):
"""Raised when no section matches a requested option."""
def __init__(self, section):
Error.__init__(self, 'No section: %r' % (section,))
self.section = section
class SubstitutionError(Error):
"""Base class for substitution-related exceptions."""
def __init__(self, lineno, location, msg):
Error.__init__(self, 'SubstitutionError on line %d: %s. %s' \
% (lineno, location, msg) if lineno != - 1 \
                       else 'SubstitutionError in %s. %s' % (location, msg))
class IncludeError(Error):
""" Raised when an include command is incorrect """
def __init__(self, msg, origin):
Error.__init__(self, msg)
self.origin = origin
self.errors = []
class ParsingError(Error):
"""Raised when a configuration file does not follow legal syntax."""
def __init__(self, filename):
Error.__init__(self, 'File contains parsing errors: %s' % filename)
self.filename = filename
self.errors = []
def append(self, lineno, line):
""" add error message """
self.errors.append((lineno, line))
self.message += '\n\t[line %2d]: %s' % (lineno, line)
def get_error(self):
""" return the error """
return self
class MissingSectionHeaderError(ParsingError):
"""Raised when a key-value pair is found before any section header."""
def __init__(self, filename, lineno, line):
ParsingError.__init__(
self,
'File contains no section headers.\nfile: %s, line: %d\n%r' %
(filename, lineno, line))
self.filename = filename
self.lineno = lineno
self.line = line
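if __name__ == '__main__':
    # Tiny sketch of how ParsingError accumulates bogus lines before being
    # raised (file name and line contents are invented for the example).
    DEMO_ERR = ParsingError("demo.config")
    DEMO_ERR.append(3, "garbled line")
    DEMO_ERR.append(7, "another one")
    print DEMO_ERR.get_error()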
| 12,315 | struct_parser_tests.py | gaubert_gmvault/src/gmv/conf/utils/struct_parser_tests.py |
# -*- coding: utf-8 -*-
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
# unit tests part
import unittest
from gmv.conf.utils.struct_parser import Compiler, CompilerError
class TestParser(unittest.TestCase): #pylint: disable=R0904
""" TestParser Class """
def setUp(self): #pylint: disable=C0103
pass
def test_simple_list_test(self):
""" a first simple test with space and indent, dedents to eat"""
the_string = " [ 'a', 1.435, 3 ]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, [ 'a', 1.435, 3])
def test_negative_number_test(self):
""" a negative number test """
the_string = " [ '-10.4', 1.435, 3 ]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, [ '-10.4', 1.435, 3])
def test_imbricated_lists_test(self):
""" multiple lists within lists """
the_string = "[a,b, [1,2,3,4, [456,6,'absdef'], 234, 2.456 ], aqwe, done]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a', 'b', [1, 2, 3, 4, [456, 6, 'absdef'], 234, 2.456 ]\
, 'aqwe', 'done'])
def test_list_without_bracket_test(self):
""" simple list without bracket test """
the_string = " 'a', b"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a', 'b'])
def test_list_without_bracket_test_2(self): #pylint: disable=C0103
""" list without bracket test with a list inside """
the_string = " 'a', b, ['a thing', 2]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a', 'b', ['a thing', 2] ])
def test_list_error(self):
""" list error """
the_string = " a ]"
compiler = Compiler()
try:
compiler.compile_list(the_string)
except CompilerError, err:
self.assertEqual(err.message, 'Expression " a ]" cannot be converted as a list.')
def test_list_unicode_val(self):
""" list unicode val """
the_string = "[ u'[Gmail]/Чаты', 'z' ]".decode('utf-8')
        #to be in the same conditions as the conf object we need to decode utf-8,
        # as it is done automatically with codecs.open(...., 'utf-8')
compiler = Compiler()
        the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, [ u'[Gmail]/Чаты', 'z' ])
def test_special_character_in_string(self):#pylint: disable=C0103
""" simple list without bracket test """
the_string = " 'a@', b"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a@','b'])
def test_list_error_2(self):
""" unsupported char @ """
the_string = " a @"
compiler = Compiler()
try:
compiler.compile_list(the_string)
except CompilerError, err:
self.assertEqual(err.message, 'Unsupported token (type: @, value : OP) (line=1,col=3).')
def test_simple_dict(self):
""" simple dict """
the_string = "{'a':1, b:2 }"
compiler = Compiler()
the_result = compiler.compile_dict(the_string)
self.assertEqual(the_result, {'a':1, 'b':2 })
def test_dict_error(self):
""" dict error """
the_string = "{'a':1, b:2 "
compiler = Compiler()
try:
compiler.compile_dict(the_string)
except CompilerError, err:
self.assertEqual(err.message, 'Expression "{\'a\':1, b:2 " cannot be converted as a dict.')
def test_dict_with_list(self):
""" dict with list """
the_string = "{'a':1, b:[1,2,3,4,5] }"
compiler = Compiler()
the_result = compiler.compile_dict(the_string)
self.assertEqual(the_result, {'a':1, 'b':[1, 2, 3, 4, 5]})
def test_list_with_dict(self):
""" list with dict """
the_string = "['a',1,'b',{2:3,4:5} ]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a', 1, 'b', { 2 : 3 , 4 : 5} ])
def test_noquotes_dict(self):
""" no quotes dict """
the_string = "{ no12: a b , no10:a}"
compiler = Compiler()
the_result = compiler.compile_dict(the_string)
self.assertEqual(the_result, { 'no12': 'a b' , 'no10':'a'})
def test_everything(self):
""" everything """
the_string = "['a',1,'b',{2:3,4:[1,'hello', no quotes, [1,2,3,{1:2,3:4}]]} ]"
compiler = Compiler()
the_result = compiler.compile_list(the_string)
self.assertEqual(the_result, ['a', 1, 'b', \
{2 : 3, \
4: [1, 'hello', 'no quotes', [1, 2, 3, {1:2, 3:4 }]]} ])
def tests():
""" Global test method """
#suite = unittest.TestLoader().loadTestsFromModule(struct_parser)
suite = unittest.TestLoader().loadTestsFromTestCase(TestParser)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
tests()
| 12,316 | struct_parser.py | gaubert_gmvault/src/gmv/conf/utils/struct_parser.py |
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import tokenize
import token
import StringIO
class TokenizerError(Exception):
"""Base class for All exceptions"""
def __init__(self, a_msg, a_line=None, a_col=None):
self._line = a_line
self._col = a_col
if self._line == None and self._col == None:
extra = ""
else:
extra = "(line=%s,col=%s)" % (self._line, self._col)
super(TokenizerError, self).__init__("%s %s." % (a_msg, extra))
class Token(object):
""" Token class """
def __init__(self, a_type, num, value, begin, end, parsed_line):
self._type = a_type
self._num = num
self._value = value
self._begin = begin
self._end = end
self._parsed_line = parsed_line
@property
def type(self):
""" Return the token type """
return self._type
@property
def num(self):
""" Return the token type num """
return self._num
@property
def value(self):
""" Return the token value """
return self._value
@property
def begin(self):
""" Return the token begin """
return self._begin
@property
def end(self):
""" Return the token end """
return self._end
@property
def parsed_line(self):
""" Return the token line """
return self._parsed_line
def __repr__(self):
return "[type,num]=[%s,%s],value=[%s], parsed line=%s,[begin index,end index]=[%s,%s]" \
% (self._type, self._num, self._value, self._parsed_line, self._begin, self._end)
class Tokenizer(object):
"""
Create tokens for parsing the grammar.
    This class is a wrapper around the python tokenizer adapted to the DSL that is going to be used.
"""
def __init__(self):
""" constructor """
# list of tokens
self._tokens = []
self._index = 0
self._current = None
def tokenize(self, a_program, a_eatable_token_types = ()):
""" parse the expression.
            By default the tokenizer eats spaces, but extra token types may also have to be eaten
            Args:
               a_program: the expression to parse
               a_eatable_token_types: token types to skip over
            Returns:
               None. The tokens are accumulated in the internal token list
            Raises:
               exception TokenizerError if the syntax of a_program is incorrect
"""
g_info = tokenize.generate_tokens(StringIO.StringIO(a_program).readline) # tokenize the string
for toknum, tokval, tokbeg, tokend, tokline in g_info:
if token.tok_name[toknum] not in a_eatable_token_types:
self._tokens.append(Token(token.tok_name[toknum], toknum, tokval, tokbeg, tokend, tokline))
def __iter__(self):
""" iterator implemented with a generator.
"""
for tok in self._tokens:
self._current = tok
yield tok
def next(self):
""" get next token.
Returns:
return next token
"""
self._current = self._tokens[self._index]
self._index += 1
return self._current
def has_next(self):
""" check it there are more tokens to consume.
Returns:
return True if more tokens to consume False otherwise
"""
return self._index < len(self._tokens)
def current_token(self):
""" return the latest consumed token.
Returns:
            return the latest consumed token
"""
return self._current
def consume_token(self, what):
""" consume the next token if it is what """
if self._current.value != what :
raise TokenizerError("Expected '%s' but instead found '%s'" % (what, self._current.value))
else:
return self.next()
def consume_while_next_token_is_in(self, a_token_types_list):
"""
Consume the next tokens as long as they have one of the passed types.
This means that at least one token with one of the passed types needs to be matched.
Args:
a_token_types_list: the token types to consume
Returns:
return the next non matching token
"""
self.consume_next_tokens(a_token_types_list)
while True:
tok = self.next()
if tok.type not in a_token_types_list:
return tok
def consume_while_current_token_is_in(self, a_token_types_list): #pylint: disable=C0103
"""
Consume the tokens starting from the current token as long as they have one of the passed types.
        It is a classical token eater: it eats tokens as long as they are of the specified types
Args:
a_token_types_list: the token types to consume
Returns:
return the next non matching token
"""
tok = self.current_token()
while tok.type in a_token_types_list:
tok = self.next()
return tok
def consume_next_tokens(self, a_token_types_list):
"""
            Consume the next token and check that its type is one of the expected types, otherwise raise an exception
            Args:
              a_token_types_list: the expected token types
            Returns:
              return next token
            Raises:
              exception TokenizerError if a token type that is not in a_token_types_list is found
"""
tok = self.next()
if tok.type not in a_token_types_list:
raise TokenizerError("Expected '%s' but instead found '%s'" % (a_token_types_list, tok))
else:
return tok
def advance(self, inc=1):
""" return the next + inc token but do not consume it.
Useful to check future tokens.
Args:
              inc: the increment; +1 is the default (just look one step forward)
            Returns:
              return the lookahead token
"""
return self._tokens[self._index-1 + inc]
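# Illustrative sketch of driving the Tokenizer directly (the expression is an
# arbitrary example):
#
#   t = Tokenizer()
#   t.tokenize("[1, 'a']", ('NEWLINE', 'NL', 'INDENT', 'DEDENT'))
#   while t.has_next():
#       print t.next()   # each Token carries type, value and position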
class CompilerError(Exception):
"""Base class for All exceptions"""
def __init__(self, a_msg, a_line=None, a_col=None):
self._line = a_line
self._col = a_col
msg = ''
if self._line == None and self._col == None:
extra = ""
msg = "%s." % (a_msg)
else:
extra = "(line=%s,col=%s)" % (self._line, self._col)
msg = "%s %s." % (a_msg, extra)
super(CompilerError, self).__init__(msg)
class Compiler(object):
""" compile some python structures
"""
def __init__(self):
""" constructor """
#default tokens to ignore
self._tokens_to_ignore = ('INDENT', 'DEDENT', 'NEWLINE', 'NL')
def compile_list(self, a_to_compile_str):
""" compile a list object """
try:
tokenizer = Tokenizer()
tokenizer.tokenize(a_to_compile_str, self._tokens_to_ignore)
except tokenize.TokenError, err:
#translate this error into something understandable.
#It is because the bloody tokenizer counts the brackets
if err.args[0] == "EOF in multi-line statement":
raise CompilerError("Expression \"%s\" cannot be converted as a list" % (a_to_compile_str))
else:
raise CompilerError(err)
print("Err = %s\n" % (err))
tokenizer.next()
return self._compile_list(tokenizer)
def compile_dict(self, a_to_compile_str):
""" compile a dict object """
try:
tokenizer = Tokenizer()
tokenizer.tokenize(a_to_compile_str, self._tokens_to_ignore)
except tokenize.TokenError, err:
#translate this error into something understandable.
#It is because the bloody tokenizer counts the brackets
if err.args[0] == "EOF in multi-line statement":
raise CompilerError("Expression \"%s\" cannot be converted as a dict" % (a_to_compile_str))
else:
raise CompilerError(err)
print("Err = %s\n" % (err))
tokenizer.next()
return self._compile_dict(tokenizer)
def _compile_dict(self, a_tokenizer):
""" internal method for compiling a dict struct """
result = {}
the_token = a_tokenizer.current_token()
while the_token.type != 'ENDMARKER':
#look for an open bracket
if the_token.type == 'OP' and the_token.value == '{':
the_token = a_tokenizer.next()
while True:
if the_token.type == 'OP' and the_token.value == '}':
return result
else:
# get key values
(key, val) = self._compile_key_value(a_tokenizer)
result[key] = val
the_token = a_tokenizer.current_token()
else:
raise CompilerError("Unsupported token (type: %s, value : %s)" \
% (the_token.type, the_token.value), the_token.begin[0], the_token.begin[1])
#we should never reach that point (compilation error)
raise CompilerError("End of line reached without finding a list. The line [%s] cannot be transformed as a list" \
% (the_token.parsed_line))
def _compile_key_value(self, a_tokenizer):
""" look for the pair key value component of a dict """
the_token = a_tokenizer.current_token()
key = None
val = None
# get key
if the_token.type in ('STRING', 'NUMBER', 'NAME'):
#next the_token is in _compile_litteral
key = self._compile_litteral(a_tokenizer)
the_token = a_tokenizer.current_token()
else:
raise CompilerError("unexpected token (type: %s, value : %s)" \
% (the_token.type, the_token.value), \
the_token.begin[0], the_token.begin[1])
        #should have a colon now
        if the_token.type != 'OP' or the_token.value != ':':
raise CompilerError("Expected a token (type:OP, value: :) but instead got (type: %s, value: %s)" \
% (the_token.type, the_token.value), the_token.begin[0], the_token.begin[1])
else:
#eat it
the_token = a_tokenizer.next()
#get value
# it can be a
if the_token.type in ('STRING', 'NUMBER', 'NAME'):
#next the_token is in _compile_litteral
val = self._compile_litteral(a_tokenizer)
the_token = a_tokenizer.current_token()
#check for a list
elif the_token.value == '[' and the_token.type == 'OP':
# look for a list
val = self._compile_list(a_tokenizer)
# positioning to the next token
the_token = a_tokenizer.next()
elif the_token.value == '{' and the_token.type == 'OP':
# look for a dict
val = self._compile_dict(a_tokenizer)
# positioning to the next token
the_token = a_tokenizer.next()
elif the_token.value == '(' and the_token.type == 'OP':
# look for a dict
val = self._compile_tuple(a_tokenizer)
# positioning to the next token
the_token = a_tokenizer.next()
else:
raise CompilerError("unexpected token (type: %s, value : %s)" \
% (the_token.type, the_token.value), the_token.begin[0], \
the_token.begin[1])
#if we have a comma then eat it as it means that we will have more than one values
if the_token.type == 'OP' and the_token.value == ',':
the_token = a_tokenizer.next()
return (key, val)
def _compile_litteral(self, a_tokenizer):
""" compile key. A key can be a NAME, STRING or NUMBER """
val = None
dummy = None
the_token = a_tokenizer.current_token()
while the_token.type not in ('OP', 'ENDMARKER'):
if the_token.type == 'STRING':
#check if the string is unicode
if len(the_token.value) >= 3 and the_token.value[:2] == "u'":
#unicode string
#dummy = unicode(the_token.value[2:-1], 'utf_8') #decode from utf-8 encoding not necessary if read full utf-8 file
dummy = unicode(the_token.value[2:-1])
else:
#ascii string
# the value contains the quote or double quotes so remove them always
dummy = the_token.value[1:-1]
elif the_token.type == 'NAME':
                # interpret all non quoted names as a string
dummy = the_token.value
elif the_token.type == 'NUMBER':
dummy = self._create_number(the_token.value)
else:
raise CompilerError("unexpected token (type: %s, value : %s)" \
% (the_token.type, the_token.value), \
the_token.begin[0], the_token.begin[1])
#if val is not None, it has to be a string
if val:
val = '%s %s' % (str(val), str(dummy))
else:
val = dummy
the_token = a_tokenizer.next()
return val
def _compile_tuple(self, a_tokenizer):
""" process tuple structure """
result = []
open_bracket = 0
# this is the mode without [ & ] operator : 1,2,3,4
simple_list_mode = 0
the_token = a_tokenizer.current_token()
while the_token.type != 'ENDMARKER':
#look for an open bracket
if the_token.value == '(' and the_token.type == 'OP':
#first time we open a bracket and not in simple mode
if open_bracket == 0 and simple_list_mode == 0:
open_bracket += 1
#recurse to create the imbricated list
else:
result.append(self._compile_tuple(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.value == '{' and the_token.type == 'OP':
result.append(self._compile_dict(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.value == '[' and the_token.type == 'OP':
result.append(self._compile_list(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.type == 'OP' and the_token.value == ')':
# end of list return result
if open_bracket == 1:
return tuple(result)
# cannot find a closing bracket and a simple list mode
elif simple_list_mode == 1:
raise CompilerError("unexpected token (type: %s, value : %s)" \
                                          % (the_token.type, the_token.value), the_token.begin[0], \
the_token.begin[1])
# the comma case
elif the_token.type == 'OP' and the_token.value == ',':
# just eat it
the_token = a_tokenizer.next()
elif the_token.type in ('STRING', 'NUMBER', 'NAME'):
# find values outside of a list
# this can be okay
if open_bracket == 0:
simple_list_mode = 1
#next the_token is in _compile_litteral
result.append(self._compile_litteral(a_tokenizer))
the_token = a_tokenizer.current_token()
else:
raise CompilerError("Unsupported token (type: %s, value : %s)"\
                                          % (the_token.type, the_token.value), \
the_token.begin[0], the_token.begin[1])
# if we are in simple_list_mode return list else error
if simple_list_mode == 1:
return tuple(result)
#we should never reach that point (compilation error)
raise CompilerError("End of line reached without finding a list. The line [%s] cannot be transformed as a tuple" \
% (the_token.parsed_line))
def _compile_list(self, a_tokenizer):
""" process a list structure """
result = []
open_bracket = 0
# this is the mode without [ & ] operator : 1,2,3,4
simple_list_mode = 0
the_token = a_tokenizer.current_token()
while the_token.type != 'ENDMARKER':
#look for an open bracket
if the_token.value == '[' and the_token.type == 'OP':
#first time we open a bracket and not in simple mode
if open_bracket == 0 and simple_list_mode == 0:
open_bracket += 1
#recurse to create the imbricated list
else:
result.append(self._compile_list(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.value == '(' and the_token.type == 'OP':
result.append(self._compile_tuple(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.value == '{' and the_token.type == 'OP':
result.append(self._compile_dict(a_tokenizer))
the_token = a_tokenizer.next()
elif the_token.type == 'OP' and the_token.value == ']':
# end of list return result
if open_bracket == 1:
return result
# cannot find a closing bracket and a simple list mode
elif simple_list_mode == 1:
raise CompilerError("unexpected token (type: %s, value : %s)" \
                                          % (the_token.type, the_token.value), the_token.begin[0], the_token.begin[1])
# the comma case
elif the_token.type == 'OP' and the_token.value == ',':
# just eat it
the_token = a_tokenizer.next()
elif the_token.type in ('STRING', 'NUMBER', 'NAME'):
# find values outside of a list
# this can be okay
if open_bracket == 0:
simple_list_mode = 1
#next the_token is in _compile_litteral
result.append(self._compile_litteral(a_tokenizer))
the_token = a_tokenizer.current_token()
else:
raise CompilerError("Unsupported token (type: %s, value : %s)"\
                                          % (the_token.type, the_token.value), \
the_token.begin[0], the_token.begin[1])
# if we are in simple_list_mode return list else error
if simple_list_mode == 1:
return result
#we should never reach that point (compilation error)
raise CompilerError("End of line reached without finding a list. The line [%s] cannot be transformed as a list" \
% (the_token.parsed_line))
@classmethod
def _create_number(cls, a_number):
""" depending on the value return a int or a float.
For the moment very simple: If there is . it is a float"""
        if a_number.find('.') >= 0:
return float(a_number)
else:
return int(a_number)
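if __name__ == '__main__':
    # Hedged demo of the Compiler on structures similar to the ones exercised
    # by the unit tests; the literal strings are examples only.
    DEMO_COMPILER = Compiler()
    print DEMO_COMPILER.compile_list("[a, 1, [2, 3], {k: 4}]")        # unquoted names become strings
    print DEMO_COMPILER.compile_dict("{'x': 1.5, y: [1, 2], z: (3, 4)}")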
| 12,317 | gmvault_multiprocess.py | gaubert_gmvault/src/sandbox/gmvault_multiprocess.py |
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json
import time
import datetime
import os
import itertools
import imaplib
from multiprocessing import Process, Queue
import log_utils
import collections_utils
import gmvault_utils
import imap_utils
import gmvault_db
LOG = log_utils.LoggerFactory.get_logger('gmvault')
def handle_restore_imap_error(the_exception, gm_id, db_gmail_ids_info, gmvaulter):
"""
function to handle restore IMAPError in restore functions
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
if str(the_exception).find("=> Gmvault ssl socket error: EOF") >= 0:
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
gmvaulter.src.reconnect() #reconnect
else:
raise the_exception
elif isinstance(the_exception, imaplib.IMAP4.error):
LOG.error("Catched IMAP Error %s" % (str(the_exception)))
LOG.exception(the_exception)
#When the email cannot be read from Database because it was empty when returned by gmail imap
#quarantine it.
if str(the_exception) == "APPEND command error: BAD ['Invalid Arguments: Unable to parse message']":
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
elif isinstance(the_exception, imap_utils.PushEmailError):
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
if the_exception.quarantined():
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
else:
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
raise the_exception
def handle_sync_imap_error(the_exception, the_id, error_report, src):
"""
function to handle IMAPError in gmvault
type = chat or email
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# imap abort error
# ignore it
# will have to do something with these ignored messages
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
try:
#try to get the gmail_id
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
LOG.critical("Error when trying to get gmail id for message with imap id %s." % (the_id))
LOG.critical("Disconnect, wait for 20 sec then reconnect.")
src.disconnect()
#could not fetch the gm_id so disconnect and sleep
#sleep 10 sec
time.sleep(10)
LOG.critical("Reconnecting ...")
src.connect()
if curr:
gmail_id = curr[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Forced to ignore message with imap id %s, (gmail id %s)." % (the_id, (gmail_id if gmail_id else "cannot be read")))
elif isinstance(the_exception, imaplib.IMAP4.error):
# check if this is a cannot be fetched error
# I do not like to do string guessing within an exception but I do not have any choice here
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
#quarantine emails that have raised an abort error
if str(the_exception).find("'Some messages could not be FETCHed (Failure)'") >= 0:
try:
#try to get the gmail_id
LOG.critical("One more attempt. Trying to fetch the Gmail ID for %s" % (the_id) )
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
if curr:
gmail_id = curr[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Ignore message with imap id %s, (gmail id %s)" % (the_id, (gmail_id if gmail_id else "cannot be read")))
else:
raise the_exception #rethrow error
else:
raise the_exception
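# Usage sketch (illustrative, not part of the original code): both handlers
# above are meant to wrap a single IMAP call, mirroring what
# IMAPBatchFetcher.individual_fetch() does below:
#   try:
#       data = src.fetch(the_id, request)
#   except Exception, error:
#       handle_sync_imap_error(error, the_id, error_report, src)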
class IMAPBatchFetcher(object):
"""
Fetch IMAP data in batch
"""
def __init__(self, src, imap_ids, error_report, request, default_batch_size = 100):
"""
constructor
"""
self.src = src
self.imap_ids = imap_ids
self.def_batch_size = default_batch_size
self.request = request
self.error_report = error_report
self.to_fetch = list(imap_ids)
def individual_fetch(self, imap_ids):
"""
Find the imap_id creating the issue
return the data related to the imap_ids
"""
new_data = {}
for the_id in imap_ids:
try:
single_data = self.src.fetch(the_id, self.request)
new_data.update(single_data)
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
return new_data
def __iter__(self):
return self
def next(self):
"""
Return the next batch of elements
"""
new_data = {}
batch = self.to_fetch[:self.def_batch_size]
if len(batch) <= 0:
raise StopIteration
try:
new_data = self.src.fetch(batch, self.request)
self.to_fetch = self.to_fetch[self.def_batch_size:]
return new_data
except imaplib.IMAP4.error, _:
new_data = self.individual_fetch(batch)
return new_data
def reset(self):
"""
Restart from the beginning
"""
        self.to_fetch = list(self.imap_ids)
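# Illustrative usage sketch for IMAPBatchFetcher (assumes src is a connected
# imap_utils.GIMAPFetcher and imap_ids comes from a previous src.search()):
#   fetcher = IMAPBatchFetcher(src, imap_ids, error_report,
#                              imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA,
#                              default_batch_size = 100)
#   for batch in fetcher:       # each batch is a dict {imap_id: fetched data}
#       for the_id in batch:
#           pass                # process batch[the_id] here
# On an imaplib.IMAP4.error, the fetcher falls back to individual_fetch() to
# isolate the offending imap id and report it in error_report.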
class GMVaulter(object):
"""
Main object operating over gmail
"""
NB_GRP_OF_ITEMS = 1400
EMAIL_RESTORE_PROGRESS = 'email_last_id.restore'
CHAT_RESTORE_PROGRESS = 'chat_last_id.restore'
EMAIL_SYNC_PROGRESS = 'email_last_id.sync'
CHAT_SYNC_PROGRESS = 'chat_last_id.sync'
OP_EMAIL_RESTORE = "EM_RESTORE"
OP_EMAIL_SYNC = "EM_SYNC"
OP_CHAT_RESTORE = "CH_RESTORE"
OP_CHAT_SYNC = "CH_SYNC"
OP_TO_FILENAME = { OP_EMAIL_RESTORE : EMAIL_RESTORE_PROGRESS,
OP_EMAIL_SYNC : EMAIL_SYNC_PROGRESS,
OP_CHAT_RESTORE : CHAT_RESTORE_PROGRESS,
OP_CHAT_SYNC : CHAT_SYNC_PROGRESS
}
def __init__(self, db_root_dir, host, port, login, credential, read_only_access = True, use_encryption = False): #pylint:disable-msg=R0913
"""
constructor
"""
self.db_root_dir = db_root_dir
#create dir if it doesn't exist
gmvault_utils.makedirs(self.db_root_dir)
#keep track of login email
self.login = login
# create source and try to connect
self.src = imap_utils.GIMAPFetcher(host, port, login, credential, readonly_folder = read_only_access)
self.src.connect()
LOG.debug("Connected")
self.use_encryption = use_encryption
#to report gmail imap problems
self.error_report = { 'empty' : [] ,
'cannot_be_fetched' : [],
'emails_in_quarantine' : [],
'reconnections' : 0}
#instantiate gstorer
self.gstorer = gmvault_db.GmailStorer(self.db_root_dir, self.use_encryption)
        #timer used to measure time spent in the different operations
self.timer = gmvault_utils.Timer()
@classmethod
def get_imap_request_btw_2_dates(cls, begin_date, end_date):
"""
Return the imap request for those 2 dates
"""
imap_req = 'Since %s Before %s' % (gmvault_utils.datetime2imapdate(begin_date), gmvault_utils.datetime2imapdate(end_date))
return imap_req
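    # Illustrative example (hypothetical dates): for begin=2012-01-01 and
    # end=2012-02-01 this builds a request of the form
    # 'Since 01-Jan-2012 Before 01-Feb-2012' (the exact day formatting is
    # delegated to gmvault_utils.datetime2imapdate).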
def get_operation_report(self):
"""
Return the error report
"""
the_str = "\n================================================================\n"\
"Number of reconnections: %d.\nNumber of emails quarantined: %d.\n" \
"Number of emails that could not be fetched: %d.\n" \
"Number of emails that were returned empty by gmail: %d\n================================================================" \
% (self.error_report['reconnections'], \
len(self.error_report['emails_in_quarantine']), \
len(self.error_report['cannot_be_fetched']), \
len(self.error_report['empty'])
)
LOG.debug("error_report complete structure = %s" % (self.error_report))
return the_str
def _sync_between(self, begin_date, end_date, storage_dir, compress = True):
"""
sync between 2 dates
"""
#create storer
gstorer = gmvault_db.GmailStorer(storage_dir, self.use_encryption)
#search before the next month
imap_req = self.get_imap_request_btw_2_dates(begin_date, end_date)
ids = self.src.search(imap_req)
#loop over all ids, get email store email
for the_id in ids:
#retrieve email from destination email account
data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_ALL_INFO)
file_path = gstorer.bury_email(data[the_id], compress = compress)
LOG.critical("Stored email %d in %s" %(the_id, file_path))
@classmethod
def _get_next_date(cls, a_current_date, start_month_beginning = False):
"""
return the next date necessary to build the imap req
"""
if start_month_beginning:
dummy_date = a_current_date.replace(day=1)
else:
dummy_date = a_current_date
# the next date = current date + 1 month
return dummy_date + datetime.timedelta(days=31)
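    # Example: for a_current_date=2012-01-15 with start_month_beginning=True,
    # the date is first snapped to 2012-01-01 and 31 days are added, giving
    # 2012-02-01, i.e. roughly a one-month window per imap request.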
@classmethod
def check_email_on_disk(cls, a_gstorer, a_id, a_dir = None):
"""
Factory method to create the object if it exists
"""
try:
a_dir = a_gstorer.get_directory_from_id(a_id, a_dir)
if a_dir:
return a_gstorer.unbury_metadata(a_id, a_dir)
except ValueError, json_error:
LOG.exception("Cannot read file %s. Try to fetch the data again" % ('%s.meta' % (a_id)), json_error )
return None
@classmethod
def _metadata_needs_update(cls, curr_metadata, new_metadata, chat_metadata = False):
"""
Needs update
"""
if curr_metadata[gmvault_db.GmailStorer.ID_K] != new_metadata['X-GM-MSGID']:
raise Exception("Gmail id has changed for %s" % (curr_metadata['id']))
#check flags
prev_set = set(new_metadata['FLAGS'])
for flag in curr_metadata['flags']:
if flag not in prev_set:
return True
else:
prev_set.remove(flag)
if len(prev_set) > 0:
return True
#check labels
prev_labels = set(new_metadata['X-GM-LABELS'])
if chat_metadata: #add gmvault-chats labels
prev_labels.add(gmvault_db.GmailStorer.CHAT_GM_LABEL)
for label in curr_metadata['labels']:
if label not in prev_labels:
return True
else:
prev_labels.remove(label)
if len(prev_labels) > 0:
return True
return False
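    # Worked example of the set difference above: disk flags ['\Seen'] checked
    # against Gmail FLAGS ('\Seen', '\Flagged') leave prev_set = {'\Flagged'}
    # after the loop, so an update is needed (a flag was added remotely).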
def _check_email_db_ownership(self, ownership_control):
"""
Check email database ownership.
If ownership control activated then fail if a new additional owner is added.
Else if no ownership control allow one more user and save it in the list of owners
Return the number of owner this will be used to activate or not the db clean.
Activating a db cleaning on a multiownership db would be a catastrophy as it would delete all
the emails from the others users.
"""
#check that the gmvault-db is not associated with another user
db_owners = self.gstorer.get_db_owners()
if ownership_control:
if len(db_owners) > 0 and self.login not in db_owners: #db owner should not be different unless bypass activated
raise Exception("The email database %s is already associated with one or many logins: %s."\
" Use option (-m, --multiple-db-owner) if you want to link it with %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
else:
if len(db_owners) == 0:
LOG.critical("Establish %s as the owner of the Gmvault db %s." % (self.login, self.db_root_dir))
elif len(db_owners) > 0 and self.login not in db_owners:
LOG.critical("The email database %s is hosting emails from %s. It will now also store emails from %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
#try to save db_owner in the list of owners
self.gstorer.store_db_owner(self.login)
def _sync_chats(self, imap_req, compress, restart):
"""
backup the chat messages
"""
chat_dir = None
timer = gmvault_utils.Timer() #start local timer for chat
timer.start()
LOG.debug("Before selection")
if self.src.is_visible('CHATS'):
chat_dir = self.src.select_folder('CHATS')
LOG.debug("Selection is finished")
if chat_dir:
#imap_ids = self.src.search({ 'type': 'imap', 'req': 'ALL' })
imap_ids = self.src.search(imap_req)
# check if there is a restart
if restart:
LOG.critical("Restart mode activated. Need to find information in Gmail, be patient ...")
imap_ids = self.get_gmails_ids_left_to_sync(self.OP_CHAT_SYNC, imap_ids)
total_nb_chats_to_process = len(imap_ids) # total number of emails to get
LOG.critical("%d chat messages to be fetched." % (total_nb_chats_to_process))
nb_chats_processed = 0
to_fetch = set(imap_ids)
batch_fetcher = IMAPBatchFetcher(self.src, imap_ids, self.error_report, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = gmvault_utils.get_conf_defaults().getint("General","nb_messages_per_batch",500))
for new_data in batch_fetcher:
for the_id in new_data:
if new_data.get(the_id, None):
                        LOG.debug("\nProcess imap chat id %s" % ( the_id ))
                        gid = new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
the_dir = self.gstorer.get_sub_chats_dir()
LOG.critical("Process chat num %d (imap_id:%s) into %s." % (nb_chats_processed, the_id, the_dir))
#pass the dir and the ID
curr_metadata = GMVaulter.check_email_on_disk( self.gstorer , \
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID], \
the_dir)
#if on disk check that the data is not different
if curr_metadata:
if self._metadata_needs_update(curr_metadata, new_data[the_id], chat_metadata = True):
LOG.debug("Chat with imap id %s and gmail id %s has changed. Updated it." % (the_id, gid))
#restore everything at the moment
gid = self.gstorer.bury_chat_metadata(new_data[the_id], local_dir = the_dir)
#update local index id gid => index per directory to be thought out
else:
LOG.debug("The metadata for chat %s already exists and is identical to the one on GMail." % (gid))
else:
try:
#get the data
email_data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_DATA_ONLY )
new_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY] = email_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY]
# store data on disk within year month dir
gid = self.gstorer.bury_chat(new_data[the_id], local_dir = the_dir, compress = compress)
#update local index id gid => index per directory to be thought out
LOG.debug("Create and store chat with imap id %s, gmail id %s." % (the_id, gid))
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
nb_chats_processed += 1
#indicate every 50 messages the number of messages left to process
left_emails = (total_nb_chats_to_process - nb_chats_processed)
if (nb_chats_processed % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be stored (time estimate %s).==\n" % \
(nb_chats_processed, timer.seconds_to_human_time(elapsed), \
left_emails, \
timer.estimate_time_left(nb_chats_processed, elapsed, left_emails)))
# save id every 10 restored emails
if (nb_chats_processed % 10) == 0:
if gid:
self.save_lastid(self.OP_CHAT_SYNC, gid)
else:
LOG.info("Could not process imap with id %s. Ignore it\n")
self.error_report['empty'].append((the_id, None))
to_fetch -= set(new_data.keys()) #remove all found keys from to_fetch set
for the_id in to_fetch:
# case when gmail IMAP server returns OK without any data whatsoever
# eg. imap uid 142221L ignore it
LOG.info("Could not process chat with id %s. Ignore it\n")
self.error_report['empty_chats'].append((the_id, None))
else:
imap_ids = []
LOG.critical("\nChats synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def _sync_emails(self, imap_req, compress, restart):
"""
First part of the double pass strategy:
- create and update emails in db
"""
timer = gmvault_utils.Timer()
timer.start()
#select all mail folder using the constant name defined in GIMAPFetcher
self.src.select_folder('ALLMAIL')
# get all imap ids in All Mail
imap_ids = self.src.search(imap_req)
# check if there is a restart
if restart:
LOG.critical("Restart mode activated for emails. Need to find information in Gmail, be patient ...")
imap_ids = self.get_gmails_ids_left_to_sync(self.OP_EMAIL_SYNC, imap_ids)
total_nb_emails_to_process = len(imap_ids) # total number of emails to get
LOG.critical("%d emails to be fetched." % (total_nb_emails_to_process))
nb_emails_processed = 0
to_fetch = set(imap_ids)
batch_fetcher = IMAPBatchFetcher(self.src, imap_ids, self.error_report, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = gmvault_utils.get_conf_defaults().getint("General","nb_messages_per_batch",500))
#LAST Thing to do remove all found ids from imap_ids and if ids left add missing in report
for new_data in batch_fetcher:
for the_id in new_data:
if new_data.get(the_id, None):
LOG.debug("\nProcess imap id %s" % ( the_id ))
gid = new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
the_dir = gmvault_utils.get_ym_from_datetime(new_data[the_id][imap_utils.GIMAPFetcher.IMAP_INTERNALDATE])
LOG.critical("Process email num %d (imap_id:%s) from %s." % (nb_emails_processed, the_id, the_dir))
#pass the dir and the ID
curr_metadata = GMVaulter.check_email_on_disk( self.gstorer , \
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID], \
the_dir)
#if on disk check that the data is not different
if curr_metadata:
LOG.debug("metadata for %s already exists. Check if different." % (gid))
if self._metadata_needs_update(curr_metadata, new_data[the_id]):
LOG.debug("Chat with imap id %s and gmail id %s has changed. Updated it." % (the_id, gid))
#restore everything at the moment
gid = self.gstorer.bury_metadata(new_data[the_id], local_dir = the_dir)
#update local index id gid => index per directory to be thought out
else:
LOG.debug("On disk metadata for %s is up to date." % (gid))
else:
try:
#get the data
LOG.debug("Get Data for %s." % (gid))
email_data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_DATA_ONLY )
new_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY] = email_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY]
# store data on disk within year month dir
gid = self.gstorer.bury_email(new_data[the_id], local_dir = the_dir, compress = compress)
#update local index id gid => index per directory to be thought out
LOG.debug("Create and store email with imap id %s, gmail id %s." % (the_id, gid))
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
nb_emails_processed += 1
#indicate every 50 messages the number of messages left to process
left_emails = (total_nb_emails_to_process - nb_emails_processed)
if (nb_emails_processed % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be stored (time estimate %s).==\n" % \
(nb_emails_processed, \
timer.seconds_to_human_time(elapsed), left_emails, \
timer.estimate_time_left(nb_emails_processed, elapsed, left_emails)))
# save id every 10 restored emails
if (nb_emails_processed % 10) == 0:
if gid:
self.save_lastid(self.OP_EMAIL_SYNC, gid)
else:
LOG.info("Could not process imap with id %s. Ignore it\n")
self.error_report['empty'].append((the_id, gid if gid else None))
to_fetch -= set(new_data.keys()) #remove all found keys from to_fetch set
for the_id in to_fetch:
# case when gmail IMAP server returns OK without any data whatsoever
# eg. imap uid 142221L ignore it
LOG.info("Could not process imap with id %s. Ignore it\n")
self.error_report['empty'].append((the_id, None))
LOG.critical("\nEmails synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def sync(self, imap_req = imap_utils.GIMAPFetcher.IMAP_ALL, compress_on_disk = True, db_cleaning = False, ownership_checking = True, \
restart = False, emails_only = False, chats_only = False):
"""
sync mode
"""
#check ownership to have one email per db unless user wants different
#save the owner if new
self._check_email_db_ownership(ownership_checking)
if not compress_on_disk:
LOG.critical("Disable compression when storing emails.")
if self.use_encryption:
LOG.critical("Encryption activated. All emails will be encrypted before to be stored.")
LOG.critical("Please take care of the encryption key stored in (%s) or all"\
" your stored emails will become unreadable." % (gmvault_db.GmailStorer.get_encryption_key_path(self.db_root_dir)))
self.timer.start() #start syncing emails
if not chats_only:
# backup emails
LOG.critical("Start emails synchronization.\n")
self._sync_emails(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("Skip emails synchronization.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats synchronization.\n")
self._sync_chats(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("\nSkip chats synchronization.\n")
        #delete suppressed emails from DB since last sync
if len(self.gstorer.get_db_owners()) <= 1:
self.check_clean_db(db_cleaning)
else:
LOG.critical("Deactivate database cleaning on a multi-owners Gmvault db.")
LOG.critical("Synchronisation operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def _delete_sync(self, imap_ids, db_gmail_ids, db_gmail_ids_info, msg_type):
"""
Delete emails from the database if necessary
imap_ids : all remote imap_ids to check
db_gmail_ids_info : info read from metadata
msg_type : email or chat
"""
# optimize nb of items
nb_items = self.NB_GRP_OF_ITEMS if len(imap_ids) >= self.NB_GRP_OF_ITEMS else len(imap_ids)
LOG.critical("Call Gmail to check the stored %ss against the Gmail %ss ids and see which ones have been deleted.\n\n"\
"This might take a few minutes ...\n" % (msg_type, msg_type))
#calculate the list elements to delete
#query nb_items items in one query to minimise number of imap queries
for group_imap_id in itertools.izip_longest(fillvalue=None, *[iter(imap_ids)]*nb_items):
# if None in list remove it
if None in group_imap_id:
group_imap_id = [ im_id for im_id in group_imap_id if im_id != None ]
#LOG.debug("Interrogate Gmail Server for %s" % (str(group_imap_id)))
data = self.src.fetch(group_imap_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
# syntax for 2.7 set comprehension { data[key][imap_utils.GIMAPFetcher.GMAIL_ID] for key in data }
# need to create a list for 2.6
db_gmail_ids.difference_update([data[key][imap_utils.GIMAPFetcher.GMAIL_ID] for key in data ])
if len(db_gmail_ids) == 0:
break
LOG.critical("Will delete %s %s(s) from gmvault db.\n" % (len(db_gmail_ids), msg_type) )
for gm_id in db_gmail_ids:
LOG.critical("gm_id %s not in the Gmail server. Delete it." % (gm_id))
self.gstorer.delete_emails([(gm_id, db_gmail_ids_info[gm_id])], msg_type)
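    # Note on the grouping idiom above (illustrative values):
    # itertools.izip_longest(fillvalue=None, *[iter(ids)]*n) slices ids into
    # n-sized tuples and pads the last one with None, e.g. for ids=[1,2,3,4,5]
    # and n=2 it yields (1, 2), (3, 4), (5, None).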
def get_gmails_ids_left_to_sync(self, op_type, imap_ids):
"""
        Get the ids that still need to be synced.
Return a list of ids
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. This should not happen, send the error to the software developers." % (op_type))
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id.sync file %s doesn't exist.\nSync the full list of backed up emails." %(filepath))
return imap_ids
        with open(filepath, 'r') as f:
            json_obj = json.load(f)
last_id = json_obj['last_id']
last_id_index = -1
new_gmail_ids = imap_ids
try:
#get imap_id from stored gmail_id
dummy = self.src.search({'type':'imap', 'req':'X-GM-MSGID %s' % (last_id)})
imap_id = dummy[0]
last_id_index = imap_ids.index(imap_id)
LOG.critical("Restart from gmail id %s (imap id %s)." % (last_id, imap_id))
new_gmail_ids = imap_ids[last_id_index:]
except Exception, _: #ignore any exception and try to get all ids in case of problems. pylint:disable=W0703
#element not in keys return current set of keys
LOG.critical("Error: Cannot restore from last restore gmail id. It is not in Gmail."\
" Sync the complete list of gmail ids requested from Gmail.")
return new_gmail_ids
def check_clean_db(self, db_cleaning):
"""
Check and clean the database (remove file that are not anymore in Gmail
"""
owners = self.gstorer.get_db_owners()
if not db_cleaning: #decouple the 2 conditions for activating cleaning
LOG.debug("db_cleaning is off so ignore removing deleted emails from disk.")
return
elif len(owners) > 1:
LOG.critical("Gmvault db hosting emails from different accounts: %s.\nCannot activate database cleaning." % (", ".join(owners)))
return
else:
LOG.critical("Look for emails/chats that are in the Gmvault db but not in Gmail servers anymore.\n")
#get gmail_ids from db
LOG.critical("Read all gmail ids from the Gmvault db. It might take a bit of time ...\n")
timer = gmvault_utils.Timer() # needed for enhancing the user information
timer.start()
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids()
LOG.critical("Found %s email(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
#create a set of keys
db_gmail_ids = set(db_gmail_ids_info.keys())
# get all imap ids in All Mail
self.src.select_folder('ALLMAIL') #go to all mail
imap_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL) #search all
LOG.debug("Got %s emails imap_id(s) from the Gmail Server." % (len(imap_ids)))
        #delete suppressed emails from DB since last sync
self._delete_sync(imap_ids, db_gmail_ids, db_gmail_ids_info, 'email')
# get all chats ids
if self.src.is_visible('CHATS'):
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Found %s chat(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
self.src.select_folder('CHATS') #go to chats
chat_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL)
db_chat_ids = set(db_gmail_ids_info.keys())
LOG.debug("Got %s chat imap_ids from the Gmail Server." % (len(chat_ids)))
            #delete suppressed chats from DB since last sync
self._delete_sync(chat_ids, db_chat_ids, db_gmail_ids_info , 'chat')
else:
LOG.critical("Chats IMAP Directory not visible on Gmail. Ignore deletion of chats.")
LOG.critical("\nDeletion checkup done in %s." % (timer.elapsed_human_time()))
def remote_sync(self):
"""
Sync with a remote source (IMAP mirror or cloud storage area)
"""
#sync remotely
pass
def save_lastid(self, op_type, gm_id):
"""
        Save the passed gm_id in the progress file matching the operation type.
        For the moment reopen the file every time.
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. This should not happen, send the error to the software developers." % op_type)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
with open(filepath, 'w') as f:
json.dump({'last_id' : gm_id}, f)
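    # Illustrative example (hypothetical login and id): for op_type
    # OP_EMAIL_SYNC and login 'john.doe@gmail.com' this writes the file
    # '<info_dir>/john.doe@gmail.com_email_last_id.sync' containing the JSON
    # payload {"last_id": 1234567890}.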
def get_gmails_ids_left_to_restore(self, op_type, db_gmail_ids_info):
"""
        Get the ids that still need to be restored.
Return a dict key = gm_id, val = directory
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. This should not happen, send the error to the software developers." % (op_type))
#filepath = '%s/%s_%s' % (gmvault_utils.get_home_dir_path(), self.login, filename)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id restore file %s doesn't exist.\nRestore the full list of backed up emails." %(filepath))
return db_gmail_ids_info
        with open(filepath, 'r') as f:
            json_obj = json.load(f)
last_id = json_obj['last_id']
last_id_index = -1
try:
keys = db_gmail_ids_info.keys()
last_id_index = keys.index(last_id)
LOG.critical("Restart from gmail id %s." % (last_id))
except ValueError, _:
#element not in keys return current set of keys
LOG.error("Cannot restore from last restore gmail id. It is not in the disk database.")
new_gmail_ids_info = collections_utils.OrderedDict()
if last_id_index != -1:
for key in db_gmail_ids_info.keys()[last_id_index+1:]:
new_gmail_ids_info[key] = db_gmail_ids_info[key]
else:
new_gmail_ids_info = db_gmail_ids_info
return new_gmail_ids_info
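    # Worked example: if the db holds the ordered ids ['a', 'b', 'c'] and the
    # saved last_id is 'b', the restart resumes with {'c': ...}; if the saved
    # id is unknown, the full dict is returned and everything is restored again.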
def restore(self, pivot_dir = None, extra_labels = [], restart = False, emails_only = False, chats_only = False): #pylint:disable=W0102
"""
Restore emails in a gmail account
"""
self.timer.start() #start restoring
#self.src.select_folder('ALLMAIL') #insure that Gmvault is in ALLMAIL
if not chats_only:
# backup emails
LOG.critical("Start emails restoration.\n")
if pivot_dir:
LOG.critical("Quick mode activated. Will only restore all emails since %s.\n" % (pivot_dir))
self.restore_emails(pivot_dir, extra_labels, restart)
else:
LOG.critical("Skip emails restoration.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats restoration.\n")
self.restore_chats(extra_labels, restart)
else:
LOG.critical("Skip chats restoration.\n")
LOG.critical("Restore operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def common_restore(self, the_type, db_gmail_ids_info, extra_labels = [], restart = False): #pylint:disable=W0102
"""
common_restore
"""
if the_type == "chats":
msg = "chats"
op = self.OP_CHAT_RESTORE
elif the_type == "emails":
msg = "emails"
op = self.OP_EMAIL_RESTORE
LOG.critical("Restore %s in gmail account %s." % (msg, self.login) )
LOG.critical("Read %s info from %s gmvault-db." % (msg, self.db_root_dir))
LOG.critical("Total number of %s to restore %s." % (msg, len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(op, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all %s id left to restore. Still %s %s to do.\n" % (msg, total_nb_emails_to_restore, msg) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
timer = gmvault_utils.Timer() # needed for enhancing the user information
timer.start()
for gm_id in db_gmail_ids_info:
LOG.critical("Restore %s with id %s." % (msg, gm_id))
            email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.debug("Unburied %s with id %s." % (msg, gm_id))
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
labels = labels.union(extra_labels)
# get list of labels to create
labels_to_create = [ label for label in labels if label not in existing_labels]
#create the non existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for %s with id %s." % (msg, gm_id))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
try:
#restore email
self.src.push_email(email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K], \
labels)
LOG.debug("Pushed %s with id %s." % (msg, gm_id))
nb_emails_restored += 1
                #indicate every 50 messages the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (nb_emails_restored % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d %s in %s. %d left to be restored (time estimate %s).==\n" % \
(nb_emails_restored, msg, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
                # save id every 10 restored emails
                if (nb_emails_restored % 10) == 0:
                    self.save_lastid(op, gm_id)
except imaplib.IMAP4.abort, abort:
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
if str(abort).find("=> Gmvault ssl socket error: EOF") >= 0:
LOG.critical("Quarantine %s with gm id %s from %s. "\
"GMAIL IMAP cannot restore it: err={%s}" % (msg, gm_id, db_gmail_ids_info[gm_id], str(abort)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise abort
except imaplib.IMAP4.error, err:
LOG.error("Catched IMAP Error %s" % (str(err)))
LOG.exception(err)
#When the email cannot be read from Database because it was empty when returned by gmail imap
#quarantine it.
if str(err) == "APPEND command error: BAD ['Invalid Arguments: Unable to parse message']":
LOG.critical("Quarantine %s with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (msg, gm_id, db_gmail_ids_info[gm_id], str(err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
else:
raise err
except imap_utils.PushEmailError, p_err:
LOG.error("Catch the following exception %s" % (str(p_err)))
LOG.exception(p_err)
if p_err.quarantined():
LOG.critical("Quarantine %s with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (msg, gm_id, db_gmail_ids_info[gm_id], str(p_err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
else:
raise p_err
except Exception, err:
LOG.error("Catch the following exception %s" % (str(err)))
LOG.exception(err)
raise err
return self.error_report
def old_restore_chats(self, extra_labels = [], restart = False): #pylint:disable=W0102
"""
restore chats
"""
LOG.critical("Restore chats in gmail account %s." % (self.login) )
LOG.critical("Read chats info from %s gmvault-db." % (self.db_root_dir))
        #for the restore, save last_restored_id in .gmvault/last_restored_id
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Total number of chats to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_CHAT_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all chats id left to restore. Still %s chats to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
timer = gmvault_utils.Timer() # needed for enhancing the user information
timer.start()
for gm_id in db_gmail_ids_info:
LOG.critical("Restore chat with id %s." % (gm_id))
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.debug("Unburied chat with id %s." % (gm_id))
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
labels = labels.union(extra_labels)
# get list of labels to create
labels_to_create = [ label for label in labels if label not in existing_labels]
#create the non existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for chat with id %s." % (gm_id))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
try:
#restore email
self.src.push_email(email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K], \
labels)
LOG.debug("Pushed chat with id %s." % (gm_id))
nb_emails_restored += 1
                #indicate every 50 messages the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (nb_emails_restored % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d chats in %s. %d left to be restored (time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
                # save id every 10 restored emails
if (nb_emails_restored % 10) == 0:
self.save_lastid(self.OP_CHAT_RESTORE, gm_id)
except imaplib.IMAP4.abort, abort:
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
if str(abort).find("=> Gmvault ssl socket error: EOF") >= 0:
LOG.critical("Quarantine email with gm id %s from %s. "\
"GMAIL IMAP cannot restore it: err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(abort)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise abort
except imaplib.IMAP4.error, err:
LOG.error("Catched IMAP Error %s" % (str(err)))
LOG.exception(err)
#When the email cannot be read from Database because it was empty when returned by gmail imap
#quarantine it.
if str(err) == "APPEND command error: BAD ['Invalid Arguments: Unable to parse message']":
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
else:
raise err
except imap_utils.PushEmailError, p_err:
LOG.error("Catch the following exception %s" % (str(p_err)))
LOG.exception(p_err)
if p_err.quarantined():
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(p_err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
else:
raise p_err
except Exception, err:
LOG.error("Catch the following exception %s" % (str(err)))
LOG.exception(err)
raise err
return self.error_report
def restore_chats(self, extra_labels = [], restart = False): #pylint:disable=W0102
"""
restore chats
"""
LOG.critical("Restore chats in gmail account %s." % (self.login) )
LOG.critical("Read chats info from %s gmvault-db." % (self.db_root_dir))
        #for the restore, save last_restored_id in .gmvault/last_restored_id
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Total number of chats to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_CHAT_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all chats id left to restore. Still %s chats to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General","restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General","nb_messages_per_restore_batch", 100)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = itertools.ifilter(lambda x: x != None, group_imap_ids)
labels_to_create = set() #create label set
labels_to_create.update(extra_labels) # add extra labels to applied to all emails
LOG.critical("Pushing the chats content of the current batch of %d emails.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing chat content with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
try:
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
LOG.debug("label = %s\n" % (label))
labels_to_apply[str(label)] = imap_id
# get list of labels to create (do a union with labels to create)
labels_to_create.update([ label for label in labels if label not in existing_labels])
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for chat with id %s." % (gm_id))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of %d emails" % (nb_items))
try:
LOG.debug("Changing directory. Going into ALLMAIL")
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
if isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
            #indicate after each batch the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d chats in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
# save id every nb_items restored emails
# add the last treated gm_id
            self.save_lastid(self.OP_CHAT_RESTORE, last_id)
return self.error_report
def restore_emails(self, pivot_dir = None, extra_labels = [], restart = False):
"""
restore emails in a gmail account using batching to group restore
If you are not in "All Mail" Folder, it is extremely fast to push emails.
But it is not possible to reapply labels if you are not in All Mail because the uid which is returned
is dependant on the folder. On the other hand, you can restore labels in batch which would help gaining lots of time.
The idea is to get a batch of 50 emails and push them all in the mailbox one by one and get the uid for each of them.
Then create a dict of labels => uid_list and for each label send a unique store command after having changed dir
"""
LOG.critical("Restore emails in gmail account %s." % (self.login) )
LOG.critical("Read email info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids(pivot_dir)
LOG.critical("Total number of elements to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_EMAIL_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all emails id left to restore. Still %s emails to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General","restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General","nb_messages_per_restore_batch", 5)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = itertools.ifilter(lambda x: x != None, group_imap_ids)
labels_to_create = set() #create label set
labels_to_create.update(extra_labels) # add extra labels to applied to all emails
LOG.critical("Pushing the email content of the current batch of %d emails.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing email body with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
try:
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
LOG.debug("label = %s\n" % (label))
labels_to_apply[str(label)] = imap_id
# get list of labels to create (do a union with labels to create)
labels_to_create.update([ label for label in labels if label not in existing_labels])
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for email with id %s." % (gm_id))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of %d emails" % (nb_items))
try:
LOG.debug("Changing directory. Going into ALLMAIL")
t = gmvault_utils.Timer()
t.start()
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
LOG.debug("Changed dir. Operation time = %s ms" % (t.elapsed_ms()))
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
if isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(err)))
self.gstorer.quarantine_email(gm_id)
self.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
            #indicate after each batch the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
            # save id after each restored batch
# add the last treated gm_id
self.save_lastid(self.OP_EMAIL_RESTORE, last_id)
return self.error_report
def old_restore_emails(self, pivot_dir = None, extra_labels = [], restart = False):
"""
restore emails in a gmail account using batching to group restore
If you are not in "All Mail" Folder, it is extremely fast to push emails.
But it is not possible to reapply labels if you are not in All Mail because the uid which is returned
is dependant on the folder. On the other hand, you can restore labels in batch which would help gaining lots of time.
The idea is to get a batch of 50 emails and push them all in the mailbox one by one and get the uid for each of them.
Then create a dict of labels => uid_list and for each label send a unique store command after having changed dir
"""
LOG.critical("Restore emails in gmail account %s." % (self.login) )
LOG.critical("Read email info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids(pivot_dir)
LOG.critical("Total number of elements to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_EMAIL_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all emails id left to restore. Still %s emails to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
new_conn = self.src.spawn_connection()
job_queue = Queue()
job_nb = 1
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
labelling_thread = LabellingThread(group=None, target=None, name="LabellingThread", args=(), kwargs={"queue" : job_queue, \
"conn" : new_conn, \
"gmvaulter" : self, \
"total_nb_emails_to_restore": total_nb_emails_to_restore,
"timer": timer}, \
verbose=None)
labelling_thread.start()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General","restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
nb_items = gmvault_utils.get_conf_defaults().get_int("General","nb_messages_per_restore_batch", 10)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = itertools.ifilter(lambda x: x != None, group_imap_ids)
labels_to_create = set() #create label set
labels_to_create.update(extra_labels) # add extra labels to applied to all emails
LOG.critical("Pushing the email content of the current batch of %d emails.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing email body with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
try:
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
LOG.debug("label = %s\n" % (label))
labels_to_apply[str(label)] = imap_id
# get list of labels to create (do a union with labels to create)
labels_to_create.update([ label for label in labels if label not in existing_labels])
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for email with id %s." % (gm_id))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
job_queue.put(LabelJob(1, "LabellingJob-%d" % (job_nb), labels_to_apply , last_id, nb_items, None))
job_nb +=1
labels_to_apply = collections_utils.SetMultimap()
return self.error_report
class LabelJob(object):
    """
    Job holding the labels to apply to a batch of pushed emails.
    Consumed by the LabellingThread.
    """
    def __init__(self, priority, name, labels_to_create, last_id, nb_items, imapid_gmid_map):
        self.priority = priority
        self.labels = labels_to_create
        self.nb_items = nb_items
        self.last_id = last_id
        self.imap_to_gm = imapid_gmid_map
        self.name = name
def type(self):
return "LABELJOB"
def __cmp__(self, other):
return cmp(self.priority, other.priority)
class StopJob(object):
    """
    Sentinel job telling the LabellingThread to stop.
    """
    def __init__(self, priority):
        self.priority = priority
def type(self):
return "STOPJOB"
def __cmp__(self, other):
return cmp(self.priority, other.priority)
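# Note: the __cmp__ methods above would order jobs by priority if they were
# ever routed through a priority queue; with the plain multiprocessing Queue
# used by old_restore_emails the ordering is simply FIFO.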
class LabellingThread(Process):
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
Process.__init__(self)
self.args = args
self.kwargs = kwargs
self.queue = kwargs.get("queue", None)
self.src = kwargs.get("conn", None)
self.gmvaulter = kwargs.get("gmvaulter", None)
self.total_nb_emails_to_restore = kwargs.get("total_nb_emails_to_restore", None)
self.timer = kwargs.get("timer", None)
self.nb_emails_restored = 0
def run(self):
"""
Listen to the queue
When job label, apply labels to emails and save last_id
If error quarantine it and continue (if 15 consecutive errors stop).
"""
LOG.debug("Labelling Thread. Changing directory. Going into ALLMAIL")
folder_def_location = gmvault_utils.get_conf_defaults().get("General","restore_default_location", "DRAFTS")
t = gmvault_utils.Timer()
t.start()
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
LOG.debug("Changed dir. Operation time = %s ms" % (t.elapsed_ms()))
running = True
while running:
            job = self.queue.get(block = True, timeout = None)
LOG.critical("==== (LabellingThread) ====. Received job %s ====" % (job.name))
if job.type() == "LABELJOB":
# associate labels with emails
labels_to_apply = job.labels
imap_to_gm = job.imap_to_gm
#LOG.critical("Applying labels to the current batch of %d emails" % (job.nb_items))
try:
#for i in range(1,10):
# LOG.critical("Hello")
# #time.sleep(1)
for label in labels_to_apply.keys():
LOG.critical("Apply %s to %s" % (label, labels_to_apply[label]))
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
finally:
#self.queue.task_done()
pass
#to be moved
self.nb_emails_restored += job.nb_items
                #indicate after each job the number of messages left to process
left_emails = (self.total_nb_emails_to_restore - self.nb_emails_restored)
if (left_emails > 0):
elapsed = self.timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(self.nb_emails_restored, self.timer.seconds_to_human_time(elapsed), \
left_emails, self.timer.estimate_time_left(self.nb_emails_restored, elapsed, left_emails)))
                # save id after each labelling job
# add the last treated gm_id
self.gmvaulter.save_lastid(GMVaulter.OP_EMAIL_RESTORE, job.last_id)
elif job.type() == "STOPJOB":
self.queue.task_done()
running = False
#self.src.select_folder(folder_def_location)
LOG.critical("==== (LabellingThread) ====. End of job %s ====" % (job.name))
# ---- File: gaubert_gmvault/src/sandbox/json_tests.py ----
# -*- coding: utf-8 -*-
"""
Created on Nov 27, 2012
@author: aubert
"""
import json
string_to_test = u"Чаты"
labels = [ 0, string_to_test ]
# NOTE: snippet kept for reference; it looks like a logging-handler format()
# method (it takes `self`) and is not called directly in this sandbox script.
def format(self, record):
"""
Formats a record with the given formatter. If no formatter
is set, the record message is returned. Generally speaking the
return value is most likely a unicode string, but nothing in
the handler interface requires a formatter to return a unicode
string.
The combination of a handler and formatter might have the
formatter return an XML element tree for example.
"""
# Decode the message to support non-ascii characters
# We must choose the charset manually
for record_charset in 'UTF-8', 'US-ASCII', 'ISO-8859-1':
try:
record.message = record.message.decode(record_charset)
self.encoding = record_charset
except UnicodeError:
pass
else:
break
if self.formatter is None:
return record.message
return self.formatter(record, self)
def data_to_test():
"""
data to test
"""
meta_obj = { 'labels' : labels }
with open("/tmp/test.json", 'w') as f:
json.dump(meta_obj, f)
print("Data stored")
with open("/tmp/test.json") as f:
metadata = json.load(f)
new_labels = []
for label in metadata['labels']:
if isinstance(label, (int, long, float, complex)):
label = unicode(str(label))
new_labels.append(label)
metadata['labels'] = new_labels
print("metadata = %s\n" % metadata)
print("type(metadata['labels'][0]) = %s" % (type(metadata['labels'][0])))
print("metadata['labels'][0] = %s" % (metadata['labels'][0]))
print("type(metadata['labels'][1]) = %s" % (type(metadata['labels'][1])))
print("metadata['labels'][1] = %s" % (metadata['labels'][1]))
def header_regexpr_test():
"""
"""
#the_str = 'X-Gmail-Received: cef1a177794b2b6282967d22bcc2b6f49447a70d\r\nMessage-ID: <8b230a7105082305316d9c1a54@mail.gmail.com>\r\nSubject: Hessian ssl\r\n\r\n'
the_str = 'Message-ID: <8b230a7105082305316d9c1a54@mail.gmail.com>\r\nX-Gmail-Received: cef1a177794b2b6282967d22bcc2b6f49447a70d\r\nSubject: Hessian ssl\r\n\r\n'
import gmv.gmvault_db as gmvault_db
matched = gmvault_db.GmailStorer.HF_SUB_RE.search(the_str)
if matched:
subject = matched.group('subject')
print("subject matched = <%s>\n" % (subject))
# look for a msg id
matched = gmvault_db.GmailStorer.HF_MSGID_RE.search(the_str)
if matched:
msgid = matched.group('msgid')
print("msgid matched = <%s>\n" % (msgid))
matched = gmvault_db.GmailStorer.HF_XGMAIL_RECV_RE.search(the_str)
if matched:
received = matched.group('received').strip()
print("matched = <%s>\n" % (received))
if __name__ == '__main__':
header_regexpr_test()
#data_to_test()
# ---- File: gaubert_gmvault/src/sandbox/python_api_tests.py ----
#!/usr/bin/python
import argparse
import httplib2
from apiclient.discovery import build
from oauth2client.client import flow_from_clientsecrets
from oauth2client.file import Storage
from oauth2client.tools import run_flow, argparser
# Parse the command-line arguments (e.g. --noauth_local_webserver)
parser = argparse.ArgumentParser(parents=[argparser])
flags = parser.parse_args()
# Path to the client_secret.json file downloaded from the Developer Console
#CLIENT_SECRET_FILE = 'h:/.client_secret.json'
CLIENT_SECRET_FILE = '/home/gmv/.client_secret.json'
# Check https://developers.google.com/gmail/api/auth/scopes
# for all available scopes
OAUTH_SCOPE = 'https://www.googleapis.com/auth/gmail.readonly'
# Location of the credentials storage file
STORAGE = Storage('gmail.storage')
# Start the OAuth flow to retrieve credentials
flow = flow_from_clientsecrets(CLIENT_SECRET_FILE, scope=OAUTH_SCOPE)
http = httplib2.Http()
# Try to retrieve credentials from storage or run the flow to generate them
credentials = STORAGE.get()
if credentials is None or credentials.invalid:
credentials = run_flow(flow, STORAGE, flags, http=http)
# Authorize the httplib2.Http object with our credentials
http = credentials.authorize(http)
# Build the Gmail service from discovery
gmail_service = build('gmail', 'v1', http=http)
# Retrieve a page of threads
threads = gmail_service.users().threads().list(userId='me').execute()
# Print ID for each thread
if threads['threads']:
for thread in threads['threads']:
print 'Thread ID: %s' % (thread['id'])
# ---- File: gaubert_gmvault/src/sandbox/oauth2_runner.py ----
#!/usr/bin/python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs client tasks for testing IMAP OAuth2 authentication.
To use this script, you'll need to have registered with Google as an OAuth
application and obtained an OAuth client ID and client secret.
See http://code.google.com/apis/accounts/docs/OAuth2.html for instructions on
registering and for documentation of the APIs invoked by this code.
This script has 3 modes of operation.
1. The first mode is used to generate and authorize an OAuth2 token, the
first step in logging in via OAuth2.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--generate_oauth2_token
The script will converse with Google and generate an oauth request
token, then present you with a URL you should visit in your browser to
authorize the token. Once you get the verification code from the Google
website, enter it into the script to get your OAuth access token. The output
from this command will contain the access token, a refresh token, and some
metadata about the tokens. The access token can be used until it expires, and
the refresh token lasts indefinitely, so you should record these values for
reuse.
2. The script will generate new access tokens using a refresh token.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--refresh_token=1/Yzm6MRy4q1xi7Dx2DuWXNgT6s37OrP_DW_IoyTum4YA
3. The script will generate an OAuth2 string that can be fed
directly to IMAP or SMTP. This is triggered with the --generate_oauth2_string
option.
oauth2 --generate_oauth2_string --user=xxx@gmail.com \
--access_token=ya29.AGy[...]ezLg
The output of this mode will be a base64-encoded string. To use it, connect to an
IMAPFE and pass it as the second argument to the AUTHENTICATE command.
a AUTHENTICATE XOAUTH2 a9sha9sfs[...]9dfja929dk==
"""
import base64
import imaplib
import json
from optparse import OptionParser
import smtplib
import sys
import urllib
def SetupOptionParser():
# Usage message is the module's docstring.
parser = OptionParser(usage=__doc__)
parser.add_option('--generate_oauth2_token',
action='store_true',
dest='generate_oauth2_token',
help='generates an OAuth2 token for testing')
parser.add_option('--generate_oauth2_string',
action='store_true',
dest='generate_oauth2_string',
help='generates an initial client response string for '
'OAuth2')
parser.add_option('--client_id',
default=None,
help='Client ID of the application that is authenticating. '
'See OAuth2 documentation for details.')
parser.add_option('--client_secret',
default=None,
help='Client secret of the application that is '
'authenticating. See OAuth2 documentation for '
'details.')
parser.add_option('--access_token',
default=None,
help='OAuth2 access token')
parser.add_option('--refresh_token',
default=None,
help='OAuth2 refresh token')
parser.add_option('--scope',
default='https://mail.google.com/',
help='scope for the access token. Multiple scopes can be '
'listed separated by spaces with the whole argument '
'quoted.')
parser.add_option('--test_imap_authentication',
action='store_true',
dest='test_imap_authentication',
help='attempts to authenticate to IMAP')
parser.add_option('--test_smtp_authentication',
action='store_true',
dest='test_smtp_authentication',
help='attempts to authenticate to SMTP')
parser.add_option('--user',
default=None,
help='email address of user whose account is being '
'accessed')
return parser
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
# Hardcoded dummy redirect URI for non-web apps.
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def AccountsUrl(command):
"""Generates the Google Accounts URL.
Args:
command: The command to execute.
Returns:
A URL for the given command.
"""
return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command)
def UrlEscape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.quote(text, safe='~-._')
def UrlUnescape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.unquote(text)
def FormatUrlParams(params):
"""Formats parameters into a URL query string.
Args:
params: A key-value map.
Returns:
A URL query string version of the given parameters.
"""
param_fragments = []
for param in sorted(params.iteritems(), key=lambda x: x[0]):
param_fragments.append('%s=%s' % (param[0], UrlEscape(param[1])))
return '&'.join(param_fragments)
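# e.g. FormatUrlParams({'scope': 'https://mail.google.com/', 'response_type': 'code'})
# sorts on the key and escapes each value, yielding:
#   response_type=code&scope=https%3A%2F%2Fmail.google.com%2F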
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
"""Generates the URL for authorizing access.
This uses the "OAuth2 for Installed Applications" flow described at
https://developers.google.com/accounts/docs/OAuth2InstalledApp
Args:
client_id: Client ID obtained by registering your app.
scope: scope for access token, e.g. 'https://mail.google.com'
Returns:
A URL that the user should visit in their browser.
"""
params = {}
params['client_id'] = client_id
params['redirect_uri'] = REDIRECT_URI
params['scope'] = scope
params['response_type'] = 'code'
return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
FormatUrlParams(params))
def AuthorizeTokens(client_id, client_secret, authorization_code):
"""Obtains OAuth access token and refresh token.
This uses the application portion of the "OAuth2 for Installed Applications"
flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
authorization_code: code generated by Google Accounts after user grants
permission.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['code'] = authorization_code
params['redirect_uri'] = REDIRECT_URI
params['grant_type'] = 'authorization_code'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def RefreshToken(client_id, client_secret, refresh_token):
"""Obtains a new token given a refresh token.
See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
refresh_token: A previously-obtained refresh token.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['refresh_token'] = refresh_token
params['grant_type'] = 'refresh_token'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def GenerateOAuth2String(username, access_token, base64_encode=True):
"""Generates an IMAP OAuth2 authentication string.
See https://developers.google.com/google-apps/gmail/oauth2_overview
Args:
username: the username (email address) of the account to authenticate
access_token: An OAuth2 access token.
base64_encode: Whether to base64-encode the output.
Returns:
The SASL argument for the OAuth2 mechanism.
"""
auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
if base64_encode:
auth_string = base64.b64encode(auth_string)
return auth_string
def TestImapAuthentication(user, auth_string):
"""Authenticates to IMAP with the given auth_string.
Prints a debug trace of the attempted IMAP connection.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String.
Must not be base64-encoded, since imaplib does its own base64-encoding.
"""
print
imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
imap_conn.debug = 4
imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
imap_conn.select('INBOX')
def TestSmtpAuthentication(user, auth_string):
"""Authenticates to SMTP with the given auth_string.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, not base64-encoded, as returned by
GenerateOAuth2String.
"""
print
smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
smtp_conn.set_debuglevel(True)
smtp_conn.ehlo('test')
smtp_conn.starttls()
smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
def RequireOptions(options, *args):
missing = [arg for arg in args if getattr(options, arg) is None]
if missing:
print 'Missing options: %s' % ' '.join(missing)
sys.exit(-1)
def main(argv):
options_parser = SetupOptionParser()
(options, args) = options_parser.parse_args()
if options.refresh_token:
RequireOptions(options, 'client_id', 'client_secret')
response = RefreshToken(options.client_id, options.client_secret,
options.refresh_token)
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.generate_oauth2_string:
RequireOptions(options, 'user', 'access_token')
print ('OAuth2 argument:\n' +
GenerateOAuth2String(options.user, options.access_token))
elif options.generate_oauth2_token:
RequireOptions(options, 'client_id', 'client_secret')
print 'To authorize token, visit this url and follow the directions:'
print ' %s' % GeneratePermissionUrl(options.client_id, options.scope)
authorization_code = raw_input('Enter verification code: ')
response = AuthorizeTokens(options.client_id, options.client_secret,
authorization_code)
print 'Refresh Token: %s' % response['refresh_token']
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.test_imap_authentication:
RequireOptions(options, 'user', 'access_token')
TestImapAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
elif options.test_smtp_authentication:
RequireOptions(options, 'user', 'access_token')
TestSmtpAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
else:
options_parser.print_help()
print 'Nothing to do, exiting.'
return
if __name__ == '__main__':
main(sys.argv)
| 12,531
|
Python
|
.py
| 272
| 38.838235
| 93
| 0.687958
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,321
|
unicode_test.py
|
gaubert_gmvault/src/sandbox/unicode_test.py
|
# -*- coding: utf-8 -*-
import sys
import unicodedata
def ascii_hex(str):
new_str = ""
for c in str:
new_str += "%s=hex[%s]," % (c,hex(ord(c)))
return new_str
def convert_to_utf8(a_str):
"""
"""
    if not isinstance(a_str, unicode):
#import chardet
#char_enc = chardet.detect(a_str)
#print("detected encoding = %s" % (char_enc))
#print("system machine encoding = %s" % (sys.getdefaultencoding()))
#u_str = unicode(a_str, char_enc['encoding'], errors='ignore')
u_str = unicode(a_str, 'cp437', errors='ignore')
else:
print("Already unicode do not convert")
u_str = a_str
print("raw unicode = %s" % (u_str))
#u_str = unicodedata.normalize('NFKC',u_str)
u_str = u_str.encode('unicode_escape').decode('unicode_escape')
print("unicode escape = %s" % (u_str))
print("normalized unicode(NFKD) = %s" % (repr(unicodedata.normalize('NFKD',u_str))))
print("normalized unicode(NFKC) = %s" % (repr(unicodedata.normalize('NFKC',u_str))))
print("normalized unicode(NFC) = %s" % (repr(unicodedata.normalize('NFC',u_str))))
print("normalized unicode(NFD) = %s" % (repr(unicodedata.normalize('NFD',u_str))))
hex_s = ascii_hex(u_str)
print("Hex ascii %s" % (hex_s))
utf8_arg = u_str
#utf8_arg = u_str.encode("utf-8")
return utf8_arg
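# A concrete normalization example (standard Unicode behaviour): u'\u00e9' ('é')
# stays a single code point under NFC, while NFD decomposes it into a base
# letter plus a combining accent:
#   unicodedata.normalize('NFD', u'\u00e9') == u'e\u0301'
#   unicodedata.normalize('NFC', u'e\u0301') == u'\u00e9'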
if __name__ == '__main__':
u_str = u"label:èévader"
convert_to_utf8(sys.argv[1])
#convert_to_utf8(u_str)
| 1,458
|
Python
|
.py
| 38
| 33.473684
| 88
| 0.617816
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,322
|
chardet_test.py
|
gaubert_gmvault/src/sandbox/chardet_test.py
|
# -*- coding: utf-8 -*-
import sys
import chardet
import codecs
print("system encoding: %s" % (sys.getfilesystemencoding()))
first_arg = sys.argv[1]
#first_arg="réception"
#first_arg="て感じでしょうか研"
print first_arg
print("chardet = %s\n" % chardet.detect(first_arg))
res_char = chardet.detect(first_arg)
print type(first_arg)
first_arg_unicode = first_arg.decode(res_char['encoding'])
print first_arg_unicode
print type(first_arg_unicode)
utf8_arg = first_arg_unicode.encode("utf-8")
print type(utf8_arg)
print utf8_arg
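# For reference, chardet.detect() returns a dict of the form
#   {'encoding': 'utf-8', 'confidence': 0.99}
# (the confidence depends on the input bytes), which is why res_char['encoding']
# is used above to decode the raw command-line argument.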
| 542
|
Python
|
.py
| 18
| 27.666667
| 60
| 0.761044
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,323
|
oauth2_tests.py
|
gaubert_gmvault/src/sandbox/oauth2_tests.py
|
__author__ = 'Aubert'
import httplib2
from six.moves import input
from oauth2client.client import OAuth2WebServerFlow
CLIENT_ID = "some-ids"
CLIENT_SECRET = "secret"
SCOPES = ['https://mail.google.com/', # IMAP/SMTP client access
          'https://www.googleapis.com/auth/email'] # Email address access (verify token authorized by correct account)
def test_oauth2_with_google():
"""
Do something
:return:
"""
flow = OAuth2WebServerFlow(CLIENT_ID, CLIENT_SECRET, " ".join(SCOPES))
# Step 1: get user code and verification URL
# https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingacode
flow_info = flow.step1_get_device_and_user_codes()
print "Enter the following code at %s: %s" % (flow_info.verification_url,
flow_info.user_code)
print "Then press Enter."
input()
# Step 2: get credentials
# https://developers.google.com/accounts/docs/OAuth2ForDevices#obtainingatoken
credentials = flow.step2_exchange(device_flow_info=flow_info)
print "Access token:", credentials.access_token
print "Refresh token:", credentials.refresh_token
#Get IMAP Service
if __name__ == '__main__':
test_oauth2_with_google()
| 1,284
|
Python
|
.py
| 29
| 37.241379
| 122
| 0.677419
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,324
|
test_wx.py
|
gaubert_gmvault/src/sandbox/test_wx.py
|
# border.py
import wx
ID_NEW = 1
ID_RENAME = 2
ID_CLEAR = 3
ID_DELETE = 4
class Example(wx.Frame):
def __init__(self, parent, title):
super(Example, self).__init__(parent, title=title,
size=(260, 180))
self.InitUI()
self.Centre()
self.Show()
def InitUI(self):
panel = wx.Panel(self)
panel.SetBackgroundColour('#4f5049')
#hbox = wx.BoxSizer(wx.HORIZONTAL)
#vbox = wx.BoxSizer(wx.VERTICAL)
lbox = wx.BoxSizer(wx.VERTICAL)
listbox = wx.ListBox(panel, -1, size=(100,50))
#add button panel and its sizer
btnPanel = wx.Panel(panel, -1, size= (30,30))
bbox = wx.BoxSizer(wx.HORIZONTAL)
new = wx.Button(btnPanel, ID_NEW, '+', size=(24, 24))
ren = wx.Button(btnPanel, ID_RENAME, '-', size=(24, 24))
#dlt = wx.Button(btnPanel, ID_DELETE, 'D', size=(30, 30))
#clr = wx.Button(btnPanel, ID_CLEAR, 'C', size=(30, 30))
#hbox5 = wx.BoxSizer(wx.HORIZONTAL)
#btn1 = wx.Button(panel, label='Ok', size=(70, 30))
#hbox5.Add(btn1)
#btn2 = wx.Button(panel, label='Close', size=(70, 30))
#hbox5.Add(btn2, flag=wx.LEFT|wx.BOTTOM, border=5)
#vbox.Add(hbox5, flag=wx.ALIGN_RIGHT|wx.RIGHT, border=10)
#self.Bind(wx.EVT_BUTTON, self.NewItem, id=ID_NEW)
#self.Bind(wx.EVT_BUTTON, self.OnRename, id=ID_RENAME)
#self.Bind(wx.EVT_BUTTON, self.OnDelete, id=ID_DELETE)
#self.Bind(wx.EVT_BUTTON, self.OnClear, id=ID_CLEAR)
#self.Bind(wx.EVT_LISTBOX_DCLICK, self.OnRename)
bbox.Add(new, flag= wx.LEFT, border=2)
bbox.Add(ren, flag= wx.LEFT, border=2)
#buttonbox.Add(dlt)
#buttonbox.Add(clr)
btnPanel.SetSizer(bbox)
lbox.Add(listbox, 1, wx.EXPAND | wx.ALL, 1)
lbox.Add(btnPanel, 0, wx.EXPAND | wx.ALL, 1)
#lbox.Add(buttonbox, 1, wx.EXPAND | wx.ALL, 1)
#hbox.Add(lbox, 1, wx.EXPAND | wx.ALL, 7)
#midPan = wx.Panel(panel)
#midPan.SetBackgroundColour('#ededed')
#midPan1 = wx.Panel(panel)
#midPan1.SetBackgroundColour('#ededed')
#vbox.Add(midPan, 1, wx.EXPAND | wx.ALL, 5)
#vbox.Add(midPan1, 1, wx.EXPAND | wx.ALL, 5)
#hbox.Add(vbox, 1, wx.EXPAND | wx.ALL, 5)
panel.SetSizer(lbox)
if __name__ == '__main__':
app = wx.App()
Example(None, title='Gmvault-test')
app.MainLoop()
| 2,500
|
Python
|
.py
| 59
| 33.711864
| 70
| 0.587767
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,325
|
oauth2.py
|
gaubert_gmvault/src/sandbox/oauth2.py
|
#!/usr/bin/python
#
# Copyright 2012 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Performs client tasks for testing IMAP OAuth2 authentication.
To use this script, you'll need to have registered with Google as an OAuth
application and obtained an OAuth client ID and client secret.
See http://code.google.com/apis/accounts/docs/OAuth2.html for instructions on
registering and for documentation of the APIs invoked by this code.
This script has 3 modes of operation.
1. The first mode is used to generate and authorize an OAuth2 token, the
first step in logging in via OAuth2.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--generate_oauth2_token
The script will converse with Google and generate an oauth request
token, then present you with a URL you should visit in your browser to
authorize the token. Once you get the verification code from the Google
website, enter it into the script to get your OAuth access token. The output
from this command will contain the access token, a refresh token, and some
metadata about the tokens. The access token can be used until it expires, and
the refresh token lasts indefinitely, so you should record these values for
reuse.
2. The script will generate new access tokens using a refresh token.
oauth2 --user=xxx@gmail.com \
--client_id=1038[...].apps.googleusercontent.com \
--client_secret=VWFn8LIKAMC-MsjBMhJeOplZ \
--refresh_token=1/Yzm6MRy4q1xi7Dx2DuWXNgT6s37OrP_DW_IoyTum4YA
3. The script will generate an OAuth2 string that can be fed
directly to IMAP or SMTP. This is triggered with the --generate_oauth2_string
option.
oauth2 --generate_oauth2_string --user=xxx@gmail.com \
--access_token=ya29.AGy[...]ezLg
The output of this mode will be a base64-encoded string. To use it, connect to an
IMAPFE and pass it as the second argument to the AUTHENTICATE command.
a AUTHENTICATE XOAUTH2 a9sha9sfs[...]9dfja929dk==
"""
import base64
import imaplib
import json
from optparse import OptionParser
import smtplib
import sys
import urllib
def SetupOptionParser():
# Usage message is the module's docstring.
parser = OptionParser(usage=__doc__)
parser.add_option('--generate_oauth2_token',
action='store_true',
dest='generate_oauth2_token',
help='generates an OAuth2 token for testing')
parser.add_option('--generate_oauth2_string',
action='store_true',
dest='generate_oauth2_string',
help='generates an initial client response string for '
'OAuth2')
parser.add_option('--client_id',
default=None,
help='Client ID of the application that is authenticating. '
'See OAuth2 documentation for details.')
parser.add_option('--client_secret',
default=None,
help='Client secret of the application that is '
'authenticating. See OAuth2 documentation for '
'details.')
parser.add_option('--access_token',
default=None,
help='OAuth2 access token')
parser.add_option('--refresh_token',
default=None,
help='OAuth2 refresh token')
parser.add_option('--scope',
default='https://mail.google.com/',
help='scope for the access token. Multiple scopes can be '
'listed separated by spaces with the whole argument '
'quoted.')
parser.add_option('--test_imap_authentication',
action='store_true',
dest='test_imap_authentication',
help='attempts to authenticate to IMAP')
parser.add_option('--test_smtp_authentication',
action='store_true',
dest='test_smtp_authentication',
help='attempts to authenticate to SMTP')
parser.add_option('--user',
default=None,
help='email address of user whose account is being '
'accessed')
return parser
# The URL root for accessing Google Accounts.
GOOGLE_ACCOUNTS_BASE_URL = 'https://accounts.google.com'
# Hardcoded dummy redirect URI for non-web apps.
REDIRECT_URI = 'urn:ietf:wg:oauth:2.0:oob'
def AccountsUrl(command):
"""Generates the Google Accounts URL.
Args:
command: The command to execute.
Returns:
A URL for the given command.
"""
return '%s/%s' % (GOOGLE_ACCOUNTS_BASE_URL, command)
def UrlEscape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.quote(text, safe='~-._')
def UrlUnescape(text):
# See OAUTH 5.1 for a definition of which characters need to be escaped.
return urllib.unquote(text)
def FormatUrlParams(params):
"""Formats parameters into a URL query string.
Args:
params: A key-value map.
Returns:
A URL query string version of the given parameters.
"""
param_fragments = []
for param in sorted(params.iteritems(), key=lambda x: x[0]):
param_fragments.append('%s=%s' % (param[0], UrlEscape(param[1])))
return '&'.join(param_fragments)
def GeneratePermissionUrl(client_id, scope='https://mail.google.com/'):
"""Generates the URL for authorizing access.
This uses the "OAuth2 for Installed Applications" flow described at
https://developers.google.com/accounts/docs/OAuth2InstalledApp
Args:
client_id: Client ID obtained by registering your app.
scope: scope for access token, e.g. 'https://mail.google.com'
Returns:
A URL that the user should visit in their browser.
"""
params = {}
params['client_id'] = client_id
params['redirect_uri'] = REDIRECT_URI
params['scope'] = scope
params['response_type'] = 'code'
return '%s?%s' % (AccountsUrl('o/oauth2/auth'),
FormatUrlParams(params))
def AuthorizeTokens(client_id, client_secret, authorization_code):
"""Obtains OAuth access token and refresh token.
This uses the application portion of the "OAuth2 for Installed Applications"
flow at https://developers.google.com/accounts/docs/OAuth2InstalledApp#handlingtheresponse
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
authorization_code: code generated by Google Accounts after user grants
permission.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['code'] = authorization_code
params['redirect_uri'] = REDIRECT_URI
params['grant_type'] = 'authorization_code'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def RefreshToken(client_id, client_secret, refresh_token):
"""Obtains a new token given a refresh token.
See https://developers.google.com/accounts/docs/OAuth2InstalledApp#refresh
Args:
client_id: Client ID obtained by registering your app.
client_secret: Client secret obtained by registering your app.
refresh_token: A previously-obtained refresh token.
Returns:
The decoded response from the Google Accounts server, as a dict. Expected
fields include 'access_token', 'expires_in', and 'refresh_token'.
"""
params = {}
params['client_id'] = client_id
params['client_secret'] = client_secret
params['refresh_token'] = refresh_token
params['grant_type'] = 'refresh_token'
request_url = AccountsUrl('o/oauth2/token')
response = urllib.urlopen(request_url, urllib.urlencode(params)).read()
return json.loads(response)
def GenerateOAuth2String(username, access_token, base64_encode=True):
"""Generates an IMAP OAuth2 authentication string.
See https://developers.google.com/google-apps/gmail/oauth2_overview
Args:
username: the username (email address) of the account to authenticate
access_token: An OAuth2 access token.
base64_encode: Whether to base64-encode the output.
Returns:
The SASL argument for the OAuth2 mechanism.
"""
auth_string = 'user=%s\1auth=Bearer %s\1\1' % (username, access_token)
if base64_encode:
auth_string = base64.b64encode(auth_string)
return auth_string
def TestImapAuthentication(user, auth_string):
"""Authenticates to IMAP with the given auth_string.
Prints a debug trace of the attempted IMAP connection.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, as returned by GenerateOAuth2String.
Must not be base64-encoded, since imaplib does its own base64-encoding.
"""
print
imap_conn = imaplib.IMAP4_SSL('imap.gmail.com')
imap_conn.debug = 4
imap_conn.authenticate('XOAUTH2', lambda x: auth_string)
imap_conn.select('INBOX')
def TestSmtpAuthentication(user, auth_string):
"""Authenticates to SMTP with the given auth_string.
Args:
user: The Gmail username (full email address)
auth_string: A valid OAuth2 string, not base64-encoded, as returned by
GenerateOAuth2String.
"""
print
smtp_conn = smtplib.SMTP('smtp.gmail.com', 587)
smtp_conn.set_debuglevel(True)
smtp_conn.ehlo('test')
smtp_conn.starttls()
smtp_conn.docmd('AUTH', 'XOAUTH2 ' + base64.b64encode(auth_string))
def RequireOptions(options, *args):
missing = [arg for arg in args if getattr(options, arg) is None]
if missing:
print 'Missing options: %s' % ' '.join(missing)
sys.exit(-1)
def main(argv):
options_parser = SetupOptionParser()
(options, args) = options_parser.parse_args()
if options.refresh_token:
RequireOptions(options, 'client_id', 'client_secret')
response = RefreshToken(options.client_id, options.client_secret,
options.refresh_token)
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.generate_oauth2_string:
RequireOptions(options, 'user', 'access_token')
print ('OAuth2 argument:\n' +
GenerateOAuth2String(options.user, options.access_token))
elif options.generate_oauth2_token:
RequireOptions(options, 'client_id', 'client_secret')
print 'To authorize token, visit this url and follow the directions:'
print ' %s' % GeneratePermissionUrl(options.client_id, options.scope)
authorization_code = raw_input('Enter verification code: ')
response = AuthorizeTokens(options.client_id, options.client_secret,
authorization_code)
print 'Refresh Token: %s' % response['refresh_token']
print 'Access Token: %s' % response['access_token']
print 'Access Token Expiration Seconds: %s' % response['expires_in']
elif options.test_imap_authentication:
RequireOptions(options, 'user', 'access_token')
TestImapAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
elif options.test_smtp_authentication:
RequireOptions(options, 'user', 'access_token')
TestSmtpAuthentication(options.user,
GenerateOAuth2String(options.user, options.access_token,
base64_encode=False))
else:
options_parser.print_help()
print 'Nothing to do, exiting.'
return
if __name__ == '__main__':
main(sys.argv)
| 12,196
|
Python
|
.py
| 272
| 38.838235
| 92
| 0.703735
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,326
|
pycrypto_test.py
|
gaubert_gmvault/src/sandbox/pycrypto_test.py
|
import os
import base64
import hashlib
from Crypto import Random
from Crypto.Cipher import AES
class AESEncryptor(object):
def __init__(self, key):
self.bs = 32
self.key = hashlib.sha256(key.encode()).digest()
def encrypt(self, raw):
raw = self._pad(raw)
iv = Random.new().read(AES.block_size)
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return base64.b64encode(iv + cipher.encrypt(raw))
def decrypt(self, enc):
enc = base64.b64decode(enc)
iv = enc[:AES.block_size]
cipher = AES.new(self.key, AES.MODE_CBC, iv)
return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')
def _pad(self, s):
return s + (self.bs - len(s) % self.bs) * chr(self.bs - len(s) % self.bs)
@staticmethod
def _unpad(s):
return s[:-ord(s[len(s)-1:])]
if __name__ == '__main__':
secrets = ['Do or do not there is no try', 'I love Python !!!!']
key="This is my key"
enc = AESEncryptor(key)
for secret in secrets:
print "Secret:", secret
encrypted = enc.encrypt(secret)
print "Encrypted secret:", encrypted
print "Clear Secret:" , enc.decrypt(encrypted)
print '-' *50
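# Padding walkthrough (mirrors _pad/_unpad above): with self.bs = 32, the
# 28-byte first secret receives 32 - 28 % 32 = 4 pad bytes, each chr(4), and
# _unpad reads the last byte back to strip them. The 16-byte IV
# (AES.block_size) is prepended to the ciphertext before the base64 step, so
# decrypt() recovers it from the first 16 decoded bytes.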
| 1,291
|
Python
|
.py
| 36
| 29.666667
| 81
| 0.619583
|
gaubert/gmvault
| 3,572
| 285
| 144
|
AGPL-3.0
|
9/5/2024, 5:11:34 PM (Europe/Amsterdam)
|
12,327
|
common_gmvault.py
|
gaubert_gmvault/src/sandbox/common_gmvault.py
|
'''
Gmvault: a tool to backup and restore your gmail account.
Copyright (C) <since 2011> <guillaume Aubert (guillaume dot aubert at gmail do com)>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import json
import time
import datetime
import os
import itertools
import imaplib
import gmv.log_utils as log_utils
import gmv.collections_utils as collections_utils
import gmv.gmvault_utils as gmvault_utils
import gmv.imap_utils as imap_utils
import gmv.gmvault_db as gmvault_db
LOG = log_utils.LoggerFactory.get_logger('gmvault')
def handle_restore_imap_error(the_exception, gm_id, db_gmail_ids_info, gmvaulter):
"""
function to handle restore IMAPError in restore functions
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# if this is a Gmvault SSL Socket error quarantine the email and continue the restore
if str(the_exception).find("=> Gmvault ssl socket error: EOF") >= 0:
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
gmvaulter.src.reconnect() #reconnect
else:
raise the_exception
elif isinstance(the_exception, imaplib.IMAP4.error):
LOG.error("Catched IMAP Error %s" % (str(the_exception)))
LOG.exception(the_exception)
#When the email cannot be read from Database because it was empty when returned by gmail imap
#quarantine it.
if str(the_exception) == "APPEND command error: BAD ['Invalid Arguments: Unable to parse message']":
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
elif isinstance(the_exception, imap_utils.PushEmailError):
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
if the_exception.quarantined():
LOG.critical("Quarantine email with gm id %s from %s. GMAIL IMAP cannot restore it:"\
" err={%s}" % (gm_id, db_gmail_ids_info[gm_id], str(the_exception)))
gmvaulter.gstorer.quarantine_email(gm_id)
gmvaulter.error_report['emails_in_quarantine'].append(gm_id)
else:
raise the_exception
else:
LOG.error("Catch the following exception %s" % (str(the_exception)))
LOG.exception(the_exception)
raise the_exception
def handle_sync_imap_error(the_exception, the_id, error_report, src):
"""
function to handle IMAPError in gmvault
type = chat or email
"""
if isinstance(the_exception, imaplib.IMAP4.abort):
# imap abort error
# ignore it
# will have to do something with these ignored messages
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
try:
#try to get the gmail_id
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
LOG.critical("Error when trying to get gmail id for message with imap id %s." % (the_id))
LOG.critical("Disconnect, wait for 20 sec then reconnect.")
src.disconnect()
#could not fetch the gm_id so disconnect and sleep
#sleep 10 sec
time.sleep(10)
LOG.critical("Reconnecting ...")
src.connect()
if curr:
gmail_id = curr[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Forced to ignore message with imap id %s, (gmail id %s)." \
% (the_id, (gmail_id if gmail_id else "cannot be read")))
elif isinstance(the_exception, imaplib.IMAP4.error):
# check if this is a cannot be fetched error
# I do not like to do string guessing within an exception but I do not have any choice here
LOG.critical("Error while fetching message with imap id %s." % (the_id))
LOG.critical("\n=== Exception traceback ===\n")
LOG.critical(gmvault_utils.get_exception_traceback())
LOG.critical("=== End of Exception traceback ===\n")
#quarantine emails that have raised an abort error
if str(the_exception).find("'Some messages could not be FETCHed (Failure)'") >= 0:
try:
#try to get the gmail_id
LOG.critical("One more attempt. Trying to fetch the Gmail ID for %s" % (the_id) )
curr = src.fetch(the_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
except Exception, _: #pylint:disable-msg=W0703
curr = None
if curr:
gmail_id = curr[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
else:
gmail_id = None
#add ignored id
error_report['cannot_be_fetched'].append((the_id, gmail_id))
LOG.critical("Ignore message with imap id %s, (gmail id %s)" % (the_id, (gmail_id if gmail_id else "cannot be read")))
else:
raise the_exception #rethrow error
else:
raise the_exception
class IMAPBatchFetcher(object):
"""
Fetch IMAP data in batch
"""
def __init__(self, src, imap_ids, error_report, request, default_batch_size = 100):
"""
constructor
"""
self.src = src
self.imap_ids = imap_ids
self.def_batch_size = default_batch_size
self.request = request
self.error_report = error_report
self.to_fetch = list(imap_ids)
def individual_fetch(self, imap_ids):
"""
Find the imap_id creating the issue
return the data related to the imap_ids
"""
new_data = {}
for the_id in imap_ids:
try:
single_data = self.src.fetch(the_id, self.request)
new_data.update(single_data)
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
return new_data
def __iter__(self):
return self
def next(self):
"""
Return the next batch of elements
"""
new_data = {}
batch = self.to_fetch[:self.def_batch_size]
if len(batch) <= 0:
raise StopIteration
try:
new_data = self.src.fetch(batch, self.request)
self.to_fetch = self.to_fetch[self.def_batch_size:]
return new_data
except imaplib.IMAP4.error, _:
new_data = self.individual_fetch(batch)
return new_data
def reset(self):
"""
Restart from the beginning
"""
self.to_fetch = self.imap_ids
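# Minimal usage sketch for IMAPBatchFetcher (handler name hypothetical):
# iterating yields dicts of imap_id -> fetched attributes, and a failing batch
# falls back to one-by-one fetches via individual_fetch/handle_sync_imap_error.
#   fetcher = IMAPBatchFetcher(src, imap_ids, error_report,
#                              imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA,
#                              default_batch_size = 500)
#   for batch in fetcher:
#       for imap_id, attrs in batch.iteritems():
#           handle(imap_id, attrs)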
#Client to support imap search with non ascii chars (not working because of imaplib's limitations)
'''class MonkeyIMAPClient(imapclient.IMAPClient): #pylint:disable=R0903,R0904
"""
Need to extend the IMAPClient to do more things such as compression
Compression inspired by http://www.janeelix.com/piers/python/py2html.cgi/piers/python/imaplib2
"""
def __init__(self, host, port=None, use_uid=True, need_ssl=False):
"""
constructor
"""
super(MonkeyIMAPClient, self).__init__(host, port, use_uid, need_ssl)
def _create_IMAP4(self): #pylint:disable=C0103
"""
Factory method creating an IMAPCOMPSSL or a standard IMAP4 Class
"""
imap_class = self.ssl and IMAP4COMPSSL or imaplib.IMAP4
return imap_class(self.host, self.port)
def xoauth_login(self, xoauth_cred ):
"""
Connect with xoauth
Redefine this method to suppress dependency to oauth2 (non-necessary)
"""
typ, data = self._imap.authenticate('XOAUTH', lambda x: xoauth_cred)
self._checkok('authenticate', typ, data)
return data[0]
def old_search(self, criteria):
"""
Perform a imap search or gmail search
"""
if criteria.get('type','') == 'imap':
#encoding criteria in utf-8
req = criteria['req'].encode('utf-8')
charset = 'utf-8'
return super(MonkeyIMAPClient, self).search(req, charset)
elif criteria.get('type','') == 'gmail':
return self.gmail_search(criteria.get('req',''))
else:
raise Exception("Unknown search type %s" % (criteria.get('type','no request type passed')))
def search(self, criteria):
"""
Perform a imap search or gmail search
"""
if criteria.get('type','') == 'imap':
#encoding criteria in utf-8
#req = criteria['req'].encode('utf-8')
req = criteria['req']
charset = 'utf-8'
#return super(MonkeyIMAPClient, self).search(req, charset)
return self.imap_search(req, charset)
elif criteria.get('type','') == 'gmail':
return self.gmail_search(criteria.get('req',''))
else:
raise Exception("Unknown search type %s" % (criteria.get('type','no request type passed')))
def gmail_search(self, criteria):
"""
perform a search with gmailsearch criteria.
eg, subject:Hello World
"""
criteria = criteria.replace('\\', '\\\\')
criteria = criteria.replace('"', '\\"')
        #working, but the server does not understand it when non ascii chars are used
#args = ['CHARSET', 'utf-8', 'X-GM-RAW', '"%s"' % (criteria)]
#typ, data = self._imap.uid('SEARCH', *args)
#working Literal search
self._imap.literal = '"%s"' % (criteria)
self._imap.literal = imaplib.MapCRLF.sub(imaplib.CRLF, self._imap.literal)
self._imap.literal = self._imap.literal.encode("utf-8")
#args = ['X-GM-RAW']
#typ, data = self._imap.search('utf-8',*args)
#use uid to keep the imap ids consistent
args = ['CHARSET', 'utf-8', 'X-GM-RAW']
typ, data = self._imap.uid('SEARCH', *args)
self._checkok('search', typ, data)
if data == [None]: # no untagged responses...
return [ ]
return [ long(i) for i in data[0].split() ]
def append(self, folder, msg, flags=(), msg_time=None):
"""Append a message to *folder*.
*msg* should be a string contains the full message including
headers.
*flags* should be a sequence of message flags to set. If not
specified no flags will be set.
*msg_time* is an optional datetime instance specifying the
date and time to set on the message. The server will set a
time if it isn't specified. If *msg_time* contains timezone
information (tzinfo), this will be honoured. Otherwise the
        local machine's time zone is sent to the server.
Returns the APPEND response as returned by the server.
"""
if msg_time:
time_val = time.mktime(msg_time.timetuple())
else:
time_val = None
flags_list = seq_to_parenlist(flags)
typ, data = self._imap.append(self._encode_folder_name(folder) if folder else None,
flags_list, time_val, msg)
self._checkok('append', typ, data)
return data[0]
def enable_compression(self):
"""
enable_compression()
Ask the server to start compressing the connection.
Should be called from user of this class after instantiation, as in:
if 'COMPRESS=DEFLATE' in imapobj.capabilities:
imapobj.enable_compression()
"""
ret_code, _ = self._imap._simple_command('COMPRESS', 'DEFLATE') #pylint: disable=W0212
if ret_code == 'OK':
self._imap.activate_compression()
else:
#no errors for the moment
pass
'''
class GMVaulter(object):
"""
Main object operating over gmail
"""
NB_GRP_OF_ITEMS = 1400
EMAIL_RESTORE_PROGRESS = 'email_last_id.restore'
CHAT_RESTORE_PROGRESS = 'chat_last_id.restore'
EMAIL_SYNC_PROGRESS = 'email_last_id.sync'
CHAT_SYNC_PROGRESS = 'chat_last_id.sync'
OP_EMAIL_RESTORE = "EM_RESTORE"
OP_EMAIL_SYNC = "EM_SYNC"
OP_CHAT_RESTORE = "CH_RESTORE"
OP_CHAT_SYNC = "CH_SYNC"
OP_TO_FILENAME = { OP_EMAIL_RESTORE : EMAIL_RESTORE_PROGRESS,
OP_EMAIL_SYNC : EMAIL_SYNC_PROGRESS,
OP_CHAT_RESTORE : CHAT_RESTORE_PROGRESS,
OP_CHAT_SYNC : CHAT_SYNC_PROGRESS
}
def __init__(self, db_root_dir, host, port, login, \
credential, read_only_access = True, use_encryption = False): #pylint:disable-msg=R0913,R0914
"""
constructor
"""
self.db_root_dir = db_root_dir
#create dir if it doesn't exist
gmvault_utils.makedirs(self.db_root_dir)
#keep track of login email
self.login = login
# create source and try to connect
self.src = imap_utils.GIMAPFetcher(host, port, login, credential, \
readonly_folder = read_only_access)
self.src.connect()
LOG.debug("Connected")
self.use_encryption = use_encryption
#to report gmail imap problems
        self.error_report = { 'empty'                : [],
                              'empty_chats'          : [],
                              'cannot_be_fetched'    : [],
                              'emails_in_quarantine' : [],
                              'reconnections'        : 0}
#instantiate gstorer
self.gstorer = gmvault_db.GmailStorer(self.db_root_dir, self.use_encryption)
        #timer used to measure the time spent in the different operations
self.timer = gmvault_utils.Timer()
@classmethod
def get_imap_request_btw_2_dates(cls, begin_date, end_date):
"""
Return the imap request for those 2 dates
"""
imap_req = 'Since %s Before %s' % (gmvault_utils.datetime2imapdate(begin_date), gmvault_utils.datetime2imapdate(end_date))
return imap_req
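        # e.g., assuming datetime2imapdate renders the usual IMAP date format:
        #   get_imap_request_btw_2_dates(date(2014, 1, 1), date(2014, 2, 1))
        #   -> 'Since 01-Jan-2014 Before 01-Feb-2014'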
def get_operation_report(self):
"""
Return the error report
"""
the_str = "\n================================================================\n"\
"Number of reconnections: %d.\nNumber of emails quarantined: %d.\n" \
"Number of emails that could not be fetched: %d.\n" \
"Number of emails that were returned empty by gmail: %d\n"\
"================================================================" \
% (self.error_report['reconnections'], \
len(self.error_report['emails_in_quarantine']), \
len(self.error_report['cannot_be_fetched']), \
len(self.error_report['empty'])
)
LOG.debug("error_report complete structure = %s" % (self.error_report))
return the_str
@classmethod
def _get_next_date(cls, a_current_date, start_month_beginning = False):
"""
return the next date necessary to build the imap req
"""
if start_month_beginning:
dummy_date = a_current_date.replace(day=1)
else:
dummy_date = a_current_date
# the next date = current date + 1 month
return dummy_date + datetime.timedelta(days=31)
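        # e.g. _get_next_date(date(2014, 1, 15), start_month_beginning = True)
        # first rebases to 2014-01-01, then returns 2014-02-01 (Jan 1st + 31 days).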
@classmethod
def check_email_on_disk(cls, a_gstorer, a_id, a_dir = None):
"""
Factory method to create the object if it exists
"""
try:
a_dir = a_gstorer.get_directory_from_id(a_id, a_dir)
if a_dir:
return a_gstorer.unbury_metadata(a_id, a_dir)
except ValueError, json_error:
LOG.exception("Cannot read file %s. Try to fetch the data again" % ('%s.meta' % (a_id)), json_error )
return None
@classmethod
def _metadata_needs_update(cls, curr_metadata, new_metadata, chat_metadata = False):
"""
Needs update
"""
if curr_metadata[gmvault_db.GmailStorer.ID_K] != new_metadata['X-GM-MSGID']:
raise Exception("Gmail id has changed for %s" % (curr_metadata['id']))
#check flags
prev_set = set(new_metadata['FLAGS'])
for flag in curr_metadata['flags']:
if flag not in prev_set:
return True
else:
prev_set.remove(flag)
if len(prev_set) > 0:
return True
#check labels
prev_labels = set(new_metadata['X-GM-LABELS'])
if chat_metadata: #add gmvault-chats labels
prev_labels.add(gmvault_db.GmailStorer.CHAT_GM_LABEL)
for label in curr_metadata['labels']:
if label not in prev_labels:
return True
else:
prev_labels.remove(label)
if len(prev_labels) > 0:
return True
return False
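        # Walkthrough: with curr_metadata['flags'] == ['\\Seen'] and
        # new_metadata['FLAGS'] == ('\\Seen', '\\Flagged'), '\\Seen' empties out
        # of prev_set, '\\Flagged' is left over, so the method returns True and
        # the stored metadata gets refreshed.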
def _check_email_db_ownership(self, ownership_control):
"""
Check email database ownership.
        If ownership control is activated, fail when a new additional owner would be added.
        Otherwise allow one more user and save it in the list of owners.
        The number of owners decides whether db cleaning can be activated:
        running a clean on a multi-ownership db would be a catastrophe, as it
        would delete all the emails of the other users.
"""
#check that the gmvault-db is not associated with another user
db_owners = self.gstorer.get_db_owners()
if ownership_control:
if len(db_owners) > 0 and self.login not in db_owners: #db owner should not be different unless bypass activated
raise Exception("The email database %s is already associated with one or many logins: %s."\
" Use option (-m, --multiple-db-owner) if you want to link it with %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
else:
if len(db_owners) == 0:
LOG.critical("Establish %s as the owner of the Gmvault db %s." % (self.login, self.db_root_dir))
elif len(db_owners) > 0 and self.login not in db_owners:
LOG.critical("The email database %s is hosting emails from %s. It will now also store emails from %s" \
% (self.db_root_dir, ", ".join(db_owners), self.login))
#try to save db_owner in the list of owners
self.gstorer.store_db_owner(self.login)
def _sync_chats(self, imap_req, compress, restart):
"""
Previous working sync for chats
backup the chat messages
"""
chat_dir = None
timer = gmvault_utils.Timer() #start local timer for chat
timer.start()
LOG.debug("Before selection")
if self.src.is_visible('CHATS'):
chat_dir = self.src.select_folder('CHATS')
LOG.debug("Selection is finished")
if chat_dir:
#imap_ids = self.src.search({ 'type': 'imap', 'req': 'ALL' })
imap_ids = self.src.search(imap_req)
# check if there is a restart
if restart:
LOG.critical("Restart mode activated. Need to find information in Gmail, be patient ...")
imap_ids = self.get_gmails_ids_left_to_sync(self.OP_CHAT_SYNC, imap_ids)
total_nb_chats_to_process = len(imap_ids) # total number of emails to get
LOG.critical("%d chat messages to be fetched." % (total_nb_chats_to_process))
nb_chats_processed = 0
to_fetch = set(imap_ids)
batch_fetcher = IMAPBatchFetcher(self.src, imap_ids, self.error_report, \
imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = \
gmvault_utils.get_conf_defaults().getint("General", \
"nb_messages_per_batch", 500))
for new_data in batch_fetcher:
for the_id in new_data:
if new_data.get(the_id, None):
gid = None
LOG.debug("\nProcess imap chat id %s" % ( the_id ))
gid = new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
the_dir = self.gstorer.get_sub_chats_dir()
LOG.critical("Process chat num %d (imap_id:%s) into %s." % (nb_chats_processed, the_id, the_dir))
#pass the dir and the ID
curr_metadata = GMVaulter.check_email_on_disk( self.gstorer , \
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID], \
the_dir)
#if on disk check that the data is not different
if curr_metadata:
if self._metadata_needs_update(curr_metadata, new_data[the_id], chat_metadata = True):
LOG.debug("Chat with imap id %s and gmail id %s has changed. Updated it." % (the_id, gid))
#restore everything at the moment
gid = self.gstorer.bury_chat_metadata(new_data[the_id], local_dir = the_dir)
#update local index id gid => index per directory to be thought out
else:
LOG.debug("The metadata for chat %s already exists and is identical to the one on GMail." % (gid))
else:
try:
#get the data
email_data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_DATA_ONLY )
new_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY] = \
email_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY]
# store data on disk within year month dir
gid = self.gstorer.bury_chat(new_data[the_id], local_dir = the_dir, compress = compress)
#update local index id gid => index per directory to be thought out
LOG.debug("Create and store chat with imap id %s, gmail id %s." % (the_id, gid))
except Exception, error:
#do everything in this handler
handle_sync_imap_error(error, the_id, self.error_report, self.src)
nb_chats_processed += 1
#indicate every 50 messages the number of messages left to process
left_emails = (total_nb_chats_to_process - nb_chats_processed)
if (nb_chats_processed % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be stored (time estimate %s).==\n" % \
(nb_chats_processed, timer.seconds_to_human_time(elapsed), \
left_emails, \
timer.estimate_time_left(nb_chats_processed, elapsed, left_emails)))
# save id every 10 restored emails
if (nb_chats_processed % 10) == 0:
if gid:
self.save_lastid(self.OP_CHAT_SYNC, gid)
else:
LOG.info("Could not process imap with id %s. Ignore it\n")
self.error_report['empty'].append((the_id, None))
to_fetch -= set(new_data.keys()) #remove all found keys from to_fetch set
for the_id in to_fetch:
# case when gmail IMAP server returns OK without any data whatsoever
                # e.g. imap uid 142221L; ignore it
LOG.info("Could not process chat with id %s. Ignore it\n" % (the_id))
self.error_report['empty_chats'].append((the_id, None))
else:
imap_ids = []
LOG.critical("\nChats synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def _sync_emails(self, imap_req, compress, restart):
"""
Previous sync for emails
First part of the double pass strategy:
- create and update emails in db
"""
timer = gmvault_utils.Timer()
timer.start()
#select all mail folder using the constant name defined in GIMAPFetcher
self.src.select_folder('ALLMAIL')
# get all imap ids in All Mail
imap_ids = self.src.search(imap_req)
# check if there is a restart
if restart:
LOG.critical("Restart mode activated for emails. Need to find information in Gmail, be patient ...")
imap_ids = self.get_gmails_ids_left_to_sync(self.OP_EMAIL_SYNC, imap_ids)
total_nb_emails_to_process = len(imap_ids) # total number of emails to get
LOG.critical("%d emails to be fetched." % (total_nb_emails_to_process))
nb_emails_processed = 0
to_fetch = set(imap_ids)
batch_fetcher = IMAPBatchFetcher(self.src, imap_ids, self.error_report, imap_utils.GIMAPFetcher.GET_ALL_BUT_DATA, \
default_batch_size = \
gmvault_utils.get_conf_defaults().getint("General","nb_messages_per_batch",500))
#LAST Thing to do remove all found ids from imap_ids and if ids left add missing in report
for new_data in batch_fetcher:
for the_id in new_data:
#LOG.debug("new_data = %s\n" % (new_data))
if new_data.get(the_id, None):
LOG.debug("\nProcess imap id %s" % ( the_id ))
gid = new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID]
the_dir = gmvault_utils.get_ym_from_datetime(new_data[the_id][imap_utils.GIMAPFetcher.IMAP_INTERNALDATE])
LOG.critical("Process email num %d (imap_id:%s) from %s." % (nb_emails_processed, the_id, the_dir))
#decode the labels that are received as utf7 => unicode
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_LABELS] = \
imap_utils.decode_labels(new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_LABELS])
#pass the dir and the ID
curr_metadata = GMVaulter.check_email_on_disk( self.gstorer , \
new_data[the_id][imap_utils.GIMAPFetcher.GMAIL_ID], \
the_dir)
#if on disk check that the data is not different
if curr_metadata:
LOG.debug("metadata for %s already exists. Check if different." % (gid))
if self._metadata_needs_update(curr_metadata, new_data[the_id]):
LOG.debug("Email with imap id %s and gmail id %s has changed. Updated it." % (the_id, gid))
#restore everything at the moment
gid = self.gstorer.bury_metadata(new_data[the_id], local_dir = the_dir)
#update local index id gid => index per directory to be thought out
else:
LOG.debug("On disk metadata for %s is up to date." % (gid))
else:
try:
#get the data
LOG.debug("Get Data for %s." % (gid))
email_data = self.src.fetch(the_id, imap_utils.GIMAPFetcher.GET_DATA_ONLY )
new_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY] = \
email_data[the_id][imap_utils.GIMAPFetcher.EMAIL_BODY]
# store data on disk within year month dir
gid = self.gstorer.bury_email(new_data[the_id], local_dir = the_dir, compress = compress)
#update local index id gid => index per directory to be thought out
LOG.debug("Create and store email with imap id %s, gmail id %s." % (the_id, gid))
except Exception, error:
handle_sync_imap_error(error, the_id, self.error_report, self.src) #do everything in this handler
nb_emails_processed += 1
#indicate every 50 messages the number of messages left to process
left_emails = (total_nb_emails_to_process - nb_emails_processed)
if (nb_emails_processed % 50) == 0 and (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be stored (time estimate %s).==\n" % \
(nb_emails_processed, \
timer.seconds_to_human_time(elapsed), left_emails, \
timer.estimate_time_left(nb_emails_processed, elapsed, left_emails)))
# save id every 10 restored emails
if (nb_emails_processed % 10) == 0:
if gid:
self.save_lastid(self.OP_EMAIL_SYNC, gid)
else:
LOG.info("Could not process imap with id %s. Ignore it\n")
self.error_report['empty'].append((the_id, gid if gid else None))
to_fetch -= set(new_data.keys()) #remove all found keys from to_fetch set
for the_id in to_fetch:
# case when gmail IMAP server returns OK without any data whatsoever
            # e.g. imap uid 142221L; ignore it
            LOG.info("Could not process imap with id %s. Ignore it\n" % (the_id))
self.error_report['empty'].append((the_id, None))
LOG.critical("\nEmails synchronisation operation performed in %s.\n" % (timer.seconds_to_human_time(timer.elapsed())))
return imap_ids
def sync(self, imap_req = imap_utils.GIMAPFetcher.IMAP_ALL, compress_on_disk = True, \
db_cleaning = False, ownership_checking = True, \
restart = False, emails_only = False, chats_only = False):
"""
sync mode
"""
#check ownership to have one email per db unless user wants different
#save the owner if new
self._check_email_db_ownership(ownership_checking)
if not compress_on_disk:
LOG.critical("Disable compression when storing emails.")
if self.use_encryption:
LOG.critical("Encryption activated. All emails will be encrypted before to be stored.")
LOG.critical("Please take care of the encryption key stored in (%s) or all"\
" your stored emails will become unreadable." \
% (gmvault_db.GmailStorer.get_encryption_key_path(self.db_root_dir)))
self.timer.start() #start syncing emails
if not chats_only:
# backup emails
LOG.critical("Start emails synchronization.\n")
self._sync_emails(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("Skip emails synchronization.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats synchronization.\n")
self._sync_chats(imap_req, compress = compress_on_disk, restart = restart)
else:
LOG.critical("\nSkip chats synchronization.\n")
        #delete from the DB the emails suppressed in Gmail since the last sync
if len(self.gstorer.get_db_owners()) <= 1:
self.check_clean_db(db_cleaning)
else:
LOG.critical("Deactivate database cleaning on a multi-owners Gmvault db.")
LOG.critical("Synchronisation operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def _delete_sync(self, imap_ids, db_gmail_ids, db_gmail_ids_info, msg_type):
"""
Delete emails from the database if necessary
imap_ids : all remote imap_ids to check
db_gmail_ids_info : info read from metadata
msg_type : email or chat
"""
# optimize nb of items
nb_items = self.NB_GRP_OF_ITEMS if len(imap_ids) >= self.NB_GRP_OF_ITEMS else len(imap_ids)
LOG.critical("Call Gmail to check the stored %ss against the Gmail %ss ids and see which ones have been deleted.\n\n"\
"This might take a few minutes ...\n" % (msg_type, msg_type))
#calculate the list elements to delete
#query nb_items items in one query to minimise number of imap queries
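        # grouper idiom: izip_longest(fillvalue=None, *[iter(seq)]*n) yields
        # n-sized tuples and pads the last one, e.g. n=3 over [1, 2, 3, 4, 5]
        # gives (1, 2, 3) then (4, 5, None); the None padding is dropped below.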
for group_imap_id in itertools.izip_longest(fillvalue=None, *[iter(imap_ids)]*nb_items):
# if None in list remove it
if None in group_imap_id:
group_imap_id = [ im_id for im_id in group_imap_id if im_id != None ]
data = self.src.fetch(group_imap_id, imap_utils.GIMAPFetcher.GET_GMAIL_ID)
# syntax for 2.7 set comprehension { data[key][imap_utils.GIMAPFetcher.GMAIL_ID] for key in data }
# need to create a list for 2.6
db_gmail_ids.difference_update([data[key][imap_utils.GIMAPFetcher.GMAIL_ID] for key in data ])
if len(db_gmail_ids) == 0:
break
LOG.critical("Will delete %s %s(s) from gmvault db.\n" % (len(db_gmail_ids), msg_type) )
for gm_id in db_gmail_ids:
LOG.critical("gm_id %s not in the Gmail server. Delete it." % (gm_id))
self.gstorer.delete_emails([(gm_id, db_gmail_ids_info[gm_id])], msg_type)
def get_gmails_ids_left_to_sync(self, op_type, imap_ids):
"""
        Get the ids that still need to be synced
Return a list of ids
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. "\
"This should not happen, send the error to the software developers." % (op_type))
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id.sync file %s doesn't exist.\nSync the full list of backed up emails." %(filepath))
return imap_ids
json_obj = json.load(open(filepath, 'r'))
last_id = json_obj['last_id']
last_id_index = -1
new_gmail_ids = imap_ids
try:
#get imap_id from stored gmail_id
dummy = self.src.search({'type':'imap', 'req':'X-GM-MSGID %s' % (last_id)})
imap_id = dummy[0]
last_id_index = imap_ids.index(imap_id)
LOG.critical("Restart from gmail id %s (imap id %s)." % (last_id, imap_id))
new_gmail_ids = imap_ids[last_id_index:]
except Exception, _: #ignore any exception and try to get all ids in case of problems. pylint:disable=W0703
            #id not found remotely: fall back to the full list of ids
            LOG.critical("Error: Cannot restart from the last synced gmail id. It is no longer in Gmail."\
                         " Sync the complete list of gmail ids requested from Gmail.")
return new_gmail_ids
def check_clean_db(self, db_cleaning):
"""
           Check and clean the database (remove files that are no longer in Gmail).
"""
owners = self.gstorer.get_db_owners()
if not db_cleaning: #decouple the 2 conditions for activating cleaning
LOG.debug("db_cleaning is off so ignore removing deleted emails from disk.")
return
elif len(owners) > 1:
LOG.critical("Gmvault db hosting emails from different accounts: %s.\n"\
"Cannot activate database cleaning." % (", ".join(owners)))
return
else:
LOG.critical("Look for emails/chats that are in the Gmvault db but not in Gmail servers anymore.\n")
#get gmail_ids from db
LOG.critical("Read all gmail ids from the Gmvault db. It might take a bit of time ...\n")
timer = gmvault_utils.Timer() # needed for enhancing the user information
timer.start()
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids()
LOG.critical("Found %s email(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
#create a set of keys
db_gmail_ids = set(db_gmail_ids_info.keys())
# get all imap ids in All Mail
self.src.select_folder('ALLMAIL') #go to all mail
imap_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL) #search all
LOG.debug("Got %s emails imap_id(s) from the Gmail Server." % (len(imap_ids)))
            #delete from the gmvault db the emails that were removed from Gmail since the last sync
self._delete_sync(imap_ids, db_gmail_ids, db_gmail_ids_info, 'email')
# get all chats ids
if self.src.is_visible('CHATS'):
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Found %s chat(s) in the Gmvault db.\n" % (len(db_gmail_ids_info)) )
self.src.select_folder('CHATS') #go to chats
chat_ids = self.src.search(imap_utils.GIMAPFetcher.IMAP_ALL)
db_chat_ids = set(db_gmail_ids_info.keys())
LOG.debug("Got %s chat imap_ids from the Gmail Server." % (len(chat_ids)))
                #delete from the gmvault db the chats that were removed from Gmail since the last sync
self._delete_sync(chat_ids, db_chat_ids, db_gmail_ids_info , 'chat')
else:
LOG.critical("Chats IMAP Directory not visible on Gmail. Ignore deletion of chats.")
LOG.critical("\nDeletion checkup done in %s." % (timer.elapsed_human_time()))
def remote_sync(self):
"""
Sync with a remote source (IMAP mirror or cloud storage area)
"""
#sync remotely
pass
def save_lastid(self, op_type, gm_id):
"""
           Save the passed gm_id in the last_id file for the given operation type.
           For the moment, reopen the file every time.
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. "
"This should not happen, send the error to the "
"software developers." % op_type)
#filepath = '%s/%s_%s' % (gmvault_utils.get_home_dir_path(), self.login, filename)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login,
filename)
with open(filepath, 'w') as f:
json.dump({'last_id' : gm_id}, f)
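The checkpoint written by save_lastid is just a one-key JSON document. A self-contained round trip, using a hypothetical path in place of the real info dir:
import json
checkpoint = '/tmp/example_last_id.sync'  # hypothetical path for this sketch
with open(checkpoint, 'w') as out_file:
    json.dump({'last_id': 1234567890}, out_file)
with open(checkpoint, 'r') as in_file:
    print json.load(in_file)['last_id']  # 1234567890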
def get_gmails_ids_left_to_restore(self, op_type, db_gmail_ids_info):
"""
           Get the ids that still need to be restored.
Return a dict key = gm_id, val = directory
"""
filename = self.OP_TO_FILENAME.get(op_type, None)
if not filename:
raise Exception("Bad Operation (%s) in save_last_id. This should "
"not happen, send the error to the software "
"developers." % op_type)
#filepath = '%s/%s_%s' % (gmvault_utils.get_home_dir_path(), self.login, filename)
filepath = '%s/%s_%s' % (self.gstorer.get_info_dir(), self.login, filename)
if not os.path.exists(filepath):
LOG.critical("last_id restore file %s doesn't exist.\nRestore the full list of backed up emails." %(filepath))
return db_gmail_ids_info
json_obj = json.load(open(filepath, 'r'))
last_id = json_obj['last_id']
last_id_index = -1
try:
keys = db_gmail_ids_info.keys()
last_id_index = keys.index(last_id)
LOG.critical("Restart from gmail id %s." % (last_id))
except ValueError, _:
#element not in keys return current set of keys
LOG.error("Cannot restore from last restore gmail id. It is not in the disk database.")
new_gmail_ids_info = collections_utils.OrderedDict()
if last_id_index != -1:
for key in db_gmail_ids_info.keys()[last_id_index+1:]:
new_gmail_ids_info[key] = db_gmail_ids_info[key]
else:
new_gmail_ids_info = db_gmail_ids_info
return new_gmail_ids_info
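The restart logic above boils down to slicing an ordered mapping at the position of the last restored id. A standalone sketch using collections.OrderedDict directly (the module's collections_utils.OrderedDict is assumed to behave the same way for this purpose):
from collections import OrderedDict
def ids_left(db_gmail_ids_info, last_id):
    keys = db_gmail_ids_info.keys()  # a plain list under Python 2
    try:
        idx = keys.index(last_id)
    except ValueError:
        return db_gmail_ids_info  # unknown id: redo the full restore
    return OrderedDict((k, db_gmail_ids_info[k]) for k in keys[idx + 1:])
info = OrderedDict([(10, 'dir-a'), (11, 'dir-b'), (12, 'dir-c')])
print ids_left(info, 11)  # OrderedDict([(12, 'dir-c')])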
def restore(self, pivot_dir = None, extra_labels = [], \
restart = False, emails_only = False, chats_only = False): #pylint:disable=W0102
"""
Restore emails in a gmail account
"""
self.timer.start() #start restoring
#self.src.select_folder('ALLMAIL') #insure that Gmvault is in ALLMAIL
if not chats_only:
# backup emails
LOG.critical("Start emails restoration.\n")
if pivot_dir:
LOG.critical("Quick mode activated. Will only restore all emails since %s.\n" % (pivot_dir))
self.restore_emails(pivot_dir, extra_labels, restart)
else:
LOG.critical("Skip emails restoration.\n")
if not emails_only:
# backup chats
LOG.critical("Start chats restoration.\n")
self.restore_chats(extra_labels, restart)
else:
LOG.critical("Skip chats restoration.\n")
LOG.critical("Restore operation performed in %s.\n" \
% (self.timer.seconds_to_human_time(self.timer.elapsed())))
#update number of reconnections
self.error_report["reconnections"] = self.src.total_nb_reconns
return self.error_report
def restore_chats(self, extra_labels = [], restart = False): #pylint:disable=W0102
"""
restore chats
"""
LOG.critical("Restore chats in gmail account %s." % (self.login) )
LOG.critical("Read chats info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_chats_gmail_ids()
LOG.critical("Total number of chats to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_CHAT_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all chats id left to restore. Still %s chats to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General", "restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General", "nb_messages_per_restore_batch", 100)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = itertools.ifilter(lambda x: x != None, group_imap_ids)
labels_to_create = set() #create label set
            labels_to_create.update(extra_labels) # add extra labels to be applied to all emails
LOG.critical("Processing next batch of %s chats.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing chat content with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
try:
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
LOG.debug("label = %s\n" % (label))
labels_to_apply[str(label)] = imap_id
# get list of labels to create (do a union with labels to create)
labels_to_create.update([ label for label in labels if label not in existing_labels])
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for chats ids %s." % (group_imap_ids))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of chats.")
try:
LOG.debug("Changing directory. Going into ALLMAIL")
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
if isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error ignore labelling and continue the restore
LOG.critical("Ignore labelling")
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
            #after each batch, indicate the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d chats in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
            # save id every nb_items restored chats
            # add the last treated gm_id
            self.save_lastid(self.OP_CHAT_RESTORE, last_id)
return self.error_report
def restore_emails(self, pivot_dir = None, extra_labels = [], restart = False):
"""
           Restore emails in a gmail account, using batching to group restores.
           If you are not in the "All Mail" folder, pushing emails is extremely fast,
           but labels cannot be reapplied outside of All Mail because the returned uid
           is dependent on the folder. On the other hand, labels can be restored in
           batches, which saves a lot of time. The idea is to take a batch of emails,
           push them into the mailbox one by one and record the uid for each of them,
           then build a dict of label => uid_list and, for each label, send a single
           store command after having changed dir.
"""
LOG.critical("Restore emails in gmail account %s." % (self.login) )
LOG.critical("Read email info from %s gmvault-db." % (self.db_root_dir))
#get gmail_ids from db
db_gmail_ids_info = self.gstorer.get_all_existing_gmail_ids(pivot_dir)
LOG.critical("Total number of elements to restore %s." % (len(db_gmail_ids_info.keys())))
if restart:
db_gmail_ids_info = self.get_gmails_ids_left_to_restore(self.OP_EMAIL_RESTORE, db_gmail_ids_info)
total_nb_emails_to_restore = len(db_gmail_ids_info)
LOG.critical("Got all emails id left to restore. Still %s emails to do.\n" % (total_nb_emails_to_restore) )
existing_labels = set() #set of existing labels to not call create_gmail_labels all the time
nb_emails_restored = 0 #to count nb of emails restored
labels_to_apply = collections_utils.SetMultimap()
#get all mail folder name
all_mail_name = self.src.get_folder_name("ALLMAIL")
# go to DRAFTS folder because if you are in ALL MAIL when uploading emails it is very slow
folder_def_location = gmvault_utils.get_conf_defaults().get("General", "restore_default_location", "DRAFTS")
self.src.select_folder(folder_def_location)
timer = gmvault_utils.Timer() # local timer for restore emails
timer.start()
nb_items = gmvault_utils.get_conf_defaults().get_int("General", "nb_messages_per_restore_batch", 80)
for group_imap_ids in itertools.izip_longest(fillvalue=None, *[iter(db_gmail_ids_info)]*nb_items):
last_id = group_imap_ids[-1] #will be used to save the last id
#remove all None elements from group_imap_ids
group_imap_ids = itertools.ifilter(lambda x: x != None, group_imap_ids)
labels_to_create = set() #create label set
            labels_to_create.update(extra_labels) # add extra labels to be applied to all emails
LOG.critical("Processing next batch of %s emails.\n" % (nb_items))
# unbury the metadata for all these emails
for gm_id in group_imap_ids:
email_meta, email_data = self.gstorer.unbury_email(gm_id)
LOG.critical("Pushing email body with id %s." % (gm_id))
LOG.debug("Subject = %s." % (email_meta[self.gstorer.SUBJECT_K]))
try:
# push data in gmail account and get uids
imap_id = self.src.push_data(all_mail_name, email_data, \
email_meta[self.gstorer.FLAGS_K] , \
email_meta[self.gstorer.INT_DATE_K] )
#labels for this email => real_labels U extra_labels
labels = set(email_meta[self.gstorer.LABELS_K])
# add in the labels_to_create struct
for label in labels:
if label != "\\Starred":
#LOG.debug("label = %s\n" % (label.encode('utf-8')))
LOG.debug("label = %s\n" % (label))
labels_to_apply[label] = imap_id
# get list of labels to create (do a union with labels to create)
labels_to_create.update([ label for label in labels if label not in existing_labels])
except Exception, err:
handle_restore_imap_error(err, gm_id, db_gmail_ids_info, self)
#create the non existing labels and update existing labels
if len(labels_to_create) > 0:
LOG.debug("Labels creation tentative for emails with ids %s." % (group_imap_ids))
existing_labels = self.src.create_gmail_labels(labels_to_create, existing_labels)
# associate labels with emails
LOG.critical("Applying labels to the current batch of emails.")
try:
LOG.debug("Changing directory. Going into ALLMAIL")
the_timer = gmvault_utils.Timer()
the_timer.start()
self.src.select_folder('ALLMAIL') #go to ALL MAIL to make STORE usable
LOG.debug("Changed dir. Operation time = %s ms" % (the_timer.elapsed_ms()))
for label in labels_to_apply.keys():
self.src.apply_labels_to(labels_to_apply[label], [label])
except Exception, err:
LOG.error("Problem when applying labels %s to the following ids: %s" %(label, labels_to_apply[label]), err)
LOG.error("Problem when applying labels.", err)
if isinstance(err, imaplib.IMAP4.abort) and str(err).find("=> Gmvault ssl socket error: EOF") >= 0:
# if this is a Gmvault SSL Socket error ignore labelling and continue the restore
LOG.critical("Ignore labelling")
LOG.critical("Disconnecting and reconnecting to restart cleanly.")
self.src.reconnect() #reconnect
else:
raise err
finally:
self.src.select_folder(folder_def_location) # go back to an empty DIR (Drafts) to be fast
labels_to_apply = collections_utils.SetMultimap() #reset label to apply
nb_emails_restored += nb_items
            #after each batch, indicate the number of messages left to process
left_emails = (total_nb_emails_to_restore - nb_emails_restored)
if (left_emails > 0):
elapsed = timer.elapsed() #elapsed time in seconds
LOG.critical("\n== Processed %d emails in %s. %d left to be restored "\
"(time estimate %s).==\n" % \
(nb_emails_restored, timer.seconds_to_human_time(elapsed), \
left_emails, timer.estimate_time_left(nb_emails_restored, elapsed, left_emails)))
            # save id after every batch of nb_items restored emails
            # add the last treated gm_id
self.save_lastid(self.OP_EMAIL_RESTORE, last_id)
return self.error_report
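The label-batching strategy that restore_emails describes can be shown in isolation: collect uid-per-label in a multimap while pushing, then issue one store per label rather than one per message. A sketch with defaultdict(set) standing in for collections_utils.SetMultimap, and hypothetical uids and labels:
from collections import defaultdict
labels_to_apply = defaultdict(set)  # stand-in for collections_utils.SetMultimap
pushed = [(101, ['Work', 'Urgent']), (102, ['Work'])]  # hypothetical (uid, labels) pairs
for uid, labels in pushed:
    for label in labels:
        labels_to_apply[label].add(uid)
for label, uids in labels_to_apply.items():
    # one STORE per label instead of one per pushed message
    print 'apply label %s to uids %s' % (label, sorted(uids))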
| 60,133 | Python | .py | 1,004 | 42.648406 | 130 | 0.574155 | gaubert/gmvault | 3,572 | 285 | 144 | AGPL-3.0 | 9/5/2024, 5:11:34 PM (Europe/Amsterdam) |
| 12,328 | .pydevproject | gaubert_gmvault/.pydevproject |
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/gmvault/src</path>
</pydev_pathproperty>
</pydev_project>
| 417 | Python | .pyde | 9 | 45.222222 | 91 | 0.773956 | gaubert/gmvault | 3,572 | 285 | 144 | AGPL-3.0 | 9/5/2024, 5:11:34 PM (Europe/Amsterdam) |
| 12,329 | conftest.py | harelba_q/conftest.py |
#!/usr/bin/env python
# Required so pytest can find files properly
| 70 | Python | .py | 2 | 32.5 | 44 | 0.784615 | harelba/q | 10,180 | 421 | 117 | GPL-3.0 | 9/5/2024, 5:11:42 PM (Europe/Amsterdam) |
| 12,330 | setup.py | harelba_q/setup.py |
#!/usr/bin/env python
from setuptools import setup
import setuptools
q_version = '3.1.6'
with open("README.markdown", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='q',
url='https://github.com/harelba/q',
license='LICENSE',
version=q_version,
author='Harel Ben-Attia',
description="Run SQL directly on CSV or TSV files",
long_description=long_description,
long_description_content_type="text/markdown",
author_email='harelba@gmail.com',
install_requires=[
'six==1.11.0'
],
package_dir={"": "bin"},
packages=setuptools.find_packages(where="bin"),
entry_points={
'console_scripts': [
'q = bin.q:run_standalone'
]
}
)
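The entry_points stanza above means the installed q command is nothing more than a call into bin.q:run_standalone. A sketch of the manual equivalent, assuming the package has been installed so that the bin package is importable:
from bin.q import run_standalone  # resolvable only after installation
if __name__ == '__main__':
    run_standalone()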
| 744 | Python | .py | 27 | 22.703704 | 58 | 0.645161 | harelba/q | 10,180 | 421 | 117 | GPL-3.0 | 9/5/2024, 5:11:42 PM (Europe/Amsterdam) |
| 12,331 | test_suite.py | harelba_q/test/test_suite.py |
#!/usr/bin/env python3
#
# test suite for q.
#
# Prefer end-to-end tests, running the actual q command and testing stdout/stderr, and the return code.
# Some utilities are provided for making that easy, see other tests for examples.
#
# Q_EXECUTABLE env var can be used to inject the path of q. This allows full e2e testing of the resulting executable
# instead of just testing the python code.
#
# Tests are compatible with Linux and OSX (path separators, tmp folder, etc.).
from __future__ import print_function
import collections
import functools
import tempfile
import unittest
import random
import json
import uuid
from collections import OrderedDict
from json import JSONEncoder
from subprocess import PIPE, Popen, STDOUT
import sys
import os
import time
from tempfile import NamedTemporaryFile
import locale
import pprint
import six
from six.moves import range
import codecs
import itertools
from gzip import GzipFile
import pytest
import sqlite3
import re
sys.path.append(os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])),'..','bin'))
from bin.q import QTextAsData, QOutput, QOutputPrinter, QInputParams, DataStream, Sqlite3DB
# q uses this encoding as the default output encoding. Some of the tests use it in order to
# make sure that the output is correctly encoded
SYSTEM_ENCODING = locale.getpreferredencoding()
EXAMPLES = os.path.abspath(os.path.join(os.getcwd(), 'examples'))
Q_EXECUTABLE = os.getenv('Q_EXECUTABLE', os.path.abspath('./bin/q.py'))
Q_SKIP_EXECUTABLE_VALIDATION = os.getenv('Q_SKIP_EXECUTABLE_VALIDATION','false')
if not Q_SKIP_EXECUTABLE_VALIDATION == 'true':
Q_EXECUTABLE = os.path.abspath(Q_EXECUTABLE)
if not os.path.exists(Q_EXECUTABLE):
raise Exception("q executable must reside in {}".format(Q_EXECUTABLE))
else:
Q_EXECUTABLE = os.getenv('Q_EXECUTABLE')
# Skip checking of executable (useful for testing that q is in the path)
pass
DEBUG = '-v' in sys.argv
if os.environ.get('Q_DEBUG'):
DEBUG = True
def batch(iterable, n=1):
r = []
l = len(iterable)
for ndx in range(0, l, n):
r += [iterable[ndx:min(ndx + n, l)]]
return r
def partition(pred, iterable):
t1, t2 = itertools.tee(iterable)
return list(itertools.filterfalse(pred, t1)), list(filter(pred, t2))
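Illustrative usage of the two helpers above (not part of the original suite): batch chunks a list into fixed-size pieces, and partition splits an iterable into (failing, passing) lists for a predicate:
assert batch([1, 2, 3, 4, 5], n=2) == [[1, 2], [3, 4], [5]]
odds, evens = partition(lambda x: x % 2 == 0, [1, 2, 3, 4])
assert (odds, evens) == ([1, 3], [2, 4])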
def run_command(cmd_to_run,env_to_inject=None):
global DEBUG
if DEBUG:
print("CMD: {}".format(cmd_to_run))
if env_to_inject is None:
env_to_inject = os.environ
env = env_to_inject
p = Popen(cmd_to_run, stdout=PIPE, stderr=PIPE, shell=True,env=env)
o, e = p.communicate()
# remove last newline
o = o.rstrip()
e = e.strip()
# split rows
if o != six.b(''):
o = o.split(six.b(os.linesep))
else:
o = []
if e != six.b(''):
e = e.split(six.b(os.linesep))
else:
e = []
res = (p.returncode, o, e)
if DEBUG:
print("RESULT:{}".format(res))
return res
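A hypothetical call showing the shape of run_command's return value, with stdout split into a list of byte strings (assumes a POSIX shell where echo behaves as expected):
rc, out_lines, err_lines = run_command('echo hello')
assert (rc, out_lines, err_lines) == (0, [six.b('hello')], [])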
uneven_ls_output = six.b("""drwxr-xr-x 2 root root 4096 Jun 11 2012 /selinux
drwxr-xr-x 2 root root 4096 Apr 19 2013 /mnt
drwxr-xr-x 2 root root 4096 Apr 24 2013 /srv
drwx------ 2 root root 16384 Jun 21 2013 /lost+found
lrwxrwxrwx 1 root root 33 Jun 21 2013 /initrd.img.old -> /boot/initrd.img-3.8.0-19-generic
drwxr-xr-x 2 root root 4096 Jun 21 2013 /cdrom
drwxr-xr-x 3 root root 4096 Jun 21 2013 /home
lrwxrwxrwx 1 root root 29 Jun 21 2013 /vmlinuz -> boot/vmlinuz-3.8.0-19-generic
lrwxrwxrwx 1 root root 32 Jun 21 2013 /initrd.img -> boot/initrd.img-3.8.0-19-generic
""")
find_output = six.b("""8257537 32 drwxrwxrwt 218 root root 28672 Mar 1 11:00 /tmp
8299123 4 drwxrwxr-x 2 harel harel 4096 Feb 27 10:06 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/stormdist/testTopology3fad644a-54c0-4def-b19e-77ca97941595-1-1393513576
8263229 964 -rw-rw-r-- 1 mapred mapred 984569 Feb 27 10:06 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/stormdist/testTopology3fad644a-54c0-4def-b19e-77ca97941595-1-1393513576/stormcode.ser
8263230 4 -rw-rw-r-- 1 harel harel 1223 Feb 27 10:06 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/stormdist/testTopology3fad644a-54c0-4def-b19e-77ca97941595-1-1393513576/stormconf.ser
8299113 4 drwxrwxr-x 2 harel harel 4096 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate
8263406 4 -rw-rw-r-- 1 harel harel 2002 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate/1393514168746
8263476 0 -rw-rw-r-- 1 harel harel 0 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate/1393514168746.version
8263607 0 -rw-rw-r-- 1 harel harel 0 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate/1393514169735.version
8263533 0 -rw-rw-r-- 1 harel harel 0 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate/1393514172733.version
8263604 0 -rw-rw-r-- 1 harel harel 0 Feb 27 10:16 /tmp/1628a3fd-b9fe-4dd1-bcdc-7eb869fe7461/supervisor/localstate/1393514175754.version
""")
header_row = six.b('name,value1,value2')
sample_data_rows = [six.b('a,1,0'), six.b('b,2,0'), six.b('c,,0')]
sample_data_rows_with_empty_string = [six.b('a,aaa,0'), six.b('b,bbb,0'), six.b('c,,0')]
sample_data_no_header = six.b("\n").join(sample_data_rows) + six.b("\n")
sample_data_with_empty_string_no_header = six.b("\n").join(
sample_data_rows_with_empty_string) + six.b("\n")
sample_data_with_header = header_row + six.b("\n") + sample_data_no_header
sample_data_with_missing_header_names = six.b("name,value1\n") + sample_data_no_header
def generate_sample_data_with_header(header):
return header + six.b("\n") + sample_data_no_header
sample_quoted_data = six.b('''non_quoted regular_double_quoted double_double_quoted escaped_double_quoted multiline_double_double_quoted multiline_escaped_double_quoted
control-value-1 "control-value-2" control-value-3 "control-value-4" control-value-5 "control-value-6"
non-quoted-value "this is a quoted value" "this is a ""double double"" quoted value" "this is an escaped \\"quoted value\\"" "this is a double double quoted ""multiline
value""." "this is an escaped \\"multiline
value\\"."
control-value-1 "control-value-2" control-value-3 "control-value-4" control-value-5 "control-value-6"
''')
double_double_quoted_data = six.b('''regular_double_quoted double_double_quoted
"this is a quoted value" "this is a quoted value with ""double double quotes"""
''')
escaped_double_quoted_data = six.b('''regular_double_quoted escaped_double_quoted
"this is a quoted value" "this is a quoted value with \\"escaped double quotes\\""
''')
combined_quoted_data = six.b('''regular_double_quoted double_double_quoted escaped_double_quoted
"this is a quoted value" "this is a quoted value with ""double double quotes""" "this is a quoted value with \\"escaped double quotes\\""
''')
sample_quoted_data2 = six.b('"quoted data" 23\nunquoted-data 54')
sample_quoted_data2_with_newline = six.b('"quoted data with\na new line inside it":23\nunquoted-data:54')
one_column_data = six.b('''data without commas 1
data without commas 2
''')
# Values with leading whitespace
sample_data_rows_with_spaces = [six.b('a,1,0'), six.b(' b, 2,0'), six.b('c,,0')]
sample_data_with_spaces_no_header = six.b("\n").join(
sample_data_rows_with_spaces) + six.b("\n")
header_row_with_spaces = six.b('name,value 1,value2')
sample_data_with_spaces_with_header = header_row_with_spaces + \
six.b("\n") + sample_data_with_spaces_no_header
long_value1 = "23683289372328372328373"
int_value = "2328372328373"
sample_data_with_long_values = "%s\n%s\n%s" % (long_value1,int_value,int_value)
def one_column_warning(e):
return e[0].startswith(six.b('Warning: column count is one'))
def sqlite_dict_factory(cursor, row):
d = {}
for idx, col in enumerate(cursor.description):
d[col[0]] = row[idx]
return d
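sqlite_dict_factory is meant to be plugged into a connection's row_factory so that rows come back as column-name dicts; a minimal in-memory demonstration (sqlite3 is already imported above):
demo_conn = sqlite3.connect(':memory:')
demo_conn.row_factory = sqlite_dict_factory
assert demo_conn.execute('select 1 as a, 2 as b').fetchone() == {'a': 1, 'b': 2}
demo_conn.close()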
class AbstractQTestCase(unittest.TestCase):
def create_file_with_data(self, data, encoding=None,prefix=None,suffix=None,use_real_path=True):
if encoding is not None:
raise Exception('Deprecated: Encoding must be none')
tmpfile = NamedTemporaryFile(delete=False,prefix=prefix,suffix=suffix)
tmpfile.write(data)
tmpfile.close()
if use_real_path:
tmpfile.name = os.path.realpath(tmpfile.name)
return tmpfile
def generate_tmpfile_name(self,prefix=None,suffix=None):
tmpfile = NamedTemporaryFile(delete=False,prefix=prefix,suffix=suffix)
os.remove(tmpfile.name)
return os.path.realpath(tmpfile.name)
def arrays_to_csv_file_content(self,delimiter,header_row_list,cell_list):
all_rows = [delimiter.join(row) for row in [header_row_list] + cell_list]
return six.b("\n").join(all_rows)
def create_qsql_file_with_content_and_return_filename(self, header_row,cell_list):
csv_content = self.arrays_to_csv_file_content(six.b(','),header_row,cell_list)
tmpfile = self.create_file_with_data(csv_content)
cmd = '%s -d , -H "select count(*) from %s" -C readwrite' % (Q_EXECUTABLE,tmpfile.name)
r, o, e = run_command(cmd)
self.assertEqual(r, 0)
created_qsql_filename = '%s.qsql' % tmpfile.name
self.assertTrue(os.path.exists(created_qsql_filename))
return created_qsql_filename
def arrays_to_qsql_file_content(self, header_row,cell_list):
csv_content = self.arrays_to_csv_file_content(six.b(','),header_row,cell_list)
tmpfile = self.create_file_with_data(csv_content)
cmd = '%s -d , -H "select count(*) from %s" -C readwrite' % (Q_EXECUTABLE,tmpfile.name)
r, o, e = run_command(cmd)
self.assertEqual(r, 0)
matching_qsql_filename = '%s.qsql' % tmpfile.name
f = open(matching_qsql_filename,'rb')
qsql_file_bytes = f.read()
f.close()
self.assertEqual(matching_qsql_filename,'%s.qsql' % tmpfile.name)
return qsql_file_bytes
def write_file(self,filename,data):
f = open(filename,'wb')
f.write(data)
f.close()
def create_folder_with_files(self,filename_to_content_dict,prefix, suffix):
name = self.random_tmp_filename(prefix,suffix)
os.makedirs(name)
for filename,content in six.iteritems(filename_to_content_dict):
if os.path.sep in filename:
os.makedirs('%s/%s' % (name,os.path.split(filename)[0]))
f = open(os.path.join(name,filename),'wb')
f.write(content)
f.close()
return name
def cleanup_folder(self,tmpfolder):
if not tmpfolder.startswith(os.path.realpath('/var/tmp')):
raise Exception('Guard against accidental folder deletions: %s' % tmpfolder)
global DEBUG
if not DEBUG:
print("should have removed tmpfolder %s. Not doing it for the sake of safety. # TODO re-add" % tmpfolder)
pass # os.remove(tmpfolder)
def cleanup(self, tmpfile):
global DEBUG
if not DEBUG:
os.remove(tmpfile.name)
def random_tmp_filename(self,prefix,postfix):
# TODO Use more robust method for this
path = '/var/tmp'
return os.path.realpath('%s/%s-%s.%s' % (path,prefix,random.randint(0,1000000000),postfix))
def get_sqlite_table_list(c,exclude_qcatalog=True):
if exclude_qcatalog:
r = c.execute("select tbl_name from sqlite_master where type='table' and tbl_name != '_qcatalog'").fetchall()
else:
r = c.execute("select tbl_name from sqlite_master where type='table'").fetchall()
return r
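A quick in-memory illustration of get_sqlite_table_list, showing that the _qcatalog bookkeeping table is filtered out by default (table order follows creation order in sqlite_master):
demo_conn = sqlite3.connect(':memory:')
demo_conn.execute('create table t1 (a int)')
demo_conn.execute('create table _qcatalog (x int)')
assert get_sqlite_table_list(demo_conn) == [('t1',)]
assert get_sqlite_table_list(demo_conn, exclude_qcatalog=False) == [('t1',), ('_qcatalog',)]
demo_conn.close()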
class SaveToSqliteTests(AbstractQTestCase):
# Returns a folder with files and a header in each, one column named 'a'
def generate_files_in_folder(self,batch_size, file_count):
numbers = list(range(1, 1 + batch_size * file_count))
numbers_as_text = batch([str(x) for x in numbers], n=batch_size)
content_list = list(map(six.b, ['a\n' + "\n".join(x) + '\n' for x in numbers_as_text]))
filename_list = list(map(lambda x: 'file-%s' % x, range(file_count)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d, 'split-files', 'sqlite-stuff')
return (tmpfolder,filename_list)
# 11074 3.8.2021 10:53 bin/q.py "select count(*) from xxxx/file-95 left join xxxx/file-96 left join xxxx/file-97 left join xxxx/file-97 left join xxxx/file-98 left join xxxx/*" -c 1 -C readwrite -A
    # fails because it takes the qsql files as well
def test_save_glob_files_to_sqlite(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
cmd = '%s -H "select count(*) from %s/*" -c 1 -S %s' % (Q_EXECUTABLE,tmpfolder,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
c = sqlite3.connect(output_sqlite_file)
results = c.execute('select a from file_dash_0').fetchall()
self.assertEqual(len(results),BATCH_SIZE*FILE_COUNT)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1,BATCH_SIZE*FILE_COUNT+1)))
tables = get_sqlite_table_list(c)
self.assertEqual(len(tables),1)
c.close()
self.cleanup_folder(tmpfolder)
def test_save_multiple_files_to_sqlite(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
cmd = '%s -H "select count(*) from %s" -c 1 -S %s' % (Q_EXECUTABLE,tables_as_str,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
c = sqlite3.connect(output_sqlite_file)
tables = get_sqlite_table_list(c)
self.assertEqual(len(tables), FILE_COUNT)
for i in range(FILE_COUNT):
results = c.execute('select a from file_dash_%s' % i).fetchall()
self.assertEqual(len(results),BATCH_SIZE)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
c.close()
self.cleanup_folder(tmpfolder)
def test_save_multiple_files_to_sqlite_without_duplicates(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
# duplicate the left-joins for all the files, so the query will contain each filename twice
tables_as_str = "%s left join %s" % (tables_as_str,tables_as_str)
cmd = '%s -H "select count(*) from %s" -c 1 -S %s' % (Q_EXECUTABLE,tables_as_str,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
c = sqlite3.connect(output_sqlite_file)
tables = get_sqlite_table_list(c)
# total table count should still be FILE_COUNT, even with the duplications
self.assertEqual(len(tables), FILE_COUNT)
for i in range(FILE_COUNT):
results = c.execute('select a from file_dash_%s' % i).fetchall()
self.assertEqual(len(results),BATCH_SIZE)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
c.close()
self.cleanup_folder(tmpfolder)
def test_sqlite_file_is_not_created_if_some_table_does_not_exist(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
tables_as_str = tables_as_str + ' left join %s/non_existent_table' % (tmpfolder)
cmd = '%s -H "select count(*) from %s" -c 1 -S %s' % (Q_EXECUTABLE,tables_as_str,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 30)
self.assertEqual(len(e), 2)
self.assertEqual(e[0],six.b("Going to save data into a disk database: %s" % output_sqlite_file))
self.assertEqual(e[1],six.b("No files matching '%s/non_existent_table' have been found" % tmpfolder))
self.assertTrue(not os.path.exists(output_sqlite_file))
self.cleanup_folder(tmpfolder)
def test_recurring_glob_and_separate_files_in_same_query_when_writing_to_sqlite(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
# The same files are left-joined in the query as an additional "left join <folder>/*". This should create an additional table
# in the sqlite file, with all the data in it
cmd = '%s -H "select count(*) from %s left join %s/*" -c 1 -S %s' % (Q_EXECUTABLE,tables_as_str,tmpfolder,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
c = sqlite3.connect(output_sqlite_file)
tables = get_sqlite_table_list(c)
# plus the additional table from the glob
self.assertEqual(len(tables), FILE_COUNT+1)
# check all the per-file tables
for i in range(FILE_COUNT):
results = c.execute('select a from file_dash_%s' % i).fetchall()
self.assertEqual(len(results),BATCH_SIZE)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
# ensure the glob-based table exists, with an _2 added to the name, as the original "file_dash_0" already exists in the sqlite db
results = c.execute('select a from file_dash_0_2').fetchall()
self.assertEqual(len(results),FILE_COUNT*BATCH_SIZE)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1,1+FILE_COUNT*BATCH_SIZE)))
c.close()
self.cleanup_folder(tmpfolder)
def test_empty_sqlite_handling(self):
fn = self.generate_tmpfile_name("empty",".sqlite")
c = sqlite3.connect(fn)
c.execute('create table x (a int)').fetchall()
c.execute('drop table x').fetchall()
c.close()
cmd = '%s "select * from %s"' % (Q_EXECUTABLE,fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,88)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('sqlite file %s has no tables' % fn))
def test_storing_to_disk_too_many_qsql_files(self):
BATCH_SIZE = 10
MAX_ATTACHED_DBS = 5
FILE_COUNT = MAX_ATTACHED_DBS + 4
numbers_as_text = batch([str(x) for x in range(1, 1 + BATCH_SIZE * FILE_COUNT)], n=BATCH_SIZE)
content_list = map(six.b, ["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x, range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d, 'split-files', 'attach-limit')
for fn in filename_list:
cmd = '%s -c 1 "select count(*) from %s/%s" -C readwrite' % (Q_EXECUTABLE,tmpfolder, fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
output_sqlite_file = self.generate_tmpfile_name("many-sqlites",".sqlite")
table_refs = list(['select * from %s/%s.qsql' % (tmpfolder,x) for x in filename_list])
table_refs_str = " UNION ALL ".join(table_refs)
        # Limit max attached dbs according to the parameter (must be below the hardcoded sqlite limit, which is 10 in a standard sqlite build)
cmd = '%s "select * from (%s)" -S %s --max-attached-sqlite-databases=%s' % (Q_EXECUTABLE,table_refs_str,output_sqlite_file,MAX_ATTACHED_DBS)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),4)
c = sqlite3.connect(output_sqlite_file)
tables_results = c.execute("select tbl_name from sqlite_master where type='table'").fetchall()
table_names = list(sorted([x[0] for x in tables_results]))
self.assertEqual(len(table_names),FILE_COUNT)
for i,tn in enumerate(table_names):
self.assertEqual(tn,'file_dash_%s' % i)
table_content = c.execute('select * from %s' % tn).fetchall()
self.assertEqual(len(table_content),BATCH_SIZE)
cmd = '%s "select * from %s:::%s"' % (Q_EXECUTABLE,output_sqlite_file,tn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),BATCH_SIZE)
self.assertEqual(o,list([six.b(str(x)) for x in range(1 + i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)]))
self.cleanup_folder(tmpfolder)
def test_storing_to_disk_too_many_sqlite_files(self):
        # a variation of test_storing_to_disk_too_many_qsql_files, which drops the _qcatalog table from the cached files,
        # so they'll be just regular sqlite files
BATCH_SIZE = 10
MAX_ATTACHED_DBS = 5
FILE_COUNT = MAX_ATTACHED_DBS + 4
numbers_as_text = batch([str(x) for x in range(1, 1 + BATCH_SIZE * FILE_COUNT)], n=BATCH_SIZE)
content_list = map(six.b, ["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x, range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d, 'split-files', 'attach-limit')
for fn in filename_list:
cmd = '%s -c 1 "select count(*) from %s/%s" -C readwrite' % (Q_EXECUTABLE,tmpfolder, fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
c = sqlite3.connect('%s/%s.qsql' % (tmpfolder,fn))
c.execute('drop table _qcatalog').fetchall()
c.close()
os.rename('%s/%s.qsql' % (tmpfolder,fn),'%s/%s.sqlite' % (tmpfolder,fn))
output_sqlite_file = self.generate_tmpfile_name("many-sqlites",".sqlite")
table_refs = list(['select * from %s/%s.sqlite' % (tmpfolder,x) for x in filename_list])
table_refs_str = " UNION ALL ".join(table_refs)
        # Limit max attached dbs according to the parameter (must be below the hardcoded sqlite limit, which is 10 in a standard sqlite build)
cmd = '%s "select * from (%s)" -S %s --max-attached-sqlite-databases=%s' % (Q_EXECUTABLE,table_refs_str,output_sqlite_file,MAX_ATTACHED_DBS)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),4)
c = sqlite3.connect(output_sqlite_file)
tables_results = c.execute("select tbl_name from sqlite_master where type='table'").fetchall()
table_names = list(sorted([x[0] for x in tables_results]))
self.assertEqual(len(table_names),FILE_COUNT)
for i,tn in enumerate(table_names):
self.assertEqual(tn,'file_dash_%s' % i)
table_content = c.execute('select * from %s' % tn).fetchall()
self.assertEqual(len(table_content),BATCH_SIZE)
cmd = '%s "select * from %s:::%s"' % (Q_EXECUTABLE,output_sqlite_file,tn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),BATCH_SIZE)
self.assertEqual(o,list([six.b(str(x)) for x in range(1 + i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)]))
self.cleanup_folder(tmpfolder)
def test_storing_to_disk_too_many_sqlite_files__over_the_sqlite_limit(self):
# a variation of test_storing_to_disk_too_many_sqlite_files, but with a limit above the sqlite hardcoded limit
MAX_ATTACHED_DBS = 20 # standard sqlite limit is 10, so q should throw an error
BATCH_SIZE = 10
FILE_COUNT = MAX_ATTACHED_DBS + 4
numbers_as_text = batch([str(x) for x in range(1, 1 + BATCH_SIZE * FILE_COUNT)], n=BATCH_SIZE)
content_list = map(six.b, ["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x, range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d, 'split-files', 'attach-limit')
for fn in filename_list:
cmd = '%s -c 1 "select count(*) from %s/%s" -C readwrite' % (Q_EXECUTABLE,tmpfolder, fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
c = sqlite3.connect('%s/%s.qsql' % (tmpfolder,fn))
c.execute('drop table _qcatalog').fetchall()
c.close()
os.rename('%s/%s.qsql' % (tmpfolder,fn),'%s/%s.sqlite' % (tmpfolder,fn))
output_sqlite_file = self.generate_tmpfile_name("many-sqlites",".sqlite")
table_refs = list(['select * from %s/%s.sqlite' % (tmpfolder,x) for x in filename_list])
table_refs_str = " UNION ALL ".join(table_refs)
        # Set max attached dbs above the hardcoded sqlite limit (10 in a standard sqlite build), so q should fail with an explicit error
cmd = '%s "select * from (%s)" -S %s --max-attached-sqlite-databases=%s' % (Q_EXECUTABLE,table_refs_str,output_sqlite_file,MAX_ATTACHED_DBS)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,89)
self.assertEqual(len(o),0)
self.assertEqual(len(e),2)
self.assertTrue(e[0].startswith(six.b('Going to save data into')))
self.assertTrue(e[1].startswith(six.b('There are too many attached databases. Use a proper --max-attached-sqlite-databases parameter which is below the maximum')))
self.cleanup_folder(tmpfolder)
def test_qtable_name_normalization__starting_with_a_digit(self):
numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 101)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
base_filename_with_digits = '010'
new_tmp_folder = self.create_folder_with_files({
base_filename_with_digits : self.arrays_to_csv_file_content(six.b(','),header,numbers)
},prefix='xx',suffix='digits')
effective_filename = '%s/010' % new_tmp_folder
output_sqlite_filename = self.generate_tmpfile_name("starting-with-digit",".sqlite")
cmd = '%s -d , -H "select count(aa),count(bb),count(cc) from %s" -S %s' % (Q_EXECUTABLE,effective_filename,output_sqlite_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),4)
c = sqlite3.connect(output_sqlite_filename)
results = c.execute('select aa,bb,cc from t_%s' % base_filename_with_digits).fetchall()
self.assertEqual(results,list([(x,x,x) for x in range(1,101)]))
c.close()
self.cleanup_folder(new_tmp_folder)
def test_qtable_name_normalization(self):
x = [six.b(a) for a in map(str, range(1, 101))]
large_file_data = six.b("val\n") + six.b("\n").join(x)
tmpfile = self.create_file_with_data(large_file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_basename = os.path.basename(tmpfile.name)
cmd = 'cd %s && %s -c 1 -H -D , -O "select a.val,b.val from %s a cross join ./%s b on (a.val = b.val * 2)"' % (tmpfile_folder,Q_EXECUTABLE,tmpfile_basename,tmpfile_basename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 51)
evens = list(filter(lambda x: x%2 == 0,range(1,101)))
expected_result_rows = [six.b('val,val')] + [six.b('%d,%d' % (x,x / 2)) for x in evens]
self.assertEqual(o,expected_result_rows)
def test_qtable_name_normalization2(self):
cmd = '%s "select * from"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 118)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b('FROM/JOIN is missing a table name after it'))
def test_qtable_name_normalization3(self):
# with a space after the from
cmd = '%s "select * from "' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 118)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b('FROM/JOIN is missing a table name after it'))
def test_save_multiple_files_to_sqlite_while_caching_them(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
cmd = '%s -H "select count(*) from %s" -c 1 -S %s -C readwrite' % (Q_EXECUTABLE,tables_as_str,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
c = sqlite3.connect(output_sqlite_file)
tables = get_sqlite_table_list(c)
self.assertEqual(len(tables), FILE_COUNT)
for i,filename in enumerate(filename_list):
matching_table_name = 'file_dash_%s' % i
results = c.execute('select a from %s' % matching_table_name).fetchall()
self.assertEqual(len(results),BATCH_SIZE)
self.assertEqual(sum(map(lambda x:x[0],results)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
# check actual resulting qsql file for the file
cmd = '%s -c 1 -H "select a from %s/%s"' % (Q_EXECUTABLE,tmpfolder,filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), BATCH_SIZE)
self.assertEqual(sum(map(int,o)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
self.assertEqual(len(e), 0)
            # check analysis returns the proper file-with-unused-qsql for each file, since the default is `-C none`,
            # which means the cache is not read even if it exists
cmd = '%s -c 1 -H "select a from %s/%s" -A' % (Q_EXECUTABLE,tmpfolder,filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(o,[
six.b('Table: %s/file-%s' % (tmpfolder,i)),
six.b(' Sources:'),
six.b(' source_type: file-with-unused-qsql source: %s/file-%s' % (tmpfolder,i)),
six.b(' Fields:'),
six.b(' `a` - int')
])
cmd = '%s -c 1 -H "select a from %s/%s" -A -C read' % (Q_EXECUTABLE,tmpfolder,filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(o,[
six.b('Table: %s/file-%s' % (tmpfolder,i)),
six.b(' Sources:'),
six.b(' source_type: qsql-file-with-original source: %s/file-%s.qsql' % (tmpfolder,i)),
six.b(' Fields:'),
six.b(' `a` - int')
])
# check qsql file is readable directly through q
cmd = '%s -c 1 -H "select a from %s/%s.qsql"' % (Q_EXECUTABLE,tmpfolder,filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), BATCH_SIZE)
self.assertEqual(sum(map(int,o)),sum(range(1+i*BATCH_SIZE,1+(i+1)*BATCH_SIZE)))
self.assertEqual(len(e), 0)
# check analysis returns proper qsql-with-original for each file when running directly against the qsql file
cmd = '%s -c 1 -H "select a from %s/%s.qsql" -A' % (Q_EXECUTABLE,tmpfolder,filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(o,[
six.b('Table: %s/file-%s.qsql' % (tmpfolder,i)),
six.b(' Sources:'),
six.b(' source_type: qsql-file source: %s/file-%s.qsql' % (tmpfolder,i)),
six.b(' Fields:'),
six.b(' `a` - int')
])
c.close()
import glob
filename_list_with_qsql = list(map(lambda x: x+'.qsql',filename_list))
files_in_folder = glob.glob('%s/*' % tmpfolder)
regular_files,qsql_files = partition(lambda x: x.endswith('.qsql'),files_in_folder)
self.assertEqual(len(files_in_folder),2*FILE_COUNT)
self.assertEqual(sorted(list(map(os.path.basename,regular_files))),sorted(list(map(os.path.basename,filename_list))))
self.assertEqual(sorted(list(map(os.path.basename,qsql_files))),sorted(list(map(os.path.basename,filename_list_with_qsql))))
self.cleanup_folder(tmpfolder)
def test_globs_ignore_matching_qsql_files(self):
BATCH_SIZE = 10
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
cmd = '%s -H "select count(*) from %s" -c 1 -C readwrite' % (Q_EXECUTABLE,tables_as_str)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(pow(BATCH_SIZE,FILE_COUNT))))
cmd = '%s -H "select a from %s/*" -c 1 -C read' % (Q_EXECUTABLE,tmpfolder)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), BATCH_SIZE*FILE_COUNT)
self.assertEqual(len(e), 0)
self.assertEqual(sum(map(int,o)),sum(range(1,1+BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_error_on_reading_from_multi_table_sqlite_without_explicit_table_name(self):
BATCH_SIZE = 50
FILE_COUNT = 5
tmpfolder,filename_list = self.generate_files_in_folder(BATCH_SIZE,FILE_COUNT)
output_sqlite_file = self.random_tmp_filename("x","sqlite")
tables_as_str = " left join ".join(["%s/%s" % (tmpfolder,x) for x in filename_list])
cmd = '%s -H "select count(*) from %s" -c 1 -S %s' % (Q_EXECUTABLE,tables_as_str,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
cmd = '%s -H "select count(*) from %s"' % (Q_EXECUTABLE,output_sqlite_file)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 87)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b("Could not autodetect table name in sqlite file %s . Existing tables: file_dash_0,file_dash_1,file_dash_2,file_dash_3,file_dash_4" % output_sqlite_file))
self.cleanup_folder(tmpfolder)
def test_error_on_trying_to_specify_an_explicit_non_existent_qsql_file(self):
cmd = '%s -H "select count(*) from /non-existent-folder/non-existent.qsql:::mytable"' % (Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 30)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b("Could not find file /non-existent-folder/non-existent.qsql"))
def test_error_on_providing_a_non_qsql_file_when_specifying_an_explicit_table(self):
data = six.b("\x1f\x8b\x08\x00\tZ\x0ea\x00\x03\xed\x93\xdd\n\xc20\x0cF\xf3(}\x01ij\x93\xf6y:\xd9P\x10)\xb3\xbe\xbf\x9d\x1d\xbbQ\xc6\x06F\x10rn\xbe\x9b\xd0\xfc\x1c\x9a-\x88\x83\x88\x91\xd9\xbc2\xb4\xc4#\xb5\x9c1\x8e\x1czb\x8a\xd1\x19t\xdeS\x00\xc3\xf2\xa3\x01<\xee%\x8du\x94s\x1a\xfbk\xd7\xdf\x0e\xa9\x94Kz\xaf\xabe\xc3\xb0\xf2\xce\xbc\xc7\x92\x7fB\xb6\x1fv\xfd2\xf5\x1e\x81h\xa3\xff\x10'\xff\x8c\x04\x06\xc5'\x03\xf5oO\xe2=v\xf9o\xff\x9f\xd1\xa9\xff_\x90m'\xdec\x9f\x7f\x9c\xfc\xd7T\xff\x8a\xa2(\x92<\x01WY\x0c\x06\x00\x0c\x00\x00")
tmpfilename = self.random_tmp_filename('xx','yy')
f = open(tmpfilename,'wb')
f.write(data)
f.close()
cmd = '%s -H "select count(*) from %s:::mytable1"' % (Q_EXECUTABLE,tmpfilename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 95)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b("Cannot detect the type of table %s:::mytable1" % tmpfilename))
def test_error_on_providing_a_non_qsql_file_when_not_specifying_an_explicit_table(self):
data = six.b("\x1f\x8b\x08\x00\tZ\x0ea\x00\x03\xed\x93\xdd\n\xc20\x0cF\xf3(}\x01ij\x93\xf6y:\xd9P\x10)\xb3\xbe\xbf\x9d\x1d\xbbQ\xc6\x06F\x10rn\xbe\x9b\xd0\xfc\x1c\x9a-\x88\x83\x88\x91\xd9\xbc2\xb4\xc4#\xb5\x9c1\x8e\x1czb\x8a\xd1\x19t\xdeS\x00\xc3\xf2\xa3\x01<\xee%\x8du\x94s\x1a\xfbk\xd7\xdf\x0e\xa9\x94Kz\xaf\xabe\xc3\xb0\xf2\xce\xbc\xc7\x92\x7fB\xb6\x1fv\xfd2\xf5\x1e\x81h\xa3\xff\x10'\xff\x8c\x04\x06\xc5'\x03\xf5oO\xe2=v\xf9o\xff\x9f\xd1\xa9\xff_\x90m'\xdec\x9f\x7f\x9c\xfc\xd7T\xff\x8a\xa2(\x92<\x01WY\x0c\x06\x00\x0c\x00\x00")
tmpfilename = self.random_tmp_filename('xx','yy')
f = open(tmpfilename,'wb')
f.write(data)
f.close()
cmd = '%s -H "select count(*) from %s"' % (Q_EXECUTABLE,tmpfilename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 59)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(e[0].startswith(six.b("Could not parse the input. Please make sure to set the proper -w input-wrapping parameter for your input, and that you use the proper input encoding (-e). Error:")))
class OldSaveDbToDiskTests(AbstractQTestCase):
def test_join_with_stdin_and_save(self):
x = [six.b(a) for a in map(str,range(1,101))]
large_file_data = six.b("val\n") + six.b("\n").join(x)
tmpfile = self.create_file_with_data(large_file_data)
tmpfile_expected_table_name = os.path.basename(tmpfile.name)
disk_db_filename = self.random_tmp_filename('save-to-db','sqlite')
cmd = '(echo id ; seq 1 2 10) | ' + Q_EXECUTABLE + ' -c 1 -H -O "select stdin.*,f.* from - stdin left join %s f on (stdin.id * 10 = f.val)" -S %s' % \
(tmpfile.name,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
self.assertEqual(e[0],six.b('Going to save data into a disk database: %s' % disk_db_filename))
self.assertTrue(e[1].startswith(six.b('Data has been saved into %s . Saving has taken ' % disk_db_filename)))
self.assertEqual(e[2],six.b('Query to run on the database: select stdin.*,f.* from data_stream_stdin stdin left join %s f on (stdin.id * 10 = f.val);' % \
tmpfile_expected_table_name))
self.assertEqual(e[3],six.b('You can run the query directly from the command line using the following command: echo "select stdin.*,f.* from data_stream_stdin stdin left join %s f on (stdin.id * 10 = f.val)" | sqlite3 %s' %
(tmpfile_expected_table_name,disk_db_filename)))
P = re.compile(six.b("^Query to run on the database: (?P<query_to_run_on_db>.*)$"))
m = P.search(e[2])
query_to_run_on_db = m.groupdict()['query_to_run_on_db']
self.assertTrue(os.path.exists(disk_db_filename))
# validate disk db content natively
c = sqlite3.connect(disk_db_filename)
c.row_factory = sqlite_dict_factory
t0_results = c.execute('select * from data_stream_stdin').fetchall()
self.assertEqual(len(t0_results),5)
self.assertEqual(sorted(list(t0_results[0].keys())), ['id'])
self.assertEqual(list(map(lambda x:x['id'],t0_results)),[1,3,5,7,9])
t1_results = c.execute('select * from %s' % tmpfile_expected_table_name).fetchall()
self.assertEqual(len(t1_results),100)
self.assertEqual(sorted(list(t1_results[0].keys())), ['val'])
self.assertEqual("\n".join(list(map(lambda x:str(x['val']),t1_results))),"\n".join(map(str,range(1,101))))
query_results = c.execute(query_to_run_on_db.decode('utf-8')).fetchall()
self.assertEqual(query_results[0],{ 'id': 1 , 'val': 10})
self.assertEqual(query_results[1],{ 'id': 3 , 'val': 30})
self.assertEqual(query_results[2],{ 'id': 5 , 'val': 50})
self.assertEqual(query_results[3],{ 'id': 7 , 'val': 70})
self.assertEqual(query_results[4],{ 'id': 9 , 'val': 90})
self.cleanup(tmpfile)
def test_join_with_qsql_file(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
new_tmp_folder = self.create_folder_with_files({
'some_csv_file': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'some_qsql_database.qsql' : self.arrays_to_qsql_file_content(header,numbers2)
},prefix='xx',suffix='yy')
effective_filename1 = '%s/some_csv_file' % new_tmp_folder
effective_filename2 = '%s/some_qsql_database.qsql' % new_tmp_folder
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(small_file.aa) from %s large_file left join %s small_file on (small_file.aa == large_file.bb)"' % \
(effective_filename1,effective_filename2)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('50005000,55'))
# TODO RLRL Check if needed anymore
# def test_creation_of_qsql_database(self):
# numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
# header = [six.b('aa'), six.b('bb'), six.b('cc')]
#
# qsql_filename = self.create_qsql_file_with_content_and_return_filename(header,numbers)
#
# conn = sqlite3.connect(qsql_filename)
# qcatalog = conn.execute('select temp_table_name,source_type,source from _qcatalog').fetchall()
# print(qcatalog)
#
# cmd = '%s "select count(*) from %s" -A' % (Q_EXECUTABLE,qsql_filename)
# retcode, o, e = run_command(cmd)
# print(o)
def test_join_with_qsql_file_and_save(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
saved_qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
new_tmp_folder = self.create_folder_with_files({
'some_csv_file': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'some_qsql_database' : self.arrays_to_csv_file_content(six.b(','),header,numbers2)
},prefix='xx',suffix='yy')
cmd = '%s -d , -H "select count(*) from %s/some_qsql_database" -C readwrite' % (Q_EXECUTABLE,new_tmp_folder)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
os.remove('%s/some_qsql_database' % new_tmp_folder)
effective_filename1 = '%s/some_csv_file' % new_tmp_folder
effective_filename2 = '%s/some_qsql_database.qsql' % new_tmp_folder
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(small_file.aa) from %s large_file left join %s small_file on (small_file.aa == large_file.bb)" -S %s' % \
(effective_filename1,effective_filename2,saved_qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
conn = sqlite3.connect(saved_qsql_with_multiple_tables)
c1 = conn.execute('select count(*) from some_csv_file').fetchall()
c2 = conn.execute('select count(*) from some_qsql_database').fetchall()
self.assertEqual(c1[0][0],10000)
self.assertEqual(c2[0][0],10)
def test_saving_to_db_with_same_basename_files(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
new_tmp_folder = self.create_folder_with_files({
'filename1': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'otherfolder/filename1' : self.arrays_to_csv_file_content(six.b(','),header,numbers2)
},prefix='xx',suffix='yy')
effective_filename1 = '%s/filename1' % new_tmp_folder
effective_filename2 = '%s/otherfolder/filename1' % new_tmp_folder
expected_stored_table_name1 = 'filename1'
expected_stored_table_name2 = 'filename1_2'
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)" -S %s' % \
(effective_filename1,effective_filename2,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
self.assertEqual(e[0], six.b('Going to save data into a disk database: %s' % qsql_with_multiple_tables))
self.assertTrue(e[1].startswith(six.b('Data has been saved into %s . Saving has taken' % qsql_with_multiple_tables)))
self.assertEqual(e[2],six.b('Query to run on the database: select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb);' % \
(expected_stored_table_name1,expected_stored_table_name2)))
self.assertEqual(e[3],six.b('You can run the query directly from the command line using the following command: echo "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)" | sqlite3 %s' % \
(expected_stored_table_name1,expected_stored_table_name2,qsql_with_multiple_tables)))
conn = sqlite3.connect(qsql_with_multiple_tables)
c1 = conn.execute('select count(*) from filename1').fetchall()
c2 = conn.execute('select count(*) from filename1_2').fetchall()
self.assertEqual(c1[0][0],10000)
        self.assertEqual(c2[0][0],10)
        conn.close()
def test_error_when_not_specifying_table_name_in_multi_table_qsql(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
new_tmp_folder = self.create_folder_with_files({
'filename1': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'otherfolder/filename1' : self.arrays_to_csv_file_content(six.b(','),header,numbers2)
},prefix='xx',suffix='yy')
effective_filename1 = '%s/filename1' % new_tmp_folder
effective_filename2 = '%s/otherfolder/filename1' % new_tmp_folder
expected_stored_table_name1 = 'filename1'
expected_stored_table_name2 = 'filename1_2'
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)" -S %s' % \
(effective_filename1,effective_filename2,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
# Actual tests
cmd = '%s "select count(*) from %s"' % (Q_EXECUTABLE,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 87)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('Could not autodetect table name in sqlite file %s . Existing tables: %s,%s' % (qsql_with_multiple_tables,expected_stored_table_name1,expected_stored_table_name2)))
def test_error_when_not_specifying_table_name_in_multi_table_sqlite(self):
sqlite_with_multiple_tables = self.generate_tmpfile_name(suffix='.sqlite')
c = sqlite3.connect(sqlite_with_multiple_tables)
c.execute('create table my_table_1 (x int, y int)').fetchall()
c.execute('create table my_table_2 (x int, y int)').fetchall()
c.close()
cmd = '%s "select count(*) from %s"' % (Q_EXECUTABLE,sqlite_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 87)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
print(e[0])
self.assertEqual(e[0],six.b('Could not autodetect table name in sqlite file %s . Existing tables: my_table_1,my_table_2' % sqlite_with_multiple_tables))
def test_querying_from_multi_table_sqlite_using_explicit_table_name(self):
sqlite_with_multiple_tables = self.generate_tmpfile_name(suffix='.sqlite')
c = sqlite3.connect(sqlite_with_multiple_tables)
c.execute('create table my_table_1 (x int, y int)').fetchall()
c.execute('insert into my_table_1 (x,y) values (100,200),(300,400)').fetchall()
c.execute('commit').fetchall()
c.execute('create table my_table_2 (x int, y int)').fetchall()
c.close()
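        # The <sqlite-file>:::<table-name> syntax addresses a specific table inside a multi-table sqlite/qsql file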
cmd = '%s -d , "select * from %s:::my_table_1"' % (Q_EXECUTABLE,sqlite_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('100,200'))
self.assertEqual(o[1],six.b('300,400'))
# Check again, this time with a different output delimiter and with explicit column names
cmd = '%s -t "select x,y from %s:::my_table_1"' % (Q_EXECUTABLE,sqlite_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('100\t200'))
self.assertEqual(o[1],six.b('300\t400'))
def test_error_when_specifying_nonexistent_table_name_in_multi_table_qsql(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
new_tmp_folder = self.create_folder_with_files({
'filename1': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'otherfolder/filename1' : self.arrays_to_csv_file_content(six.b(','),header,numbers2)
},prefix='xx',suffix='yy')
effective_filename1 = '%s/filename1' % new_tmp_folder
effective_filename2 = '%s/otherfolder/filename1' % new_tmp_folder
expected_stored_table_name1 = 'filename1'
expected_stored_table_name2 = 'filename1_2'
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)" -S %s' % \
(effective_filename1,effective_filename2,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
# Actual tests
cmd = '%s "select count(*) from %s:::non_existent_table"' % (Q_EXECUTABLE,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 85)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('Table non_existent_table could not be found in sqlite file %s . Existing table names: %s,%s' % \
(qsql_with_multiple_tables,expected_stored_table_name1,expected_stored_table_name2)))
def test_querying_multi_table_qsql_file(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
header = [six.b('aa'), six.b('bb'), six.b('cc')]
qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
new_tmp_folder = self.create_folder_with_files({
'filename1': self.arrays_to_csv_file_content(six.b(','),header,numbers1),
'otherfolder/filename1' : self.arrays_to_csv_file_content(six.b(','),header,numbers2)
},prefix='xx',suffix='yy')
effective_filename1 = '%s/filename1' % new_tmp_folder
effective_filename2 = '%s/otherfolder/filename1' % new_tmp_folder
expected_stored_table_name1 = 'filename1'
expected_stored_table_name2 = 'filename1_2'
cmd = Q_EXECUTABLE + ' -d , -H "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)" -S %s' % \
(effective_filename1,effective_filename2,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
# Actual tests
cmd = '%s "select count(*) from %s:::%s"' % (Q_EXECUTABLE,qsql_with_multiple_tables,expected_stored_table_name1)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10000'))
cmd = '%s "select count(*) from %s:::%s"' % (Q_EXECUTABLE,qsql_with_multiple_tables,expected_stored_table_name2)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10'))
def test_preventing_db_overwrite(self):
db_filename = self.random_tmp_filename('store-to-disk', 'db')
self.assertFalse(os.path.exists(db_filename))
retcode, o, e = run_command('seq 1 1000 | ' + Q_EXECUTABLE + ' "select count(*) from -" -c 1 -S %s' % db_filename)
self.assertTrue(retcode == 0)
self.assertTrue(os.path.exists(db_filename))
retcode2, o2, e2 = run_command('seq 1 1000 | ' + Q_EXECUTABLE + ' "select count(*) from -" -c 1 -S %s' % db_filename)
self.assertTrue(retcode2 != 0)
self.assertTrue(e2[0].startswith(six.b('Going to save data into a disk database')))
self.assertTrue(e2[1] == six.b('Disk database file {} already exists.'.format(db_filename)))
os.remove(db_filename)
class BasicTests(AbstractQTestCase):
def test_basic_aggregation(self):
retcode, o, e = run_command(
'seq 1 10 | ' + Q_EXECUTABLE + ' "select sum(c1),avg(c1) from -"')
self.assertTrue(retcode == 0)
self.assertTrue(len(o) == 1)
self.assertTrue(len(e) == 0)
s = sum(range(1, 11))
self.assertTrue(o[0] == six.b('%s %s' % (s, s / 10.0)))
def test_select_one_column(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(six.b(" ").join(o), six.b('a b c'))
self.cleanup(tmpfile)
def test_column_separation(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0])
self.assertEqual(o[1], sample_data_rows[1])
self.assertEqual(o[2], sample_data_rows[2])
self.cleanup(tmpfile)
def test_header_exception_on_numeric_header_data(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select * from %s" -A -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 3)
self.assertTrue(
six.b('Bad header row: Header must contain only strings') in e[0])
self.assertTrue(six.b("Column name must be a string") in e[1])
self.assertTrue(six.b("Column name must be a string") in e[2])
self.cleanup(tmpfile)
def test_different_header_in_second_file(self):
folder_name = self.create_folder_with_files({
'file1': self.arrays_to_csv_file_content(six.b(','),[six.b('a'),six.b('b')],[[six.b(str(x)),six.b(str(x))] for x in range(1,6)]),
'file2': self.arrays_to_csv_file_content(six.b(','),[six.b('c'),six.b('d')],[[six.b(str(x)),six.b(str(x))] for x in range(1,6)])
},prefix="xx",suffix="aa")
cmd = Q_EXECUTABLE + ' -d , "select * from %s/*" -H' % (folder_name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 35)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b("Bad header row: Extra header 'c,d' in file '%s/file2' mismatches original header 'a,b' from file '%s/file1'. Table name is '%s/*'" % (folder_name,folder_name,folder_name)))
def test_data_with_header(self):
tmpfile = self.create_file_with_data(sample_data_with_header)
cmd = Q_EXECUTABLE + ' -d , "select name from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(six.b(" ").join(o), six.b("a b c"))
self.cleanup(tmpfile)
def test_output_header_when_input_header_exists(self):
tmpfile = self.create_file_with_data(sample_data_with_header)
cmd = Q_EXECUTABLE + ' -d , "select name from %s" -H -O' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 4)
self.assertEqual(o[0],six.b('name'))
self.assertEqual(o[1],six.b('a'))
self.assertEqual(o[2],six.b('b'))
self.assertEqual(o[3],six.b('c'))
self.cleanup(tmpfile)
def test_generated_column_name_warning_when_header_line_exists(self):
tmpfile = self.create_file_with_data(sample_data_with_header)
cmd = Q_EXECUTABLE + ' -d , "select c3 from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 2)
self.assertTrue(six.b('no such column: c3') in e[0])
self.assertTrue(
e[1].startswith(six.b('Warning - There seems to be a "no such column" error, and -H (header line) exists. Please make sure that you are using the column names from the header line and not the default (cXX) column names')))
self.cleanup(tmpfile)
def test_empty_data(self):
tmpfile = self.create_file_with_data(six.b(''))
cmd = Q_EXECUTABLE + ' -d , "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(six.b('Warning - data is empty') in e[0])
self.cleanup(tmpfile)
def test_empty_data_with_header_param(self):
tmpfile = self.create_file_with_data(six.b(''))
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
m = six.b("Header line is expected but missing in file %s" % tmpfile.name)
self.assertTrue(m in e[0])
self.cleanup(tmpfile)
def test_one_row_of_data_without_header_param(self):
tmpfile = self.create_file_with_data(header_row)
cmd = Q_EXECUTABLE + ' -d , "select c2 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('value1'))
self.cleanup(tmpfile)
def test_one_row_of_data_with_header_param(self):
tmpfile = self.create_file_with_data(header_row)
cmd = Q_EXECUTABLE + ' -d , "select name from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(six.b('Warning - data is empty') in e[0])
self.cleanup(tmpfile)
    def test_dont_keep_leading_whitespace_in_values(self):
tmpfile = self.create_file_with_data(sample_data_with_spaces_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0], six.b('a'))
self.assertEqual(o[1], six.b('b'))
self.assertEqual(o[2], six.b('c'))
self.cleanup(tmpfile)
def test_keep_leading_whitespace_in_values(self):
tmpfile = self.create_file_with_data(sample_data_with_spaces_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -k' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0], six.b('a'))
self.assertEqual(o[1], six.b(' b'))
self.assertEqual(o[2], six.b('c'))
self.cleanup(tmpfile)
def test_no_impact_of_keeping_leading_whitespace_on_integers(self):
tmpfile = self.create_file_with_data(sample_data_with_spaces_no_header)
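        # -k keeps leading whitespace in values; -A only prints the inferred schema, so c2/c3 should still be detected as int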
cmd = Q_EXECUTABLE + ' -d , "select c2 from %s" -k -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 7)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1], six.b(' Sources:'))
self.assertEqual(o[2], six.b(' source_type: file source: %s') % six.b(tmpfile.name))
self.assertEqual(o[3], six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `c1` - text'))
self.assertEqual(o[5], six.b(' `c2` - int'))
self.assertEqual(o[6], six.b(' `c3` - int'))
self.cleanup(tmpfile)
def test_spaces_in_header_row(self):
tmpfile = self.create_file_with_data(
header_row_with_spaces + six.b("\n") + sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select name,\\`value 1\\` from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0], six.b('a,1'))
self.assertEqual(o[1], six.b('b,2'))
self.assertEqual(o[2], six.b('c,'))
self.cleanup(tmpfile)
def test_no_query_in_command_line(self):
cmd = Q_EXECUTABLE + ' -d , ""'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertEqual(e[0],six.b('Query cannot be empty (query number 1)'))
def test_empty_query_in_command_line(self):
cmd = Q_EXECUTABLE + ' -d , " "'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertEqual(e[0],six.b('Query cannot be empty (query number 1)'))
def test_failure_in_query_stops_processing_queries(self):
cmd = Q_EXECUTABLE + ' -d , "select 500" "select 300" "wrong-query" "select 8000"'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 2)
self.assertEqual(o[0],six.b('500'))
self.assertEqual(o[1],six.b('300'))
def test_multiple_queries_in_command_line(self):
cmd = Q_EXECUTABLE + ' -d , "select 500" "select 300+100" "select 300" "select 200"'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 4)
self.assertEqual(o[0],six.b('500'))
self.assertEqual(o[1],six.b('400'))
self.assertEqual(o[2],six.b('300'))
self.assertEqual(o[3],six.b('200'))
def test_literal_calculation_query(self):
cmd = Q_EXECUTABLE + ' -d , "select 1+40/6"'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 1)
self.assertEqual(o[0],six.b('7'))
def test_literal_calculation_query_float_result(self):
cmd = Q_EXECUTABLE + ' -d , "select 1+40/6.0"'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 1)
self.assertEqual(o[0],six.b('7.666666666666667'))
def test_use_query_file(self):
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select name from %s" % tmp_data_file.name))
cmd = Q_EXECUTABLE + ' -d , -q %s -H' % tmp_query_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0], six.b('a'))
self.assertEqual(o[1], six.b('b'))
self.assertEqual(o[2], six.b('c'))
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_use_query_file_with_incorrect_query_encoding(self):
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select name,'Hr\xc3\xa1\xc4\x8d' from %s" % tmp_data_file.name),encoding=None)
cmd = Q_EXECUTABLE + ' -d , -q %s -H -Q ascii' % tmp_query_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,3)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertTrue(e[0].startswith(six.b('Could not decode query number 1 using the provided query encoding (ascii)')))
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_output_header_with_non_ascii_names(self):
OUTPUT_ENCODING = 'utf-8'
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select name,'Hr\xc3\xa1\xc4\x8d' Hr\xc3\xa1\xc4\x8d from %s" % tmp_data_file.name),encoding=None)
cmd = Q_EXECUTABLE + ' -d , -q %s -H -Q utf-8 -O -E %s' % (tmp_query_file.name,OUTPUT_ENCODING)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),4)
self.assertEqual(len(e),0)
self.assertEqual(o[0].decode(OUTPUT_ENCODING), u'name,Hr\xe1\u010d')
self.assertEqual(o[1].decode(OUTPUT_ENCODING), u'a,Hr\xe1\u010d')
self.assertEqual(o[2].decode(OUTPUT_ENCODING), u'b,Hr\xe1\u010d')
self.assertEqual(o[3].decode(OUTPUT_ENCODING), u'c,Hr\xe1\u010d')
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_use_query_file_with_query_encoding(self):
OUTPUT_ENCODING = 'utf-8'
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select name,'Hr\xc3\xa1\xc4\x8d' from %s" % tmp_data_file.name),encoding=None)
cmd = Q_EXECUTABLE + ' -d , -q %s -H -Q utf-8 -E %s' % (tmp_query_file.name,OUTPUT_ENCODING)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0].decode(OUTPUT_ENCODING), u'a,Hr\xe1\u010d')
self.assertEqual(o[1].decode(OUTPUT_ENCODING), u'b,Hr\xe1\u010d')
self.assertEqual(o[2].decode(OUTPUT_ENCODING), u'c,Hr\xe1\u010d')
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_use_query_file_and_command_line(self):
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select name from %s" % tmp_data_file.name))
cmd = Q_EXECUTABLE + ' -d , -q %s -H "select * from ppp"' % tmp_query_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertTrue(e[0].startswith(six.b("Can't provide both a query file and a query on the command line")))
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_select_output_encoding(self):
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select 'Hr\xc3\xa1\xc4\x8d' from %s" % tmp_data_file.name),encoding=None)
for target_encoding in ['utf-8','ibm852']:
cmd = Q_EXECUTABLE + ' -d , -q %s -H -Q utf-8 -E %s' % (tmp_query_file.name,target_encoding)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0].decode(target_encoding), u'Hr\xe1\u010d')
self.assertEqual(o[1].decode(target_encoding), u'Hr\xe1\u010d')
self.assertEqual(o[2].decode(target_encoding), u'Hr\xe1\u010d')
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_select_failed_output_encoding(self):
tmp_data_file = self.create_file_with_data(sample_data_with_header)
tmp_query_file = self.create_file_with_data(six.b("select 'Hr\xc3\xa1\xc4\x8d' from %s" % tmp_data_file.name),encoding=None)
cmd = Q_EXECUTABLE + ' -d , -q %s -H -Q utf-8 -E ascii' % tmp_query_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 3)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertTrue(e[0].startswith(six.b('Cannot encode data')))
self.cleanup(tmp_data_file)
self.cleanup(tmp_query_file)
def test_use_query_file_with_empty_query(self):
tmp_query_file = self.create_file_with_data(six.b(" "))
cmd = Q_EXECUTABLE + ' -d , -q %s -H' % tmp_query_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertTrue(e[0].startswith(six.b("Query cannot be empty")))
self.cleanup(tmp_query_file)
def test_use_non_existent_query_file(self):
cmd = Q_EXECUTABLE + ' -d , -q non-existent-query-file -H'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(e), 1)
self.assertEqual(len(o), 0)
self.assertTrue(e[0].startswith(six.b("Could not read query from file")))
def test_nonexistent_file(self):
cmd = Q_EXECUTABLE + ' "select * from non-existent-file"'
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b("No files matching '%s/non-existent-file' have been found" % os.getcwd()))
def test_default_column_max_length_parameter__short_enough(self):
huge_text = six.b("x" * 131000)
file_data = six.b("a,b,c\n1,{},3\n".format(huge_text))
tmpfile = self.create_file_with_data(file_data)
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('1'))
self.cleanup(tmpfile)
def test_default_column_max_length_parameter__too_long(self):
huge_text = six.b("x") * 132000
        file_data = six.b("a,b,c\n1,") + huge_text + six.b(",3\n")
tmpfile = self.create_file_with_data(file_data)
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 31)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(e[0].startswith(six.b("Column length is larger than the maximum")))
self.assertTrue(six.b("Offending file is '{}'".format(tmpfile.name)) in e[0])
self.assertTrue(six.b('Line is 2') in e[0])
self.cleanup(tmpfile)
def test_column_max_length_parameter(self):
file_data = six.b("a,b,c\nvery-long-text,2,3\n")
tmpfile = self.create_file_with_data(file_data)
cmd = Q_EXECUTABLE + ' -H -d , -M 3 "select a from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 31)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(e[0].startswith(six.b("Column length is larger than the maximum")))
self.assertTrue((six.b("Offending file is '%s'" % tmpfile.name)) in e[0])
self.assertTrue(six.b('Line is 2') in e[0])
        cmd2 = Q_EXECUTABLE + ' -H -d , -M 300 "select a from %s"' % tmpfile.name
retcode2, o2, e2 = run_command(cmd2)
self.assertEqual(retcode2, 0)
self.assertEqual(len(o2), 1)
self.assertEqual(len(e2), 0)
self.assertEqual(o2[0],six.b('very-long-text'))
self.cleanup(tmpfile)
def test_invalid_column_max_length_parameter(self):
file_data = six.b("a,b,c\nvery-long-text,2,3\n")
tmpfile = self.create_file_with_data(file_data)
cmd = Q_EXECUTABLE + ' -H -d , -M xx "select a from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 31)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b('Max column length limit must be an integer larger than 2 (xx)'))
self.cleanup(tmpfile)
def test_duplicate_column_name_detection(self):
file_data = six.b("a,b,a\n10,20,30\n30,40,50")
tmpfile = self.create_file_with_data(file_data)
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 35)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 2)
self.assertTrue(e[0].startswith(six.b('Bad header row:')))
self.assertEqual(e[1],six.b("'a': Column name is duplicated"))
self.cleanup(tmpfile)
def test_join_with_stdin(self):
x = [six.b(a) for a in map(str,range(1,101))]
large_file_data = six.b("val\n") + six.b("\n").join(x)
tmpfile = self.create_file_with_data(large_file_data)
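        # '-' denotes stdin as a table: seq 1 2 10 feeds ids 1,3,5,7,9, which join to vals 10,30,50,70,90 in the file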
cmd = '(echo id ; seq 1 2 10) | %s -c 1 -H -O "select stdin.*,f.* from - stdin left join %s f on (stdin.id * 10 = f.val)"' % (Q_EXECUTABLE,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 6)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('id val'))
self.assertEqual(o[1],six.b('1 10'))
self.assertEqual(o[2],six.b('3 30'))
self.assertEqual(o[3],six.b('5 50'))
self.assertEqual(o[4],six.b('7 70'))
self.assertEqual(o[5],six.b('9 90'))
self.cleanup(tmpfile)
def test_concatenated_files(self):
file_data1 = six.b("a,b,c\n10,11,12\n20,21,22")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
file_data2 = six.b("a,b,c\n30,31,32\n40,41,42")
tmpfile2 = self.create_file_with_data(file_data2)
tmpfile2_folder = os.path.dirname(tmpfile2.name)
tmpfile2_filename = os.path.basename(tmpfile2.name)
expected_cache_filename2 = os.path.join(tmpfile2_folder,tmpfile2_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -O -H -d , "select * from %s UNION ALL select * from %s" -C none' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('a,b,c'))
self.assertEqual(o[1],six.b('10,11,12'))
self.assertEqual(o[2],six.b('20,21,22'))
self.assertEqual(o[3],six.b('30,31,32'))
self.assertEqual(o[4],six.b('40,41,42'))
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_out_of_range_expected_column_count(self):
cmd = '%s "select count(*) from some_table" -c -1' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 90)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0], six.b('Column count must be between 1 and 131072'))
def test_out_of_range_expected_column_count__with_explicit_limit(self):
cmd = '%s "select count(*) from some_table" -c -1 -M 100' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 90)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0], six.b('Column count must be between 1 and 100'))
def test_other_out_of_range_expected_column_count__with_explicit_limit(self):
cmd = '%s "select count(*) from some_table" -c 101 -M 100' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 90)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0], six.b('Column count must be between 1 and 100'))
def test_explicit_limit_of_columns__data_is_ok(self):
file_data1 = six.b("191\n192\n")
tmpfile1 = self.create_file_with_data(file_data1)
cmd = '%s "select count(*) from %s" -c 1 -M 3' % (Q_EXECUTABLE,tmpfile1.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('2'))
self.cleanup(tmpfile1)
class ManyOpenFilesTests(AbstractQTestCase):
def test_multi_file_header_skipping(self):
BATCH_SIZE = 50
FILE_COUNT = 5
numbers = list(range(1,1+BATCH_SIZE*FILE_COUNT))
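        # batch() is assumed to be the chunking helper defined earlier in this suite, splitting the numbers into FILE_COUNT chunks of BATCH_SIZE each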
numbers_as_text = batch([str(x) for x in numbers],n=BATCH_SIZE)
content_list = list(map(six.b,['a\n' + "\n".join(x)+'\n' for x in numbers_as_text]))
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','multi-header')
cmd = '%s -d , -H -c 1 "select count(a),sum(a) from %s/*" -C none' % (Q_EXECUTABLE,tmpfolder)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b("%s,%s" % (BATCH_SIZE*FILE_COUNT,sum(numbers))))
self.cleanup_folder(tmpfolder)
def test_that_globs_dont_max_out_sqlite_attached_database_limits(self):
BATCH_SIZE = 50
FILE_COUNT = 40
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x)+'\n' for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
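        # Each file is attached as a separate sqlite database, so the flag below is assumed to force q to process
        # the 40-file glob in batches rather than exceeding the 10-database attachment limit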
cmd = 'cd %s && %s -c 1 "select count(*) from *" -C none --max-attached-sqlite-databases=10' % (tmpfolder,Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_maxing_out_max_attached_database_limits__regular_files(self):
BATCH_SIZE = 50
FILE_COUNT = 40
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x)+'\n' for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
unioned_subquery = " UNION ALL ".join(["select * from %s/%s" % (tmpfolder,filename) for filename in filename_list])
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C none --max-attached-sqlite-databases=10' % (tmpfolder,Q_EXECUTABLE,unioned_subquery)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_maxing_out_max_attached_database_limits__with_qsql_files_below_attached_limit(self):
MAX_ATTACHED_SQLITE_DATABASES = 10
BATCH_SIZE = 50
FILE_COUNT = MAX_ATTACHED_SQLITE_DATABASES - 1
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x)+'\n' for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
# Execute the query with -C readwrite, so all qsql files will be created
unioned_subquery = " UNION ALL ".join(["select * from %s/%s" % (tmpfolder,filename) for filename in filename_list])
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C readwrite --max-attached-sqlite-databases=%s' % (tmpfolder,Q_EXECUTABLE,unioned_subquery,MAX_ATTACHED_SQLITE_DATABASES)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
# Now execute the same query with -C readwrite, so all files will be read directly from the qsql files
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C readwrite' % (tmpfolder,Q_EXECUTABLE,unioned_subquery)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_maxing_out_max_attached_database_limits__with_qsql_files_above_attached_limit(self):
MAX_ATTACHED_SQLITE_DATABASES = 10
BATCH_SIZE = 50
        # Unlike test_maxing_out_max_attached_database_limits__with_qsql_files_below_attached_limit,
        # here we try to cache twice as many files as the number of databases that can be attached.
        # The expectation is that only some of the files will end up being cached.
FILE_COUNT = MAX_ATTACHED_SQLITE_DATABASES * 2
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x)+'\n' for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
# Execute the query with -C readwrite, so all qsql files will be created
unioned_subquery = " UNION ALL ".join(["select * from %s/%s" % (tmpfolder,filename) for filename in filename_list])
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C readwrite --max-attached-sqlite-databases=%s' % (tmpfolder,Q_EXECUTABLE,unioned_subquery,MAX_ATTACHED_SQLITE_DATABASES)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
# Now execute the same query with -C readwrite, so all files will be read directly from the qsql files
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C readwrite' % (tmpfolder,Q_EXECUTABLE,unioned_subquery)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
from glob import glob
files_in_folder = [os.path.basename(x) for x in glob('%s/*' % (tmpfolder))]
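        # Only MAX_ATTACHED_SQLITE_DATABASES-2 .qsql caches are expected on disk, presumably because two attachment
        # slots are reserved internally, so caching stops once the remaining slots are exhausted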
expected_files_in_folder = filename_list + list(map(lambda x: 'file-%s.qsql' % x,range(MAX_ATTACHED_SQLITE_DATABASES-2)))
self.assertEqual(sorted(files_in_folder),sorted(expected_files_in_folder))
self.cleanup_folder(tmpfolder)
def test_maxing_out_max_attached_database_limits__with_directly_using_qsql_files(self):
MAX_ATTACHED_SQLITE_DATABASES = 10
BATCH_SIZE = 50
FILE_COUNT = MAX_ATTACHED_SQLITE_DATABASES * 2
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x)+'\n' for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
# Prepare qsql for each of the files (separately, just for simplicity)
for fn in filename_list:
cmd = 'cd %s && %s -c 1 "select count(*) from %s" -C readwrite' % (tmpfolder,Q_EXECUTABLE,fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
# Now execute a big query which uses the created qsql files
unioned_subquery = " UNION ALL ".join(["select * from %s/%s.qsql" % (tmpfolder,filename) for filename in filename_list])
cmd = 'cd %s && %s -c 1 "select count(*) from (%s)" -C readwrite' % (tmpfolder,Q_EXECUTABLE,unioned_subquery)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_too_many_open_files_for_one_table(self):
        # Previously, files were opened in parallel, which could cause too-many-open-files errors
MAX_ALLOWED_FILES = 500
BATCH_SIZE = 2
FILE_COUNT = MAX_ALLOWED_FILES + 1
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
cmd = 'cd %s && %s -c 1 "select count(*) from * where 1 = 1 or c1 != 2" -C none' % (tmpfolder,Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 82)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
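        # The asserted text below (including the odd 'Table is name is' wording) is compared verbatim against the
        # error message emitted by q, so it is kept exactly as produced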
x = six.b('Maximum source files for table must be %s. Table is name is %s/* Number of actual files is %s' % (MAX_ALLOWED_FILES,os.path.realpath(tmpfolder),FILE_COUNT))
print(x)
self.assertEqual(e[0],x)
self.cleanup_folder(tmpfolder)
def test_many_open_files_for_one_table(self):
        # Previously, files were opened in parallel, which could cause too-many-open-files errors
BATCH_SIZE = 2
FILE_COUNT = 500
numbers_as_text = batch([str(x) for x in range(1,1+BATCH_SIZE*FILE_COUNT)],n=BATCH_SIZE)
content_list = map(six.b,["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x,range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder = self.create_folder_with_files(d,'split-files','attach-limit')
cmd = 'cd %s && %s -c 1 "select count(*) from * where 1 = 1 or c1 != 2" -C none' % (tmpfolder,Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b(str(BATCH_SIZE*FILE_COUNT)))
self.cleanup_folder(tmpfolder)
def test_many_open_files_for_two_tables(self):
BATCH_SIZE = 2
FILE_COUNT = 500
numbers_as_text = batch([str(x) for x in range(1, 1 + BATCH_SIZE * FILE_COUNT)], n=BATCH_SIZE)
content_list = map(six.b, ["\n".join(x) for x in numbers_as_text])
filename_list = list(map(lambda x: 'file-%s' % x, range(FILE_COUNT)))
d = collections.OrderedDict(zip(filename_list, content_list))
tmpfolder1 = self.create_folder_with_files(d, 'split-files1', 'blah')
tmpfolder2 = self.create_folder_with_files(d, 'split-files1', 'blah')
cmd = '%s -c 1 "select count(*) from %s/* a left join %s/* b on (a.c1 = b.c1)" -C none' % (
Q_EXECUTABLE,
tmpfolder1,
tmpfolder2)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b(str(BATCH_SIZE * FILE_COUNT)))
self.cleanup_folder(tmpfolder1)
self.cleanup_folder(tmpfolder2)
class GzippingTests(AbstractQTestCase):
def test_gzipped_file(self):
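        # The literal below is a pre-built gzip stream, expected to decompress to the numbers 1..10 (one per line),
        # hence the sum of 55 and average of 5.5 asserted further down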
tmpfile = self.create_file_with_data(
six.b('\x1f\x8b\x08\x08\xf2\x18\x12S\x00\x03xxxxxx\x003\xe42\xe22\xe62\xe12\xe52\xe32\xe7\xb2\xe0\xb2\xe424\xe0\x02\x00\xeb\xbf\x8a\x13\x15\x00\x00\x00'))
cmd = Q_EXECUTABLE + ' -z "select sum(c1),avg(c1) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertTrue(retcode == 0)
self.assertTrue(len(o) == 1)
self.assertTrue(len(e) == 0)
s = sum(range(1, 11))
self.assertTrue(o[0] == six.b('%s %s' % (s, s / 10.0)))
self.cleanup(tmpfile)
class DelimiterTests(AbstractQTestCase):
def test_delimition_mistake_with_header(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d " " "select * from %s" -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 2)
self.assertTrue(e[0].startswith(six.b("Bad header row")))
self.assertTrue(six.b("Column name cannot contain commas") in e[1])
self.cleanup(tmpfile)
def test_tab_delimition_parameter(self):
tmpfile = self.create_file_with_data(
sample_data_no_header.replace(six.b(","), six.b("\t")))
cmd = Q_EXECUTABLE + ' -t "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("\t")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("\t")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("\t")))
self.cleanup(tmpfile)
def test_pipe_delimition_parameter(self):
tmpfile = self.create_file_with_data(
sample_data_no_header.replace(six.b(","), six.b("|")))
cmd = Q_EXECUTABLE + ' -p "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("|")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("|")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("|")))
self.cleanup(tmpfile)
def test_tab_delimition_parameter__with_manual_override_attempt(self):
tmpfile = self.create_file_with_data(
sample_data_no_header.replace(six.b(","), six.b("\t")))
cmd = Q_EXECUTABLE + ' -t -d , "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 1)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("\t")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("\t")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("\t")))
self.assertEqual(e[0],six.b('Warning: -t parameter overrides -d parameter (,)'))
self.cleanup(tmpfile)
def test_pipe_delimition_parameter__with_manual_override_attempt(self):
tmpfile = self.create_file_with_data(
sample_data_no_header.replace(six.b(","), six.b("|")))
cmd = Q_EXECUTABLE + ' -p -d , "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 1)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("|")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("|")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("|")))
self.assertEqual(e[0],six.b('Warning: -p parameter overrides -d parameter (,)'))
self.cleanup(tmpfile)
def test_output_delimiter(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -D "|" "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("|")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("|")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("|")))
self.cleanup(tmpfile)
def test_output_delimiter_tab_parameter(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -T "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("\t")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("\t")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("\t")))
self.cleanup(tmpfile)
def test_output_delimiter_pipe_parameter(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -P "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("|")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("|")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("|")))
self.cleanup(tmpfile)
def test_output_delimiter_tab_parameter__with_manual_override_attempt(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -T -D "|" "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 1)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("\t")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("\t")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("\t")))
self.assertEqual(e[0], six.b('Warning: -T parameter overrides -D parameter (|)'))
self.cleanup(tmpfile)
def test_output_delimiter_pipe_parameter__with_manual_override_attempt(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -P -D ":" "select c1,c2,c3 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 1)
self.assertEqual(o[0], sample_data_rows[0].replace(six.b(","), six.b("|")))
self.assertEqual(o[1], sample_data_rows[1].replace(six.b(","), six.b("|")))
self.assertEqual(o[2], sample_data_rows[2].replace(six.b(","), six.b("|")))
self.assertEqual(e[0],six.b('Warning: -P parameter overrides -D parameter (:)'))
self.cleanup(tmpfile)
class AnalysisTests(AbstractQTestCase):
def test_analyze_result(self):
d = "\n".join(['%s\t%s\t%s' % (x+1,x+1,x+1) for x in range(100)])
tmpfile = self.create_file_with_data(six.b(d))
cmd = Q_EXECUTABLE + ' -c 1 "select count(*) from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1], six.b(' Sources:'))
self.assertEqual(o[2], six.b(' source_type: file source: %s' %(tmpfile.name)))
self.assertEqual(o[3], six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `c1` - text'))
self.cleanup(tmpfile)
def test_analyze_result_with_data_stream(self):
d = "\n".join(['%s\t%s\t%s' % (x+1,x+1,x+1) for x in range(100)])
tmpfile = self.create_file_with_data(six.b(d))
cmd = 'cat %s | %s -c 1 "select count(*) from -" -A' % (tmpfile.name,Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('Table: -'))
self.assertEqual(o[1], six.b(' Sources:'))
self.assertEqual(o[2], six.b(' source_type: data-stream source: stdin'))
self.assertEqual(o[3], six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `c1` - text'))
self.cleanup(tmpfile)
def test_column_analysis(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `c1` - text'))
self.assertEqual(o[5], six.b(' `c2` - int'))
self.assertEqual(o[6], six.b(' `c3` - int'))
self.cleanup(tmpfile)
def test_column_analysis_with_mixed_ints_and_floats(self):
tmpfile = self.create_file_with_data(six.b("""planet_id,name,diameter_km,length_of_day_hours\n1000,Earth,12756,24\n2000,Mars,6792,24.7\n3000,Jupiter,142984,9.9"""))
cmd = Q_EXECUTABLE + ' -d , -H "select * from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),8)
self.assertEqual(len(e),0)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `planet_id` - int'))
self.assertEqual(o[5], six.b(' `name` - text'))
self.assertEqual(o[6], six.b(' `diameter_km` - int'))
self.assertEqual(o[7], six.b(' `length_of_day_hours` - real'))
self.cleanup(tmpfile)
def test_column_analysis_with_mixed_ints_and_floats_and_nulls(self):
tmpfile = self.create_file_with_data(six.b("""planet_id,name,diameter_km,length_of_day_hours\n1000,Earth,12756,24\n2000,Mars,6792,24.7\n2500,Venus,,\n3000,Jupiter,142984,9.9"""))
cmd = Q_EXECUTABLE + ' -d , -H "select * from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),8)
self.assertEqual(len(e),0)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `planet_id` - int'))
self.assertEqual(o[5], six.b(' `name` - text'))
self.assertEqual(o[6], six.b(' `diameter_km` - int'))
self.assertEqual(o[7], six.b(' `length_of_day_hours` - real'))
self.cleanup(tmpfile)
def test_column_analysis_no_header(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `c1` - text'))
self.assertEqual(o[5], six.b(' `c2` - int'))
self.assertEqual(o[6], six.b(' `c3` - int'))
def test_column_analysis_with_unexpected_header(self):
tmpfile = self.create_file_with_data(sample_data_with_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 1)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `c1` - text'))
self.assertEqual(o[5],six.b(' `c2` - text'))
self.assertEqual(o[6],six.b(' `c3` - text'))
self.assertEqual(
e[0], six.b('Warning - There seems to be header line in the file, but -H has not been specified. All fields will be detected as text fields, and the header line will appear as part of the data'))
self.cleanup(tmpfile)
def test_column_analysis_for_spaces_in_header_row(self):
tmpfile = self.create_file_with_data(
header_row_with_spaces + six.b("\n") + sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select name,\\`value 1\\` from %s" -H -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 7)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `name` - text'))
self.assertEqual(o[5], six.b(' `value 1` - int'))
self.assertEqual(o[6], six.b(' `value2` - int'))
self.cleanup(tmpfile)
def test_column_analysis_with_header(self):
tmpfile = self.create_file_with_data(sample_data_with_header)
cmd = Q_EXECUTABLE + ' -d , "select c1 from %s" -A -H' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o),7)
self.assertEqual(len(e),2)
self.assertEqual(o[0], six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `name` - text'))
self.assertEqual(o[5], six.b(' `value1` - int'))
self.assertEqual(o[6], six.b(' `value2` - int'))
self.assertEqual(e[0],six.b('query error: no such column: c1'))
self.assertTrue(e[1].startswith(six.b('Warning - There seems to be a ')))
self.cleanup(tmpfile)
class StdInTests(AbstractQTestCase):
def test_stdin_input(self):
cmd = six.b('printf "%s" | ' + Q_EXECUTABLE + ' -d , "select c1,c2,c3 from -"') % sample_data_no_header
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], sample_data_rows[0])
self.assertEqual(o[1], sample_data_rows[1])
self.assertEqual(o[2], sample_data_rows[2])
def test_attempt_to_unzip_stdin(self):
tmpfile = self.create_file_with_data(
six.b('\x1f\x8b\x08\x08\xf2\x18\x12S\x00\x03xxxxxx\x003\xe42\xe22\xe62\xe12\xe52\xe32\xe7\xb2\xe0\xb2\xe424\xe0\x02\x00\xeb\xbf\x8a\x13\x15\x00\x00\x00'))
cmd = 'cat %s | ' % tmpfile.name + Q_EXECUTABLE + ' -z "select sum(c1),avg(c1) from -"'
retcode, o, e = run_command(cmd)
self.assertTrue(retcode != 0)
self.assertTrue(len(o) == 0)
self.assertTrue(len(e) == 1)
self.assertEqual(e[0],six.b('Cannot decompress standard input. Pipe the input through zcat in order to decompress.'))
self.cleanup(tmpfile)
class QuotingTests(AbstractQTestCase):
def test_non_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " "select c1 from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('non_quoted'))
        self.assertEqual(o[1],six.b('control-value-1'))
        self.assertEqual(o[2],six.b('non-quoted-value'))
        self.assertEqual(o[3],six.b('control-value-1'))
self.cleanup(tmp_data_file)
def test_regular_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " "select c2 from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('regular_double_quoted'))
        self.assertEqual(o[1],six.b('control-value-2'))
        self.assertEqual(o[2],six.b('this is a quoted value'))
        self.assertEqual(o[3],six.b('control-value-2'))
self.cleanup(tmp_data_file)
def test_double_double_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " "select c3 from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('double_double_quoted'))
        self.assertEqual(o[1],six.b('control-value-3'))
        self.assertEqual(o[2],six.b('this is a "double double" quoted value'))
        self.assertEqual(o[3],six.b('control-value-3'))
self.cleanup(tmp_data_file)
def test_escaped_double_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " "select c4 from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('escaped_double_quoted'))
        self.assertEqual(o[1],six.b('control-value-4'))
        # The exact output form depends on the output quoting mode, so only check containment
        self.assertIn(six.b('this is an escaped'),o[2])
        self.assertEqual(o[3],six.b('control-value-4'))
self.cleanup(tmp_data_file)
def test_none_input_quoting_mode_in_relaxed_mode(self):
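        # -w sets the input quoting mode and -W the output quoting mode; with both set
        # to none, quote characters are treated as regular data in both directions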
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -m relaxed -D , -w none -W none "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('"quoted,data",23'))
self.assertEqual(o[1],six.b('unquoted-data,54,'))
self.cleanup(tmp_data_file)
def test_none_input_quoting_mode_in_strict_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -m strict -D , -w none "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode,0)
self.assertEqual(len(e),1)
self.assertEqual(len(o),0)
self.assertTrue(e[0].startswith(six.b('Strict mode. Column Count is expected to identical')))
self.cleanup(tmp_data_file)
def test_minimal_input_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w minimal "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('quoted data,23'))
self.assertEqual(o[1],six.b('unquoted-data,54'))
self.cleanup(tmp_data_file)
def test_all_input_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w all "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('quoted data,23'))
self.assertEqual(o[1],six.b('unquoted-data,54'))
self.cleanup(tmp_data_file)
def test_incorrect_input_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w unknown_wrapping_mode "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode,0)
self.assertEqual(len(e),1)
self.assertEqual(len(o),0)
self.assertTrue(e[0].startswith(six.b('Input quoting mode can only be one of all,minimal,none')))
self.assertTrue(six.b('unknown_wrapping_mode') in e[0])
self.cleanup(tmp_data_file)
def test_none_output_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w all -W none "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('quoted data,23'))
self.assertEqual(o[1],six.b('unquoted-data,54'))
self.cleanup(tmp_data_file)
def test_minimal_output_quoting_mode__without_need_to_quote_in_output(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w all -W minimal "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('quoted data,23'))
self.assertEqual(o[1],six.b('unquoted-data,54'))
self.cleanup(tmp_data_file)
def test_minimal_output_quoting_mode__with_need_to_quote_in_output_due_to_delimiter(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
        # The output delimiter is set to space, so values containing a space must be quoted in the output
cmd = Q_EXECUTABLE + ' -d " " -D " " -w all -W minimal "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('"quoted data" 23'))
self.assertEqual(o[1],six.b('unquoted-data 54'))
self.cleanup(tmp_data_file)
def test_minimal_output_quoting_mode__with_need_to_quote_in_output_due_to_newline(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2_with_newline)
# Delimiter is set to colon (:), so it will not be inside the data values (this will make sure that the newline is the one causing the quoting)
cmd = Q_EXECUTABLE + " -d ':' -w all -W minimal \"select c1,c2,replace(c1,'with' || x'0a' || 'a new line inside it','NEWLINE-REMOVED') from %s\"" % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),3)
self.assertEqual(o[0],six.b('"quoted data with'))
# Notice that the third column here is not quoted, because we replaced the newline with something else
self.assertEqual(o[1],six.b('a new line inside it":23:quoted data NEWLINE-REMOVED'))
self.assertEqual(o[2],six.b('unquoted-data:54:unquoted-data'))
self.cleanup(tmp_data_file)
def test_nonnumeric_output_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w all -W nonnumeric "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('"quoted data",23'))
self.assertEqual(o[1],six.b('"unquoted-data",54'))
self.cleanup(tmp_data_file)
def test_all_output_quoting_mode(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data2)
cmd = Q_EXECUTABLE + ' -d " " -D , -w all -W all "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('"quoted data","23"'))
self.assertEqual(o[1],six.b('"unquoted-data","54"'))
self.cleanup(tmp_data_file)
def _internal_test_consistency_of_chaining_output_to_input(self,input_data,input_wrapping_mode,output_wrapping_mode):
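        # Pipe the data through the same q invocation three times; with consistent input and
        # output wrapping modes, the final output must be byte-identical to the original input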
tmp_data_file = self.create_file_with_data(input_data)
basic_cmd = Q_EXECUTABLE + ' -w %s -W %s "select * from -"' % (input_wrapping_mode,output_wrapping_mode)
chained_cmd = 'cat %s | %s | %s | %s' % (tmp_data_file.name,basic_cmd,basic_cmd,basic_cmd)
retcode, o, e = run_command(chained_cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(six.b("\n").join(o),input_data)
self.cleanup(tmp_data_file)
def test_consistency_of_chaining_minimal_wrapping_to_minimal_wrapping(self):
input_data = six.b('"quoted data" 23\nunquoted-data 54')
self._internal_test_consistency_of_chaining_output_to_input(input_data,'minimal','minimal')
def test_consistency_of_chaining_all_wrapping_to_all_wrapping(self):
input_data = six.b('"quoted data" "23"\n"unquoted-data" "54"')
self._internal_test_consistency_of_chaining_output_to_input(input_data,'all','all')
def test_input_field_quoting_and_data_types_with_encoding(self):
OUTPUT_ENCODING = 'utf-8'
        # Checks the combination of minimal input field quoting with special characters that need to be decoded.
        # Both the content and the detected data types are verified
data = six.b('111,22.22,"testing text with special characters - citt\xc3\xa0 ",http://somekindofurl.com,12.13.14.15,12.1\n')
tmp_data_file = self.create_file_with_data(data)
cmd = Q_EXECUTABLE + ' -d , "select * from %s" -E %s' % (tmp_data_file.name,OUTPUT_ENCODING)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),1)
self.assertEqual(o[0].decode('utf-8'),u'111,22.22,testing text with special characters - citt\xe0 ,http://somekindofurl.com,12.13.14.15,12.1')
cmd = Q_EXECUTABLE + ' -d , "select * from %s" -A' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),10)
self.assertEqual(o[0],six.b('Table: %s' % tmp_data_file.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmp_data_file.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `c1` - int'))
self.assertEqual(o[5],six.b(' `c2` - real'))
self.assertEqual(o[6],six.b(' `c3` - text'))
self.assertEqual(o[7],six.b(' `c4` - text'))
self.assertEqual(o[8],six.b(' `c5` - text'))
self.assertEqual(o[9],six.b(' `c6` - real'))
self.cleanup(tmp_data_file)
def test_multiline_double_double_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
        # FIXME Need to convert \x0a to an encoding suitable for the environment running the tests.
cmd = Q_EXECUTABLE + ' -d " " "select replace(c5,X\'0A\',\'::\') from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('multiline_double_double_quoted'))
        self.assertEqual(o[1],six.b('control-value-5'))
        # The newline was replaced with '::' by the query, so the value stays on a single output line
        self.assertIn(six.b('multiline:: value'),o[2])
        self.assertEqual(o[3],six.b('control-value-5'))
self.cleanup(tmp_data_file)
def test_multiline_escaped_double_quoted_values_in_quoted_data(self):
tmp_data_file = self.create_file_with_data(sample_quoted_data)
        # FIXME Need to convert \x0a to an encoding suitable for the environment running the tests.
cmd = Q_EXECUTABLE + ' -d " " "select replace(c6,X\'0A\',\'::\') from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),4)
        self.assertEqual(o[0],six.b('multiline_escaped_double_quoted'))
        self.assertEqual(o[1],six.b('control-value-6'))
        # The newline was replaced with '::' by the query, so the value stays on a single output line
        self.assertIn(six.b('multiline:: value'),o[2])
        self.assertEqual(o[3],six.b('control-value-6'))
self.cleanup(tmp_data_file)
def test_disable_double_double_quoted_data_flag__values(self):
        # This test (and flag) is meant to verify backward compatibility only. It is possible that
        # this flag will be removed completely in the future
tmp_data_file = self.create_file_with_data(double_double_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " --disable-double-double-quoting "select c2 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('double_double_quoted'))
self.assertEqual(o[1],six.b('this is a quoted value with "double'))
cmd = Q_EXECUTABLE + ' -d " " --disable-double-double-quoting "select c3 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b(''))
self.assertEqual(o[1],six.b('double'))
cmd = Q_EXECUTABLE + ' -d " " --disable-double-double-quoting "select c4 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b(''))
self.assertEqual(o[1],six.b('quotes"""'))
self.cleanup(tmp_data_file)
def test_disable_escaped_double_quoted_data_flag__values(self):
        # This test (and flag) is meant to verify backward compatibility only. It is possible that
        # this flag will be removed completely in the future
tmp_data_file = self.create_file_with_data(escaped_double_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " --disable-escaped-double-quoting "select c2 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('escaped_double_quoted'))
self.assertEqual(o[1],six.b('this is a quoted value with \\escaped'))
cmd = Q_EXECUTABLE + ' -d " " --disable-escaped-double-quoting "select c3 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b(''))
self.assertEqual(o[1],six.b('double'))
cmd = Q_EXECUTABLE + ' -d " " --disable-escaped-double-quoting "select c4 from %s" -W none' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b(''))
self.assertEqual(o[1],six.b('quotes\\""'))
self.cleanup(tmp_data_file)
def test_combined_quoted_data_flags__number_of_columns_detected(self):
        # This test (and flags) is meant to verify backward compatibility only. It is possible that
        # these flags will be removed completely in the future
tmp_data_file = self.create_file_with_data(combined_quoted_data)
cmd = Q_EXECUTABLE + ' -d " " --disable-double-double-quoting --disable-escaped-double-quoting "select * from %s" -A' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
o = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(o),7) # found 7 fields
cmd = Q_EXECUTABLE + ' -d " " --disable-escaped-double-quoting "select * from %s" -A' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
o = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(o),5) # found 5 fields
cmd = Q_EXECUTABLE + ' -d " " --disable-double-double-quoting "select * from %s" -A' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
o = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(o),5) # found 5 fields
cmd = Q_EXECUTABLE + ' -d " " "select * from %s" -A' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
o = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(o),3) # found only 3 fields, which is the correct amount
self.cleanup(tmp_data_file)

class EncodingTests(AbstractQTestCase):
def test_utf8_with_bom_encoding(self):
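        # utf-8-sig is a standard Python codec that strips the leading BOM (\xef\xbb\xbf) while decoding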
utf_8_data_with_bom = six.b('\xef\xbb\xbf"typeid","limit","apcost","date","checkpointId"\n"1","2","5","1,2,3,4,5,6,7","3000,3001,3002"\n"2","2","5","1,2,3,4,5,6,7","3003,3004,3005"\n')
tmp_data_file = self.create_file_with_data(utf_8_data_with_bom,encoding=None)
cmd = Q_EXECUTABLE + ' -d , -H -O -e utf-8-sig "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(e),0)
self.assertEqual(len(o),3)
self.assertEqual(o[0],six.b('typeid,limit,apcost,date,checkpointId'))
self.assertEqual(o[1],six.b('1,2,5,"1,2,3,4,5,6,7","3000,3001,3002"'))
self.assertEqual(o[2],six.b('2,2,5,"1,2,3,4,5,6,7","3003,3004,3005"'))
self.cleanup(tmp_data_file)

class QrcTests(AbstractQTestCase):
def test_explicit_qrc_filename_not_found(self):
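        # The QRC_FILENAME environment variable points q at an explicit qrc config file;
        # a path that does not exist should be a hard error rather than being silently ignored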
non_existent_filename = str(uuid.uuid4())
env_to_inject = { 'QRC_FILENAME': non_existent_filename}
cmd = Q_EXECUTABLE + ' "select 1"'
retcode, o, e = run_command(cmd, env_to_inject=env_to_inject)
self.assertEqual(retcode, 244)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
        self.assertEqual(e[0], six.b('QRC_FILENAME env var exists, but cannot find qrc file at %s' % non_existent_filename))
def test_explicit_qrc_filename_that_exists(self):
tmp_qrc_file = self.create_file_with_data(six.b('''[options]
output_delimiter=|
'''))
env_to_inject = { 'QRC_FILENAME': tmp_qrc_file.name}
cmd = Q_EXECUTABLE + ' "select 1,2"'
retcode, o, e = run_command(cmd, env_to_inject=env_to_inject)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('1|2'))
self.cleanup(tmp_qrc_file)
def test_all_default_options(self):
        # Create a qrc file that sets every available option, using values that differ from the regular defaults
tmp_qrc_file = self.create_file_with_data(six.b('''[options]
analyze_only=True
beautify=True
caching_mode=readwrite
column_count=32
delimiter=,
disable_column_type_detection=True
disable_double_double_quoting=False
disable_escaped_double_quoting=False
encoding=ascii
formatting=xxx
gzipped=True
input_quoting_mode=all
keep_leading_whitespace_in_values=True
list_user_functions=True
max_attached_sqlite_databases=888
max_column_length_limit=8888
mode=strict
output_delimiter=|
output_encoding=utf-8
output_header=True
output_quoting_mode=all
overwrite_qsql=False
pipe_delimited=True
pipe_delimited_output=True
query_encoding=ascii
query_filename=query-filename
save_db_to_disk_filename=save-db-to-disk-filename
skip_header=True
tab_delimited=True
tab_delimited_output=true
verbose=True
with_universal_newlines=True
'''))
env_to_inject = { 'QRC_FILENAME': tmp_qrc_file.name}
cmd = Q_EXECUTABLE + ' --dump-defaults'
retcode, o, e = run_command(cmd, env_to_inject=env_to_inject)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 34)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('[options]'))
o = o[1:]
m = {}
for r in o:
key,val = r.split(six.b("="),1)
m[key] = val
self.assertEqual(m[six.b('analyze_only')],six.b('True'))
self.assertEqual(m[six.b('beautify')],six.b('True'))
self.assertEqual(m[six.b('caching_mode')],six.b('readwrite'))
self.assertEqual(m[six.b('column_count')],six.b('32'))
self.assertEqual(m[six.b('delimiter')],six.b(','))
self.assertEqual(m[six.b('disable_column_type_detection')],six.b('True'))
self.assertEqual(m[six.b('disable_double_double_quoting')],six.b('False'))
self.assertEqual(m[six.b('disable_escaped_double_quoting')],six.b('False'))
self.assertEqual(m[six.b('encoding')],six.b('ascii'))
self.assertEqual(m[six.b('formatting')],six.b('xxx'))
self.assertEqual(m[six.b('gzipped')],six.b('True'))
self.assertEqual(m[six.b('input_quoting_mode')],six.b('all'))
self.assertEqual(m[six.b('keep_leading_whitespace_in_values')],six.b('True'))
self.assertEqual(m[six.b('list_user_functions')],six.b('True'))
self.assertEqual(m[six.b('max_attached_sqlite_databases')],six.b('888'))
self.assertEqual(m[six.b('max_column_length_limit')],six.b('8888'))
self.assertEqual(m[six.b('mode')],six.b('strict'))
self.assertEqual(m[six.b('output_delimiter')],six.b('|'))
self.assertEqual(m[six.b('output_encoding')],six.b('utf-8'))
self.assertEqual(m[six.b('output_header')],six.b('True'))
self.assertEqual(m[six.b('output_quoting_mode')],six.b('all'))
self.assertEqual(m[six.b('overwrite_qsql')],six.b('False'))
self.assertEqual(m[six.b('pipe_delimited')],six.b('True'))
self.assertEqual(m[six.b('pipe_delimited_output')],six.b('True'))
self.assertEqual(m[six.b('query_encoding')],six.b('ascii'))
self.assertEqual(m[six.b('query_filename')],six.b('query-filename'))
self.assertEqual(m[six.b('save_db_to_disk_filename')],six.b('save-db-to-disk-filename'))
self.assertEqual(m[six.b('skip_header')],six.b('True'))
self.assertEqual(m[six.b('tab_delimited')],six.b('True'))
self.assertEqual(m[six.b('tab_delimited_output')],six.b('True'))
self.assertEqual(m[six.b('verbose')],six.b('True'))
self.assertEqual(m[six.b('with_universal_newlines')],six.b('True'))
self.cleanup(tmp_qrc_file)
def test_caching_readwrite_using_qrc_file(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -d , "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),3)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('a,1,0'))
self.assertEqual(o[1],six.b('b,2,0'))
self.assertEqual(o[2],six.b('c,,0'))
        # Ensure that the default caching mode does not create a cache file
        self.assertFalse(os.path.exists(expected_cache_filename))
tmp_qrc_file = self.create_file_with_data(six.b('''[options]
caching_mode=readwrite
'''))
env_to_inject = { 'QRC_FILENAME': tmp_qrc_file.name}
cmd = Q_EXECUTABLE + ' -d , "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd, env_to_inject=env_to_inject)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),3)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('a,1,0'))
self.assertEqual(o[1],six.b('b,2,0'))
self.assertEqual(o[2],six.b('c,,0'))
        # Ensure that the caching_mode setting from the qrc file took effect (the cache file should now exist)
self.assertTrue(os.path.exists(expected_cache_filename))
self.cleanup(tmp_qrc_file)
self.cleanup(tmpfile)

class QsqlUsageTests(AbstractQTestCase):
def test_concatenate_same_qsql_file_with_single_table(self):
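        # Referencing the same qsql file twice within a single query must not cause a table-name clash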
numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers)
tmpfile = self.create_file_with_data(qsql_file_data,suffix='.qsql')
cmd = Q_EXECUTABLE + ' -t "select count(*) from (select * from %s union all select * from %s)"' % (tmpfile.name,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('20000'))
def test_query_qsql_with_single_table(self):
numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers)
tmpfile = self.create_file_with_data(qsql_file_data)
cmd = Q_EXECUTABLE + ' -t "select sum(aa),sum(bb),sum(cc) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('50005000\t50005000\t50005000'))
def test_query_qsql_with_single_table_with_explicit_non_existent_tablename(self):
numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers)
tmpfile = self.create_file_with_data(qsql_file_data)
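        # A qsql file is a regular sqlite database; the name of the generated table is
        # recorded in its _qcatalog metadata table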
c = sqlite3.connect(tmpfile.name)
actual_table_name = c.execute('select temp_table_name from _qcatalog').fetchall()[0][0]
c.close()
cmd = '%s -t "select sum(aa),sum(bb),sum(cc) from %s:::non-existent"' % (Q_EXECUTABLE,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 84)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('Table non-existent could not be found in qsql file %s . Existing table names: %s' % (tmpfile.name,actual_table_name)))
def test_query_qsql_with_single_table_with_explicit_table_name(self):
numbers = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers)
tmpfile = self.create_file_with_data(qsql_file_data)
c = sqlite3.connect(tmpfile.name)
actual_table_name = c.execute('select temp_table_name from _qcatalog').fetchall()[0][0]
c.close()
cmd = '%s -t "select sum(aa),sum(bb),sum(cc) from %s:::%s"' % (Q_EXECUTABLE,tmpfile.name,actual_table_name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('50005000\t50005000\t50005000'))
def test_query_multi_qsql_with_single_table(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data1 = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(qsql_file_data1,suffix='.qsql')
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
qsql_file_data2 = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers2)
tmpfile2 = self.create_file_with_data(qsql_file_data2,suffix='.qsql')
cmd = Q_EXECUTABLE + ' -t "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s small_file left join %s large_file on (large_file.aa == small_file.bb)"' % (tmpfile2.name,tmpfile1.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('55\t55\t55'))
def test_query_concatenated_qsqls_each_with_single_table(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data1 = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(qsql_file_data1,suffix='.qsql')
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
qsql_file_data2 = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers2)
tmpfile2 = self.create_file_with_data(qsql_file_data2,suffix='.qsql')
cmd = Q_EXECUTABLE + ' -t "select sum(aa),sum(bb),sum(cc) from (select * from %s union all select * from %s)"' % (tmpfile2.name,tmpfile1.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('50005055\t50005055\t50005055'))
def test_concatenated_qsql_and_data_stream__column_names_mismatch(self):
N1 = 10000
N2 = 100
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, N1 + 1)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
cmd = 'seq 1 %s | %s -c 1 "select count(*) from (select * from %s UNION ALL select * from -)"' % (N2, Q_EXECUTABLE,expected_cache_filename1)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 1)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('query error: SELECTs to the left and right of UNION ALL do not have the same number of result columns'))
def test_concatenated_qsql_and_data_stream(self):
N1 = 10000
N2 = 100
numbers1 = [[six.b(str(i))] for i in range(1, N1 + 1)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('c1')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
cmd = 'seq 1 %s | %s -t -c 1 "select count(*),sum(c1) from (select * from %s UNION ALL select * from -)"' % (N2, Q_EXECUTABLE,expected_cache_filename1)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('%s\t%s' % (N1+N2,sum(range(1,N1+1)) + sum(range(1,N2+1)))))
def test_concatenated_qsql_and_data_stream__explicit_table_name(self):
N1 = 10000
N2 = 100
numbers1 = [[six.b(str(i))] for i in range(1, N1 + 1)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('c1')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
tmpfile1_expected_table_name = os.path.basename(tmpfile1.name)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
cmd = 'seq 1 %s | %s -t -c 1 "select count(*),sum(c1) from (select * from %s:::%s UNION ALL select * from -)"' % (N2, Q_EXECUTABLE,expected_cache_filename1,tmpfile1_expected_table_name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('%s\t%s' % (N1+N2,sum(range(1,N1+1)) + sum(range(1,N2+1)))))
def test_write_to_qsql__check_chosen_table_name(self):
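        # The table name recorded in _qcatalog is expected to be the basename of the source file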
numbers1 = [[six.b(str(i))] for i in range(1, 10001)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('c1')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
cmd = Q_EXECUTABLE + ' -c 1 -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
c = sqlite3.connect(expected_cache_filename1)
qcatalog_entries = c.execute('select temp_table_name from _qcatalog').fetchall()
self.assertEqual(len(qcatalog_entries),1)
self.assertEqual(qcatalog_entries[0][0],os.path.basename(tmpfile1.name))
def test_concatenated_mixes_qsql_with_single_table_and_csv(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
csv_file_data2 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers2)
tmpfile2 = self.create_file_with_data(csv_file_data2)
expected_cache_filename2 = '%s.qsql' % tmpfile2.name
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile2.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename2))
        # csv and qsql files are prepared. Now test all four csv/qsql input combinations
cmd = Q_EXECUTABLE + ' -O -H -t "select count(*) cnt,sum(aa) sum_aa,sum(bb) sum_bb,sum(cc) sum_cc from (select * from %s union all select * from %s)"' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),2)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('cnt\tsum_aa\tsum_bb\tsum_cc'))
self.assertEqual(o[1],six.b('10010\t50005055\t50005055\t50005055'))
cmd = Q_EXECUTABLE + ' -O -H -t "select count(*) cnt,sum(aa) sum_aa,sum(bb) sum_bb,sum(cc) sum_cc from (select * from %s union all select * from %s.qsql)"' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),2)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('cnt\tsum_aa\tsum_bb\tsum_cc'))
self.assertEqual(o[1],six.b('10010\t50005055\t50005055\t50005055'))
cmd = Q_EXECUTABLE + ' -O -H -t "select count(*) cnt,sum(aa) sum_aa,sum(bb) sum_bb,sum(cc) sum_cc from (select * from %s.qsql union all select * from %s)"' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),2)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('cnt\tsum_aa\tsum_bb\tsum_cc'))
self.assertEqual(o[1],six.b('10010\t50005055\t50005055\t50005055'))
cmd = Q_EXECUTABLE + ' -O -H -t "select count(*) cnt,sum(aa) sum_aa,sum(bb) sum_bb,sum(cc) sum_cc from (select * from %s.qsql union all select * from %s.qsql)"' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),2)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('cnt\tsum_aa\tsum_bb\tsum_cc'))
self.assertEqual(o[1],six.b('10010\t50005055\t50005055\t50005055'))
def test_analysis_of_concatenated_mixes_qsql_with_single_table_and_csv(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
csv_file_data1 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(csv_file_data1)
expected_cache_filename1 = '%s.qsql' % tmpfile1.name
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
csv_file_data2 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers2)
tmpfile2 = self.create_file_with_data(csv_file_data2)
expected_cache_filename2 = '%s.qsql' % tmpfile2.name
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename1))
cmd = Q_EXECUTABLE + ' -H -t "select count(*) from %s" -C readwrite' % tmpfile2.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertTrue(os.path.exists(expected_cache_filename2))
        # csv and qsql files are prepared
# Test function, will be used multiple times, each time with a different combination
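        # The *_source_type parameters are the source types that -A is expected to report, and
        # the *_postfix parameters control whether the .qsql suffix is used in the query itself
        # and in the reported source, respectively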
def do_check(caching_mode,
file1_source_type,file1_table_postfix,file1_postfix,
file2_source_type,file2_table_postfix,file2_postfix):
cmd = '%s -C %s -O -H -t "select count(*) cnt,sum(aa) sum_aa,sum(bb) sum_bb,sum(cc) sum_cc from (select * from %s%s UNION ALL select * from %s%s)" -A' % (
Q_EXECUTABLE,
caching_mode,
tmpfile1.name,
file1_table_postfix,
tmpfile2.name,
file2_table_postfix)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),14)
self.assertEqual(len(e),0)
self.assertEqual(o, [
six.b('Table: %s%s' % (tmpfile1.name,file1_table_postfix)),
six.b(' Sources:'),
six.b(' source_type: %s source: %s%s' % (file1_source_type,tmpfile1.name,file1_postfix)),
six.b(' Fields:'),
six.b(' `aa` - int'),
six.b(' `bb` - int'),
six.b(' `cc` - int'),
six.b('Table: %s%s' % (tmpfile2.name,file2_table_postfix)),
six.b(' Sources:'),
six.b(' source_type: %s source: %s%s' % (file2_source_type,tmpfile2.name,file2_postfix)),
six.b(' Fields:'),
six.b(' `aa` - int'),
six.b(' `bb` - int'),
six.b(' `cc` - int')])
        # Now test the analysis results of all four combinations, adding `-C read` so the
        # qsql will be used. Running with `-C none` would cause the qsql not to be used even if the qsql file exists
do_check(caching_mode='read',
file1_source_type='qsql-file-with-original',file1_table_postfix='',file1_postfix='.qsql',
file2_source_type='qsql-file-with-original',file2_table_postfix='',file2_postfix='.qsql')
do_check('read',
file1_source_type='qsql-file-with-original',file1_table_postfix='',file1_postfix='.qsql',
file2_source_type='qsql-file',file2_table_postfix='.qsql',file2_postfix='.qsql')
do_check('read',
file1_source_type='qsql-file',file1_table_postfix='.qsql',file1_postfix='.qsql',
file2_source_type='qsql-file-with-original',file2_table_postfix='',file2_postfix='.qsql')
do_check('read',
file1_source_type='qsql-file',file1_table_postfix='.qsql',file1_postfix='.qsql',
file2_source_type='qsql-file',file2_table_postfix='.qsql',file2_postfix='.qsql')
        # Now test all the combinations again, this time with `-C none`, to make sure that by
        # default the qsql file is not used, and that -A shows that fact
do_check(caching_mode='none',
file1_source_type='file-with-unused-qsql',file1_table_postfix='',file1_postfix='',
file2_source_type='file-with-unused-qsql',file2_table_postfix='',file2_postfix='')
do_check('none',
file1_source_type='file-with-unused-qsql',file1_table_postfix='',file1_postfix='',
file2_source_type='qsql-file',file2_table_postfix='.qsql',file2_postfix='.qsql')
do_check('none',
file1_source_type='qsql-file',file1_table_postfix='.qsql',file1_postfix='.qsql',
file2_source_type='file-with-unused-qsql',file2_table_postfix='',file2_postfix='')
do_check('none',
file1_source_type='qsql-file',file1_table_postfix='.qsql',file1_postfix='.qsql',
file2_source_type='qsql-file',file2_table_postfix='.qsql',file2_postfix='.qsql')
def test_mixed_qsql_with_single_table_and_csv__missing_header_parameter_for_csv(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_file_data1 = self.arrays_to_qsql_file_content([six.b('aa'), six.b('bb'), six.b('cc')], numbers1)
tmpfile1 = self.create_file_with_data(qsql_file_data1,suffix='.qsql')
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
csv_file_data2 = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'), six.b('bb'), six.b('cc')], numbers2)
tmpfile2 = self.create_file_with_data(csv_file_data2)
cmd = Q_EXECUTABLE + ' -t "select sum(aa),sum(bb),sum(cc) from (select * from %s union all select * from %s)"' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b('Warning - There seems to be header line in the file, but -H has not been specified. All fields will be detected as text fields, and the header line will appear as part of the data'))
self.assertEqual(o[0],six.b('50005055.0\t50005055.0\t50005055.0'))
def test_qsql_with_multiple_tables_direct_use(self):
numbers1 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 10001)]
qsql_filename1 = self.create_qsql_file_with_content_and_return_filename([six.b('aa'), six.b('bb'), six.b('cc')],numbers1)
expected_stored_table_name1 = os.path.basename(qsql_filename1)[:-5]
numbers2 = [[six.b(str(i)), six.b(str(i)), six.b(str(i))] for i in range(1, 11)]
qsql_filename2 = self.create_qsql_file_with_content_and_return_filename([six.b('aa'), six.b('bb'), six.b('cc')],numbers2)
expected_stored_table_name2 = os.path.basename(qsql_filename2)[:-5]
qsql_with_multiple_tables = self.generate_tmpfile_name(suffix='.qsql')
cmd = '%s -t "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s large_file left join %s small_file on (large_file.aa == small_file.bb)" -S %s' % \
(Q_EXECUTABLE,qsql_filename1,qsql_filename2,qsql_with_multiple_tables)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 4)
self.assertEqual(e[0], six.b('Going to save data into a disk database: %s' % qsql_with_multiple_tables))
self.assertTrue(e[1].startswith(six.b('Data has been saved into %s . Saving has taken' % qsql_with_multiple_tables)))
self.assertEqual(e[2],six.b('Query to run on the database: select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s large_file left join %s small_file on (large_file.aa == small_file.bb);' % \
(expected_stored_table_name1,expected_stored_table_name2)))
self.assertEqual(e[3],six.b('You can run the query directly from the command line using the following command: echo "select sum(large_file.aa),sum(large_file.bb),sum(large_file.cc) from %s large_file left join %s small_file on (large_file.aa == small_file.bb)" | sqlite3 %s' % \
(expected_stored_table_name1,expected_stored_table_name2,qsql_with_multiple_tables)))
cmd = '%s -d , "select count(*) cnt,sum(aa),sum(bb),sum(cc) from %s:::%s"' % (Q_EXECUTABLE,qsql_with_multiple_tables,expected_stored_table_name1)
r, o, e = run_command(cmd)
self.assertEqual(r,0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10000,50005000,50005000,50005000'))
def test_direct_use_of_sqlite_db_with_one_table(self):
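        # q can query a plain sqlite database directly; when the database contains a single
        # table, the ':::table-name' qualifier may be omitted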
tmpfile = self.create_file_with_data(six.b(''),suffix='.sqlite')
os.remove(tmpfile.name)
c = sqlite3.connect(tmpfile.name)
        c.execute('create table mytable (x int, y int)')
        c.execute('insert into mytable (x,y) values (100,200),(300,400)')
c.commit()
c.close()
cmd = Q_EXECUTABLE + ' -t "select sum(x),sum(y) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('400\t600'))
cmd = Q_EXECUTABLE + ' -t "select sum(x),sum(y) from %s:::mytable"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('400\t600'))
def test_direct_use_of_sqlite_db_with_one_table__nonexistent_table(self):
tmpfile = self.create_file_with_data(six.b(''),suffix='.sqlite')
os.remove(tmpfile.name)
c = sqlite3.connect(tmpfile.name)
        c.execute('create table some_numbers (x int, y int)')
        c.execute('insert into some_numbers (x,y) values (100,200),(300,400)')
c.commit()
c.close()
cmd = Q_EXECUTABLE + ' -t "select sum(x),sum(y) from %s:::non_existent"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 85)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b('Table non_existent could not be found in sqlite file %s . Existing table names: some_numbers' % (tmpfile.name)))
def test_qsql_creation_and_direct_use(self):
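        # -C readwrite writes a .qsql cache next to the source file; the cache can then be
        # queried directly as if it were the original file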
numbers = [[six.b(str(i)),six.b(str(i)),six.b(str(i))] for i in range(1,10001)]
file_data = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'),six.b('bb'),six.b('cc')],numbers)
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -t "select sum(aa),sum(bb),sum(cc) from %s" -H -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('50005000\t50005000\t50005000'))
self.assertTrue(os.path.exists(expected_cache_filename))
self.cleanup(tmpfile)
        # Get the data using a comma delimiter, to make sure that column parsing was done correctly
cmd = Q_EXECUTABLE + ' -D , "select count(*),sum(aa),sum(bb),sum(cc) from %s"' % expected_cache_filename
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('10000,50005000,50005000,50005000'))
def test_analysis_of_qsql_direct_usage(self):
numbers = [[six.b(str(i)),six.b(str(i)),six.b(str(i))] for i in range(1,10001)]
file_data = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'),six.b('bb'),six.b('cc')],numbers)
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -t "select sum(aa),sum(bb),sum(cc) from %s" -H -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('50005000\t50005000\t50005000'))
self.assertTrue(os.path.exists(expected_cache_filename))
self.cleanup(tmpfile)
cmd = Q_EXECUTABLE + ' "select * from %s" -A' % expected_cache_filename
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('Table: %s' % expected_cache_filename))
self.assertEqual(o[1],six.b(" Sources:"))
self.assertEqual(o[2],six.b(' source_type: qsql-file source: %s' % expected_cache_filename))
self.assertEqual(o[3],six.b(" Fields:"))
self.assertEqual(o[4],six.b(' `aa` - int'))
self.assertEqual(o[5],six.b(' `bb` - int'))
self.assertEqual(o[6],six.b(' `cc` - int'))
def test_analysis_of_qsql_direct_usage2(self):
numbers = [[six.b(str(i)),six.b(str(i)),six.b(str(i))] for i in range(1,10001)]
file_data = self.arrays_to_csv_file_content(six.b('\t'),[six.b('aa'),six.b('bb'),six.b('cc')],numbers)
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -t "select sum(aa),sum(bb),sum(cc) from %s" -H -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('50005000\t50005000\t50005000'))
self.assertTrue(os.path.exists(expected_cache_filename))
self.cleanup(tmpfile)
cmd = Q_EXECUTABLE + ' "select * from %s" -A' % expected_cache_filename
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('Table: %s' % expected_cache_filename))
self.assertEqual(o[1],six.b(" Sources:"))
self.assertEqual(o[2],six.b(' source_type: qsql-file source: %s' % expected_cache_filename))
self.assertEqual(o[3],six.b(" Fields:"))
self.assertEqual(o[4],six.b(' `aa` - int'))
self.assertEqual(o[5],six.b(' `bb` - int'))
self.assertEqual(o[6],six.b(' `cc` - int'))
def test_direct_qsql_usage_for_single_table_qsql_file(self):
disk_db_filename = self.random_tmp_filename('save-to-db','qsql')
cmd = 'seq 1 10000 | %s -t "select sum(aa),sum(bb),sum(cc) from -" -S %s' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
cmd = '%s -D, "select count(*),sum(c1) from %s:::data_stream_stdin"' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10000,50005000'))
def test_direct_qsql_usage_for_single_table_qsql_file__nonexistent_table(self):
disk_db_filename = self.random_tmp_filename('save-to-db','qsql')
cmd = 'seq 1 10000 | %s -t "select sum(aa),sum(bb),sum(cc) from -" -S %s' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
cmd = '%s -D, "select count(*),sum(c1) from %s:::unknown_table_name"' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 85)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b('Table unknown_table_name could not be found in sqlite file %s . Existing table names: data_stream_stdin' % (disk_db_filename)))
def test_direct_qsql_usage_from_written_data_stream(self):
disk_db_filename = self.random_tmp_filename('save-to-db','qsql')
cmd = 'seq 1 10000 | %s -t "select sum(aa),sum(bb),sum(cc) from -" -S %s' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
cmd = '%s -D, "select count(*),sum(c1) from %s:::data_stream_stdin"' % (Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10000,50005000'))
def test_direct_qsql_self_join(self):
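        # The ':::table-name' syntax allows joining a qsql table with itself under two aliases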
disk_db_filename = self.random_tmp_filename('save-to-db','qsql')
N = 100
cmd = 'seq 1 %s | %s -t "select count(*),sum(c1) from -" -S %s' % (N,Q_EXECUTABLE,disk_db_filename)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
cmd = '%s -D, "select count(*),sum(a.c1),sum(b.c1) from %s:::data_stream_stdin a left join %s:::data_stream_stdin b"' % (Q_EXECUTABLE,disk_db_filename,disk_db_filename)
retcode, o, e = run_command(cmd)
expected_sum = sum(range(1,N+1))*N
self.assertEqual(retcode, 0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('10000,%s,%s' % (expected_sum,expected_sum)))

class CachingTests(AbstractQTestCase):
def test_cache_empty_file(self):
file_data = six.b("a,b,c")
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
tmpfile_expected_table_name = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C none' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b("Warning - data is empty"))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0],six.b("Warning - data is empty"))
# After readwrite caching has been activated, the cache file is expected to exist
self.assertTrue(os.path.exists(expected_cache_filename))
# Read the cache file directly, to make sure it's a valid sqlite file
db = sqlite3.connect(expected_cache_filename)
table_list = db.execute("select content_signature_key,temp_table_name,content_signature,creation_time,source_type,source from _qcatalog where temp_table_name == '%s'" % (tmpfile_expected_table_name)).fetchall()
        self.assertEqual(len(table_list), 1)
table_metadata = table_list[0]
results = db.execute("select * from %s" % table_metadata[1]).fetchall()
        self.assertEqual(len(results), 0)
self.cleanup(tmpfile)
def test_reading_the_wrong_cache__original_file_having_different_data(self):
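        # The qsql cache stores a content signature of the original file, so changing the
        # file's data must invalidate the cache when running with -C read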
file_data1 = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename))
# Overwrite the original file
file_data2 = six.b("a,b,c\n10,20,30\n30,40,50\n50,60,70")
self.write_file(tmpfile1.name,file_data2)
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 81)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(e[0], six.b('%s vs %s.qsql: Content Signatures differ at inferer.rows (actual analysis data differs)' % \
(tmpfile1.name,tmpfile1.name)))
def test_reading_the_wrong_cache__original_file_having_different_delimiter(self):
file_data1 = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename))
# Overwrite the original file
file_data2 = six.b("a\tb\tc\n10\t20\t30\n30\t40\t50")
self.write_file(tmpfile1.name,file_data2)
cmd = Q_EXECUTABLE + ' -H -t "select a from %s" -C read' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 80)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
x = six.b("%s vs %s.qsql: Content Signatures for table %s differ at input_delimiter (source value '\t' disk signature value ',')" % \
(tmpfile1.name,tmpfile1.name,tmpfile1.name))
self.assertEqual(e[0], x)
def test_rename_cache_and_read_from_it(self):
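        # A .qsql cache is self-contained, so it should remain queryable even after being renamed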
# create a file, along with its qsql
file_data1 = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename1))
tmp_fn = self.generate_tmpfile_name("aa","qsql")
os.rename(expected_cache_filename1,tmp_fn)
cmd = '%s "select a from %s"' % (Q_EXECUTABLE,tmp_fn)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
def test_reading_the_wrong_cache__qsql_file_not_having_a_matching_content_signature(self):
# create a file, along with its qsql
file_data1 = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename1))
file_data2 = six.b("c,d,e\n10,20,30\n30,40,50")
# create another file with a different header, along with its qsql
tmpfile2 = self.create_file_with_data(file_data2)
tmpfile2_folder = os.path.dirname(tmpfile2.name)
tmpfile2_filename = os.path.basename(tmpfile2.name)
expected_cache_filename2 = os.path.join(tmpfile2_folder,tmpfile2_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select c from %s" -C readwrite' % tmpfile2.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename2))
        # Now pass off the second qsql file as if it were the first. Execution on file 1 should fail, since the qsql
        # file does not actually contain the table we're after
os.remove(expected_cache_filename1)
os.rename(expected_cache_filename2,expected_cache_filename1)
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 80)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
x = six.b("%s vs %s.qsql: Content Signatures for table %s differ at inferer.header_row (source value '['a', 'b', 'c']' disk signature value '['c', 'd', 'e']')" % (tmpfile1.name,tmpfile1.name,tmpfile1.name))
self.assertEqual(e[0], x)
def test_reading_the_wrong_cache__qsql_file_not_having_any_content_signature(self):
# create a file, along with its qsql
file_data1 = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0], six.b('10'))
self.assertEqual(o[1], six.b('30'))
# Ensure cache has been created
self.assertTrue(os.path.exists(expected_cache_filename1))
# delete qcatalog content, so no entries will be available
c = sqlite3.connect(expected_cache_filename1)
        c.execute('delete from _qcatalog')
c.commit()
c.close()
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 97)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertEqual(e[0],six.b("Could not autodetect table name in qsql file. File contains no record of a table"))
def test_cache_full_flow(self):
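        # Walk through the full caching lifecycle: -C none (no cache created), -C read (cache
        # used only if present), -C readwrite (cache created), then -C read again (cache reused)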
file_data = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_tmpfile_table_name = tmpfile_filename
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C none' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# Ensure cache has not been created
        self.assertFalse(os.path.exists(expected_cache_filename))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# Ensure cache has not been created, as cache mode is "read" only
        self.assertFalse(os.path.exists(expected_cache_filename))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# After readwrite caching has been activated, the cache file is expected to exist
self.assertTrue(os.path.exists(expected_cache_filename))
# Read the cache file directly, to make sure it's a valid sqlite file
db = sqlite3.connect(expected_cache_filename)
table_list = db.execute("select content_signature_key,temp_table_name,content_signature,creation_time,source_type,source from _qcatalog where temp_table_name == '%s'" % expected_tmpfile_table_name).fetchall()
self.assertTrue(len(table_list) == 1)
table_metadata = table_list[0]
results = db.execute("select * from %s" % table_metadata[1]).fetchall()
self.assertEqual(results[0],(10,20,30))
self.assertEqual(results[1],(30,40,50))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# After readwrite caching has been activated, the cache file is expected to exist
self.assertTrue(os.path.exists(expected_cache_filename))
self.cleanup(tmpfile)
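    # Illustrative sketch, not used by the tests above: the same _qcatalog
    # inspection that test_cache_full_flow performs inline, extracted as a
    # helper. A .qsql cache is a plain sqlite3 file whose _qcatalog table
    # maps each cached table name to its metadata.
    def _read_qcatalog_sketch(self, qsql_filename):
        import sqlite3
        db = sqlite3.connect(qsql_filename)
        try:
            # one row per table cached inside this qsql file
            return db.execute('select temp_table_name,source_type,source from _qcatalog').fetchall()
        finally:
            db.close()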
def test_cache_full_flow_with_concatenated_files(self):
file_data1 = six.b("a,b,c\n10,11,12\n20,21,22")
tmpfile1 = self.create_file_with_data(file_data1)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
file_data2 = six.b("a,b,c\n30,31,32\n40,41,42")
tmpfile2 = self.create_file_with_data(file_data2)
tmpfile2_folder = os.path.dirname(tmpfile2.name)
tmpfile2_filename = os.path.basename(tmpfile2.name)
expected_cache_filename2 = os.path.join(tmpfile2_folder,tmpfile2_filename + '.qsql')
cmd = Q_EXECUTABLE + ' -O -H -d , "select * from (select * from %s UNION ALL select * from %s)" -C readwrite' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 5)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('a,b,c'))
self.assertEqual(o[1],six.b('10,11,12'))
self.assertEqual(o[2],six.b('20,21,22'))
self.assertEqual(o[3],six.b('30,31,32'))
self.assertEqual(o[4],six.b('40,41,42'))
self.assertTrue(os.path.exists(expected_cache_filename1))
self.assertTrue(os.path.exists(expected_cache_filename2))
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_analyze_result_with_cache_file(self):
file_data = six.b("a,b,c\n10,20,30\n30,40,50")
tmpfile = self.create_file_with_data(file_data)
tmpfile_folder = os.path.dirname(tmpfile.name)
tmpfile_filename = os.path.basename(tmpfile.name)
expected_cache_filename = os.path.join(tmpfile_folder,tmpfile_filename + '.qsql')
# Ensure cache has not been created yet
self.assertTrue(not os.path.exists(expected_cache_filename))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# Ensure cache is now created
self.assertTrue(os.path.exists(expected_cache_filename))
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),7)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: qsql-file-with-original source: %s.qsql' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `a` - int'))
self.assertEqual(o[5],six.b(' `b` - int'))
self.assertEqual(o[6],six.b(' `c` - int'))
# delete the newly created cache
os.remove(expected_cache_filename)
# Now rerun the analysis without the cache file
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C read -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o),7)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s' % tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `a` - int'))
self.assertEqual(o[5],six.b(' `b` - int'))
self.assertEqual(o[6],six.b(' `c` - int'))
self.cleanup(tmpfile)
def test_partial_caching_exists(self):
file1_data = six.b("a,b,c\n10,20,30\n30,40,50\n60,70,80")
tmpfile1 = self.create_file_with_data(file1_data)
tmpfile1_folder = os.path.dirname(tmpfile1.name)
tmpfile1_filename = os.path.basename(tmpfile1.name)
expected_cache_filename1 = os.path.join(tmpfile1_folder,tmpfile1_filename + '.qsql')
file2_data = six.b("b,x\n10,linewith10\n20,linewith20\n30,linewith30\n40,linewith40")
tmpfile2 = self.create_file_with_data(file2_data)
tmpfile2_folder = os.path.dirname(tmpfile2.name)
tmpfile2_filename = os.path.basename(tmpfile2.name)
expected_cache_filename2 = os.path.join(tmpfile2_folder,tmpfile2_filename + '.qsql')
# Use only first file, and cache
cmd = Q_EXECUTABLE + ' -H -d , "select a from %s" -C readwrite' % tmpfile1.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
        self.assertEqual(o[0],six.b('10'))
self.assertEqual(o[1],six.b('30'))
# Ensure cache has been created for file 1
self.assertTrue(os.path.exists(expected_cache_filename1))
# Use both files with read caching, one should be read from cache, the other from the file
cmd = Q_EXECUTABLE + ' -H -d , "select file1.a,file1.b,file1.c,file2.x from %s file1 left join %s file2 on (file1.b = file2.b)" -C read' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('10,20,30,linewith20'))
self.assertEqual(o[1],six.b('30,40,50,linewith40'))
self.assertEqual(o[2],six.b('60,70,80,'))
# Ensure cache has NOT been created for file 2
self.assertTrue(not os.path.exists(expected_cache_filename2))
# Now rerun the query, this time with readwrite caching, so the second file cache will be written
cmd = Q_EXECUTABLE + ' -H -d , "select file1.a,file1.b,file1.c,file2.x from %s file1 left join %s file2 on (file1.b = file2.b)" -C readwrite' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('10,20,30,linewith20'))
self.assertEqual(o[1],six.b('30,40,50,linewith40'))
self.assertEqual(o[2],six.b('60,70,80,'))
# Ensure cache has now been created for file 2
self.assertTrue(os.path.exists(expected_cache_filename2))
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
class UserFunctionTests(AbstractQTestCase):
def test_regexp_int_data_handling(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select c2 from %s where regexp(\'^1\',c2)"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b("1"))
self.cleanup(tmpfile)
def test_percentile_func(self):
cmd = 'seq 1000 1999 | %s "select substr(c1,0,3),percentile(c1,0),percentile(c1,0.5),percentile(c1,1) from - group by substr(c1,0,3)" -c 1' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 10)
self.assertEqual(len(e), 0)
output_table = [l.split(six.b(" ")) for l in o]
group_labels = [int(row[0]) for row in output_table]
minimum_values = [float(row[1]) for row in output_table]
median_values = [float(row[2]) for row in output_table]
max_values = [float(row[3]) for row in output_table]
base_values = list(range(1000,2000,100))
self.assertEqual(group_labels,list(range(10,20)))
self.assertEqual(minimum_values,base_values)
self.assertEqual(median_values,list(map(lambda x: x + 49.5,base_values)))
self.assertEqual(max_values,list(map(lambda x: x + 99,base_values)))
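    # Sketch of where the expected values above come from: substr(c1,0,3) over
    # seq 1000..1999 yields ten groups of 100 consecutive integers, so for a
    # group based at N the minimum is N, the median N+49.5 and the maximum N+99.
    def _derive_expected_percentiles_sketch(self):
        import statistics
        group = list(range(1000, 1100))  # the group labeled '10'
        assert min(group) == 1000
        assert statistics.median(group) == 1049.5
        assert max(group) == 1099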
def test_regexp_null_data_handling(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select count(*) from %s where regexp(\'^\',c2)"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b("2"))
self.cleanup(tmpfile)
def test_md5_function(self):
cmd = 'seq 1 4 | %s -c 1 -d , "select c1,md5(c1,\'utf-8\') from -"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),4)
self.assertEqual(len(e),0)
self.assertEqual(tuple(o[0].split(six.b(','),1)),(six.b('1'),six.b('c4ca4238a0b923820dcc509a6f75849b')))
self.assertEqual(tuple(o[1].split(six.b(','),1)),(six.b('2'),six.b('c81e728d9d4c2f636f067f89cc14862c')))
self.assertEqual(tuple(o[2].split(six.b(','),1)),(six.b('3'),six.b('eccbc87e4b5ce2fe28308fd9f2a7baf3')))
self.assertEqual(tuple(o[3].split(six.b(','),1)),(six.b('4'),six.b('a87ff679a2f3e71d9181a67b7542122c')))
def test_stddev_functions(self):
tmpfile = self.create_file_with_data(six.b("\n".join(map(str,[234,354,3234,123,4234,234,634,56,65]))))
cmd = '%s -c 1 -d , "select round(stddev_pop(c1),10),round(stddev_sample(c1),10) from %s"' % (Q_EXECUTABLE,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('1479.7015464838,1569.4604964764'))
self.cleanup(tmpfile)
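    # Sketch: the expected constants in test_stddev_functions can be
    # reproduced with the statistics module (pstdev is the population stddev,
    # stdev the sample stddev), up to floating-point rounding.
    def _derive_expected_stddev_sketch(self):
        import statistics
        values = [234, 354, 3234, 123, 4234, 234, 634, 56, 65]
        assert abs(statistics.pstdev(values) - 1479.7015464838) < 1e-9
        assert abs(statistics.stdev(values) - 1569.4604964764) < 1e-9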
def test_sqrt_function(self):
cmd = 'seq 1 5 | %s -c 1 -d , "select round(sqrt(c1),10) from -"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),5)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('1.0'))
self.assertEqual(o[1],six.b('1.4142135624'))
self.assertEqual(o[2],six.b('1.7320508076'))
self.assertEqual(o[3],six.b('2.0'))
self.assertEqual(o[4],six.b('2.2360679775'))
def test_power_function(self):
cmd = 'seq 1 5 | %s -c 1 -d , "select round(power(c1,2.5),10) from -"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),5)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('1.0'))
self.assertEqual(o[1],six.b('5.6568542495'))
self.assertEqual(o[2],six.b('15.5884572681'))
self.assertEqual(o[3],six.b('32.0'))
self.assertEqual(o[4],six.b('55.9016994375'))
def test_file_functions(self):
filenames = [
"file1",
"file2.csv",
"/var/tmp/file3",
"/var/tmp/file4.gz",
""
]
data = "\n".join(filenames)
cmd = 'echo "%s" | %s -c 1 -d , "select file_folder(c1),file_ext(c1),file_basename(c1),file_basename_no_ext(c1) from -"' % (data,Q_EXECUTABLE)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),5)
self.assertEqual(len(e),0)
self.assertEqual(o,[
b',,file1,file1',
b',.csv,file2.csv,file2',
b'/var/tmp,,file3,file3',
b'/var/tmp,.gz,file4.gz,file4',
b',,,'
])
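    # Sketch, assuming q's file_* functions mirror os.path semantics, of how
    # the expected tuples in test_file_functions can be derived locally.
    def _derive_expected_file_parts_sketch(self, name='/var/tmp/file4.gz'):
        import os
        folder = os.path.dirname(name)            # '/var/tmp'
        basename = os.path.basename(name)         # 'file4.gz'
        no_ext, ext = os.path.splitext(basename)  # ('file4', '.gz')
        return folder, ext, basename, no_ext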
def test_sha1_function(self):
cmd = 'seq 1 4 | %s -c 1 -d , "select c1,sha1(c1) from -"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),4)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('1,356a192b7913b04c54574d18c28d46e6395428ab'))
self.assertEqual(o[1],six.b('2,da4b9237bacccdf19c0760cab7aec4a8359010b0'))
self.assertEqual(o[2],six.b('3,77de68daecd823babbb58edb1c8e14d7106e83bb'))
self.assertEqual(o[3],six.b('4,1b6453892473a467d07372d45eb05abc2031647a'))
def test_regexp_extract_function(self):
query = """
select
regexp_extract('was ([0-9]+) seconds and ([0-9]+) ms',c1,0),
regexp_extract('was ([0-9]+) seconds and ([0-9]+) ms',c1,1),
regexp_extract('non-existent-(regexp)',c1,0)
from
-
"""
cmd = 'echo "Duration was 322 seconds and 240 ms" | %s -c 1 -d , "%s"' % (Q_EXECUTABLE,query)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),1)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('322,240,'))
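    # Sketch of the extraction semantics assumed by the test above:
    # regexp_extract(pattern,value,n) is taken to behave like re.search with
    # capture group n+1, yielding NULL (empty output) when nothing matches.
    def _derive_expected_regexp_extract_sketch(self):
        import re
        line = 'Duration was 322 seconds and 240 ms'
        m = re.search('was ([0-9]+) seconds and ([0-9]+) ms', line)
        assert (m.group(1), m.group(2)) == ('322', '240')
        assert re.search('non-existent-(regexp)', line) is None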
def test_sha_function(self):
cmd = 'seq 1 4 | %s -c 1 -d , "select c1,sha(c1,1,\'utf-8\') as sha1,sha(c1,224,\'utf-8\') as sha224,sha(c1,256,\'utf-8\') as sha256 from -"' % Q_EXECUTABLE
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),4)
self.assertEqual(len(e),0)
self.assertEqual(o[0],six.b('1,356a192b7913b04c54574d18c28d46e6395428ab,e25388fde8290dc286a6164fa2d97e551b53498dcbf7bc378eb1f178,6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b'))
self.assertEqual(o[1],six.b('2,da4b9237bacccdf19c0760cab7aec4a8359010b0,58b2aaa0bfae7acc021b3260e941117b529b2e69de878fd7d45c61a9,d4735e3a265e16eee03f59718b9b5d03019c07d8b6c51f90da3a666eec13ab35'))
self.assertEqual(o[2],six.b('3,77de68daecd823babbb58edb1c8e14d7106e83bb,4cfc3a1811fe40afa401b25ef7fa0379f1f7c1930a04f8755d678474,4e07408562bedb8b60ce05c1decfe3ad16b72230967de01f640b7e4729b49fce'))
self.assertEqual(o[3],six.b('4,1b6453892473a467d07372d45eb05abc2031647a,271f93f45e9b4067327ed5c8cd30a034730aaace4382803c3e1d6c2f,4b227777d4dd1fc61c6f884f48641d02b4d121d3fd328cb08b5531fcacdabf8a'))
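    # Sketch: the md5/sha digest constants asserted above are re-derivable
    # with hashlib; sha(c1,N,...) is assumed here to map to hashlib's shaN.
    def _derive_expected_digests_sketch(self):
        import hashlib
        assert hashlib.md5(b'1').hexdigest() == 'c4ca4238a0b923820dcc509a6f75849b'
        assert hashlib.sha1(b'1').hexdigest() == '356a192b7913b04c54574d18c28d46e6395428ab'
        # the generic constructor form, usable for arbitrary bit sizes such as 224/256
        assert hashlib.new('sha256', b'1').hexdigest() == '6b86b273ff34fce19d6b804eff5a3f5747ada4eaa22f1d49c01e52ddb7875b4b'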
class MultiHeaderTests(AbstractQTestCase):
def test_output_header_when_multiple_input_headers_exist(self):
TMPFILE_COUNT = 5
tmpfiles = [self.create_file_with_data(sample_data_with_header) for x in range(TMPFILE_COUNT)]
tmpfilenames = " UNION ALL ".join(map(lambda x:"select * from %s" % x.name, tmpfiles))
cmd = Q_EXECUTABLE + ' -d , "select name,value1,value2 from (%s) order by name" -H -O' % tmpfilenames
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), TMPFILE_COUNT*3+1)
self.assertEqual(o[0], six.b("name,value1,value2"))
        for i in range(TMPFILE_COUNT):
            self.assertEqual(o[1+i],sample_data_rows[0])
        for i in range(TMPFILE_COUNT):
            self.assertEqual(o[TMPFILE_COUNT+1+i],sample_data_rows[1])
        for i in range(TMPFILE_COUNT):
            self.assertEqual(o[TMPFILE_COUNT*2+1+i],sample_data_rows[2])
for oi in o[1:]:
self.assertTrue(six.b('name') not in oi)
for i in range(TMPFILE_COUNT):
self.cleanup(tmpfiles[i])
def test_output_header_when_extra_header_column_names_are_different__concatenation_replacement(self):
tmpfile1 = self.create_file_with_data(sample_data_with_header)
tmpfile2 = self.create_file_with_data(generate_sample_data_with_header(six.b('othername,value1,value2')))
cmd = Q_EXECUTABLE + ' -d , "select name,value1,value2 from (select * from %s union all select * from %s) order by name" -H -O' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 0)
        self.assertEqual(o, [
six.b('name,value1,value2'),
six.b('a,1,0'),
six.b('a,1,0'),
six.b('b,2,0'),
six.b('b,2,0'),
six.b('c,,0'),
six.b('c,,0')
])
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_output_header_when_extra_header_has_different_number_of_columns(self):
tmpfile1 = self.create_file_with_data(sample_data_with_header)
tmpfile2 = self.create_file_with_data(generate_sample_data_with_header(six.b('name,value1')))
cmd = Q_EXECUTABLE + ' -d , "select name,value1,value2 from (select * from %s UNION ALL select * from %s) order by name" -H -O' % (tmpfile1.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 0)
        self.assertEqual(o, [
six.b('name,value1,value2'),
six.b('a,1,0'),
six.b('a,1,0'),
six.b('b,2,0'),
six.b('b,2,0'),
six.b('c,,0'),
six.b('c,,0')
])
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
class ParsingModeTests(AbstractQTestCase):
def test_strict_mode_column_count_mismatch_error(self):
tmpfile = self.create_file_with_data(uneven_ls_output)
cmd = Q_EXECUTABLE + ' -m strict "select count(*) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertTrue(six.b("Column Count is expected to identical") in e[0])
self.cleanup(tmpfile)
def test_strict_mode_too_large_specific_column_count(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -m strict -c 4 "select count(*) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(
e[0], six.b("Strict mode. Column count is expected to be 4 but is 3"))
self.cleanup(tmpfile)
def test_strict_mode_too_small_specific_column_count(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , -m strict -c 2 "select count(*) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(
e[0], six.b("Strict mode. Column count is expected to be 2 but is 3"))
self.cleanup(tmpfile)
def test_relaxed_mode_missing_columns_in_header(self):
tmpfile = self.create_file_with_data(
sample_data_with_missing_header_names)
cmd = Q_EXECUTABLE + ' -d , -m relaxed "select count(*) from %s" -H -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 7)
self.assertEqual(len(e), 0)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s') % six.b(tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `name` - text'))
self.assertEqual(o[5],six.b(' `value1` - int'))
self.assertEqual(o[6],six.b(' `c3` - int'))
self.cleanup(tmpfile)
def test_strict_mode_missing_columns_in_header(self):
tmpfile = self.create_file_with_data(
sample_data_with_missing_header_names)
cmd = Q_EXECUTABLE + ' -d , -m strict "select count(*) from %s" -H -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode, 0)
self.assertEqual(len(o), 0)
self.assertEqual(len(e), 1)
self.assertEqual(
e[0], six.b('Strict mode. Header row contains less columns than expected column count(2 vs 3)'))
self.cleanup(tmpfile)
def test_output_delimiter_with_missing_fields(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select * from %s" -D ";"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('a;1;0'))
self.assertEqual(o[1], six.b('b;2;0'))
self.assertEqual(o[2], six.b('c;;0'))
self.cleanup(tmpfile)
def test_handling_of_null_integers(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select avg(c2) from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('1.5'))
self.cleanup(tmpfile)
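    # Sketch of the SQL behavior test_handling_of_null_integers relies on:
    # SQLite's avg() skips NULLs, so the average of (1, 2, NULL) is 1.5.
    def _avg_ignores_nulls_sketch(self):
        import sqlite3
        db = sqlite3.connect(':memory:')
        db.execute('create table t (c2 int)')
        db.executemany('insert into t values (?)', [(1,), (2,), (None,)])
        assert db.execute('select avg(c2) from t').fetchone()[0] == 1.5
        db.close()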
def test_empty_integer_values_converted_to_null(self):
tmpfile = self.create_file_with_data(sample_data_no_header)
cmd = Q_EXECUTABLE + ' -d , "select * from %s where c2 is null"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('c,,0'))
self.cleanup(tmpfile)
def test_empty_string_values_not_converted_to_null(self):
tmpfile = self.create_file_with_data(
sample_data_with_empty_string_no_header)
cmd = Q_EXECUTABLE + ' -d , "select * from %s where c2 == %s"' % (
tmpfile.name, "''")
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('c,,0'))
self.cleanup(tmpfile)
def test_relaxed_mode_detected_columns(self):
tmpfile = self.create_file_with_data(uneven_ls_output)
cmd = Q_EXECUTABLE + ' -m relaxed "select count(*) from %s" -A' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
column_rows = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(column_rows), 11)
column_tuples = [x.strip().split(six.b(" ")) for x in column_rows]
column_info = [(x[0], x[2]) for x in column_tuples]
column_names = [x[0] for x in column_tuples]
column_types = [x[2] for x in column_tuples]
self.assertEqual(column_names, [six.b('`c{}`'.format(x)) for x in range(1, 12)])
self.assertEqual(column_types, list(map(lambda x:six.b(x),[
'text', 'int', 'text', 'text', 'int', 'text', 'int', 'int', 'text', 'text', 'text'])))
self.cleanup(tmpfile)
def test_relaxed_mode_detected_columns_with_specific_column_count(self):
tmpfile = self.create_file_with_data(uneven_ls_output)
cmd = Q_EXECUTABLE + ' -m relaxed "select count(*) from %s" -A -c 9' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
column_rows = o[o.index(six.b(' Fields:'))+1:]
self.assertEqual(len(column_rows), 9)
column_tuples = [x.strip().split(six.b(" ")) for x in column_rows]
column_info = [(x[0], x[2]) for x in column_tuples]
column_names = [x[0] for x in column_tuples]
column_types = [x[2] for x in column_tuples]
self.assertEqual(column_names, [six.b('`c{}`'.format(x)) for x in range(1, 10)])
self.assertEqual(
column_types, list(map(lambda x:six.b(x),['text', 'int', 'text', 'text', 'int', 'text', 'int', 'int', 'text'])))
self.cleanup(tmpfile)
def test_relaxed_mode_last_column_data_with_specific_column_count(self):
tmpfile = self.create_file_with_data(uneven_ls_output)
cmd = Q_EXECUTABLE + ' -m relaxed "select c9 from %s" -c 9' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 9)
self.assertEqual(len(e), 0)
expected_output = list(map(lambda x:six.b(x),["/selinux", "/mnt", "/srv", "/lost+found", '"/initrd.img.old -> /boot/initrd.img-3.8.0-19-generic"',
"/cdrom", "/home", '"/vmlinuz -> boot/vmlinuz-3.8.0-19-generic"', '"/initrd.img -> boot/initrd.img-3.8.0-19-generic"']))
self.assertEqual(o, expected_output)
self.cleanup(tmpfile)
def test_1_column_warning_in_relaxed_mode(self):
tmpfile = self.create_file_with_data(one_column_data)
cmd = Q_EXECUTABLE + ' -m relaxed "select c1 from %s" -d ,' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('data without commas 1'))
self.assertEqual(o[1],six.b('data without commas 2'))
self.cleanup(tmpfile)
def test_1_column_warning_in_strict_mode(self):
tmpfile = self.create_file_with_data(one_column_data)
cmd = Q_EXECUTABLE + ' -m relaxed "select c1 from %s" -d , -m strict' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('data without commas 1'))
self.assertEqual(o[1],six.b('data without commas 2'))
self.cleanup(tmpfile)
def test_1_column_warning_suppression_in_relaxed_mode_when_column_count_is_specific(self):
tmpfile = self.create_file_with_data(one_column_data)
cmd = Q_EXECUTABLE + ' -m relaxed "select c1 from %s" -d , -m relaxed -c 1' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('data without commas 1'))
self.assertEqual(o[1],six.b('data without commas 2'))
self.cleanup(tmpfile)
def test_1_column_warning_suppression_in_strict_mode_when_column_count_is_specific(self):
tmpfile = self.create_file_with_data(one_column_data)
cmd = Q_EXECUTABLE + ' -m relaxed "select c1 from %s" -d , -m strict -c 1' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o),2)
self.assertEqual(o[0],six.b('data without commas 1'))
self.assertEqual(o[1],six.b('data without commas 2'))
self.cleanup(tmpfile)
def test_fluffy_mode__as_relaxed_mode(self):
tmpfile = self.create_file_with_data(uneven_ls_output)
cmd = Q_EXECUTABLE + ' -m relaxed "select c9 from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 9)
self.assertEqual(len(e), 0)
expected_output = list(map(lambda x:six.b(x),["/selinux", "/mnt", "/srv", "/lost+found",
"/initrd.img.old", "/cdrom", "/home", "/vmlinuz", "/initrd.img"]))
self.assertEqual(o, expected_output)
self.cleanup(tmpfile)
def test_relaxed_mode_column_count_mismatch__was_previously_fluffy_mode_test(self):
data_row = six.b("column1 column2 column3 column4")
data_list = [data_row] * 1000
data_list[950] = six.b("column1 column2 column3 column4 column5")
tmpfile = self.create_file_with_data(six.b("\n").join(data_list))
cmd = Q_EXECUTABLE + ' -m relaxed "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
self.assertEqual(len(o),1000)
self.assertEqual(len(e),0)
self.assertEqual(o[950],six.b('column1 column2 column3 "column4 column5"'))
self.cleanup(tmpfile)
def test_strict_mode_column_count_mismatch__less_columns(self):
data_row = six.b("column1 column2 column3 column4")
data_list = [data_row] * 1000
data_list[750] = six.b("column1 column3 column4")
tmpfile = self.create_file_with_data(six.b("\n").join(data_list))
cmd = Q_EXECUTABLE + ' -m strict "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertTrue(e[0].startswith(six.b("Strict mode - Expected 4 columns instead of 3 columns")))
self.assertTrue(six.b(' row 751.') in e[0])
self.cleanup(tmpfile)
def test_strict_mode_column_count_mismatch__more_columns(self):
data_row = six.b("column1 column2 column3 column4")
data_list = [data_row] * 1000
data_list[750] = six.b("column1 column2 column3 column4 column5")
tmpfile = self.create_file_with_data(six.b("\n").join(data_list))
cmd = Q_EXECUTABLE + ' -m strict "select * from %s"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertNotEqual(retcode,0)
self.assertEqual(len(o),0)
self.assertEqual(len(e),1)
self.assertTrue(e[0].startswith(six.b("Strict mode - Expected 4 columns instead of 5 columns")))
self.assertTrue(six.b(' row 751.') in e[0])
self.cleanup(tmpfile)
class FormattingTests(AbstractQTestCase):
def test_column_formatting(self):
# TODO Decide if this breaking change is reasonable
#cmd = 'seq 1 10 | ' + Q_EXECUTABLE + ' -f 1=%4.3f,2=%4.3f "select sum(c1),avg(c1) from -" -c 1'
cmd = 'seq 1 10 | ' + Q_EXECUTABLE + ' -f 1={:4.3f},2={:4.3f} "select sum(c1),avg(c1) from -" -c 1'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 1)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('55.000 5.500'))
def test_column_formatting_with_output_header(self):
perl_regex = "'s/1\n/column_name\n1\n/;'"
# TODO Decide if this breaking change is reasonable
#cmd = 'seq 1 10 | perl -pe ' + perl_regex + ' | ' + Q_EXECUTABLE + ' -f 1=%4.3f,2=%4.3f "select sum(column_name) mysum,avg(column_name) myavg from -" -c 1 -H -O'
cmd = 'seq 1 10 | LANG=C perl -pe ' + perl_regex + ' | ' + Q_EXECUTABLE + ' -f 1={:4.3f},2={:4.3f} "select sum(column_name) mysum,avg(column_name) myavg from -" -c 1 -H -O'
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('mysum myavg'))
self.assertEqual(o[1], six.b('55.000 5.500'))
    def py3_test_successfully_parse_universal_newlines_without_explicit_flag(self):
def list_as_byte_list(l):
return list(map(lambda x:six.b(x),l))
expected_output = list(map(lambda x:list_as_byte_list(x),[['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-May-07', '6850000', 'USD', 'b'],
['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-Oct-06', '6000000', 'USD', 'a'],
['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-Jan-08', '25000000', 'USD', 'c'],
['mycityfaces', 'MyCityFaces', '7', 'web', 'Scottsdale', 'AZ', '1-Jan-08', '50000', 'USD', 'seed'],
['flypaper', 'Flypaper', '', 'web', 'Phoenix', 'AZ', '1-Feb-08', '3000000', 'USD', 'a'],
['infusionsoft', 'Infusionsoft', '105', 'software', 'Gilbert', 'AZ', '1-Oct-07', '9000000', 'USD', 'a']]))
data = six.b('permalink,company,numEmps,category,city,state,fundedDate,raisedAmt,raisedCurrency,round\rlifelock,LifeLock,,web,Tempe,AZ,1-May-07,6850000,USD,b\rlifelock,LifeLock,,web,Tempe,AZ,1-Oct-06,6000000,USD,a\rlifelock,LifeLock,,web,Tempe,AZ,1-Jan-08,25000000,USD,c\rmycityfaces,MyCityFaces,7,web,Scottsdale,AZ,1-Jan-08,50000,USD,seed\rflypaper,Flypaper,,web,Phoenix,AZ,1-Feb-08,3000000,USD,a\rinfusionsoft,Infusionsoft,105,software,Gilbert,AZ,1-Oct-07,9000000,USD,a')
tmp_data_file = self.create_file_with_data(data)
cmd = Q_EXECUTABLE + ' -d , -H "select * from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 6)
actual_output = list(map(lambda row: row.split(six.b(",")),o))
self.assertEqual(actual_output,expected_output)
self.cleanup(tmp_data_file)
    test_parsing_universal_newlines_without_explicit_flag = py3_test_successfully_parse_universal_newlines_without_explicit_flag
def test_universal_newlines_parsing_flag(self):
def list_as_byte_list(l):
return list(map(lambda x:six.b(x),l))
expected_output = list(map(lambda x:list_as_byte_list(x),[['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-May-07', '6850000', 'USD', 'b'],
['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-Oct-06', '6000000', 'USD', 'a'],
['lifelock', 'LifeLock', '', 'web', 'Tempe', 'AZ', '1-Jan-08', '25000000', 'USD', 'c'],
['mycityfaces', 'MyCityFaces', '7', 'web', 'Scottsdale', 'AZ', '1-Jan-08', '50000', 'USD', 'seed'],
['flypaper', 'Flypaper', '', 'web', 'Phoenix', 'AZ', '1-Feb-08', '3000000', 'USD', 'a'],
['infusionsoft', 'Infusionsoft', '105', 'software', 'Gilbert', 'AZ', '1-Oct-07', '9000000', 'USD', 'a']]))
data = six.b('permalink,company,numEmps,category,city,state,fundedDate,raisedAmt,raisedCurrency,round\rlifelock,LifeLock,,web,Tempe,AZ,1-May-07,6850000,USD,b\rlifelock,LifeLock,,web,Tempe,AZ,1-Oct-06,6000000,USD,a\rlifelock,LifeLock,,web,Tempe,AZ,1-Jan-08,25000000,USD,c\rmycityfaces,MyCityFaces,7,web,Scottsdale,AZ,1-Jan-08,50000,USD,seed\rflypaper,Flypaper,,web,Phoenix,AZ,1-Feb-08,3000000,USD,a\rinfusionsoft,Infusionsoft,105,software,Gilbert,AZ,1-Oct-07,9000000,USD,a')
tmp_data_file = self.create_file_with_data(data)
cmd = Q_EXECUTABLE + ' -d , -H -U "select permalink,company,numEmps,category,city,state,fundedDate,raisedAmt,raisedCurrency,round from %s"' % tmp_data_file.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode,0)
        if len(e) in (1, 2):
            # In python 3.7, there's a deprecation warning for the 'U' file opening mode, which is ok for now
            self.assertTrue(b"DeprecationWarning: 'U' mode is deprecated" in e[0])
elif len(e) != 0:
# Nothing should be output to stderr in other versions
self.assertTrue(False,msg='Unidentified output in stderr')
self.assertEqual(len(o), 6)
actual_output = list(map(lambda row: row.split(six.b(",")),o))
self.assertEqual(actual_output,expected_output)
self.cleanup(tmp_data_file)
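    # Sketch of the newline normalization the two tests above exercise: in
    # Python text mode with newline=None, lone '\r' separators are translated
    # to '\n' (universal newlines), which is the behavior the -U flag requests.
    def _universal_newlines_sketch(self, filename):
        import io
        with io.open(filename, 'r', newline=None) as f:
            return f.read().splitlines()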
class SqlTests(AbstractQTestCase):
def test_find_example(self):
tmpfile = self.create_file_with_data(find_output)
cmd = Q_EXECUTABLE + ' "select c5,c6,sum(c7)/1024.0/1024 as total from %s group by c5,c6 order by total desc"' % tmpfile.name
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(len(e), 0)
self.assertEqual(o[0], six.b('mapred mapred 0.9389581680297852'))
self.assertEqual(o[1], six.b('root root 0.02734375'))
self.assertEqual(o[2], six.b('harel harel 0.010888099670410156'))
self.cleanup(tmpfile)
def test_join_example(self):
cmd = Q_EXECUTABLE + ' "select myfiles.c8,emails.c2 from {0}/exampledatafile myfiles join {0}/group-emails-example emails on (myfiles.c4 = emails.c1) where myfiles.c8 = \'ppp\'"'.format(EXAMPLES)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 2)
self.assertEqual(o[0], six.b('ppp dip.1@otherdomain.com'))
self.assertEqual(o[1], six.b('ppp dip.2@otherdomain.com'))
def test_join_example_with_output_header(self):
cmd = Q_EXECUTABLE + ' -O "select myfiles.c8 aaa,emails.c2 bbb from {0}/exampledatafile myfiles join {0}/group-emails-example emails on (myfiles.c4 = emails.c1) where myfiles.c8 = \'ppp\'"'.format(EXAMPLES)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(o), 3)
self.assertEqual(o[0], six.b('aaa bbb'))
self.assertEqual(o[1], six.b('ppp dip.1@otherdomain.com'))
self.assertEqual(o[2], six.b('ppp dip.2@otherdomain.com'))
def test_self_join1(self):
tmpfile = self.create_file_with_data(six.b("\n").join([six.b("{} 9000".format(i)) for i in range(0,10)]))
cmd = Q_EXECUTABLE + ' "select * from %s a1 join %s a2 on (a1.c1 = a2.c1)"' % (tmpfile.name,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 10)
self.cleanup(tmpfile)
def test_self_join_reuses_table(self):
tmpfile = self.create_file_with_data(six.b("\n").join([six.b("{} 9000".format(i)) for i in range(0,10)]))
cmd = Q_EXECUTABLE + ' "select * from %s a1 join %s a2 on (a1.c1 = a2.c1)" -A' % (tmpfile.name,tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 6)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s') % six.b(tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `c1` - int'))
self.assertEqual(o[5],six.b(' `c2` - int'))
self.cleanup(tmpfile)
def test_self_join2(self):
tmpfile1 = self.create_file_with_data(six.b("\n").join([six.b("{} 9000".format(i)) for i in range(0,10)]))
cmd = Q_EXECUTABLE + ' "select * from %s a1 join %s a2 on (a1.c2 = a2.c2)"' % (tmpfile1.name,tmpfile1.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 10*10)
self.cleanup(tmpfile1)
tmpfile2 = self.create_file_with_data(six.b("\n").join([six.b("{} 9000".format(i)) for i in range(0,10)]))
cmd = Q_EXECUTABLE + ' "select * from %s a1 join %s a2 on (a1.c2 = a2.c2) join %s a3 on (a1.c2 = a3.c2)"' % (tmpfile2.name,tmpfile2.name,tmpfile2.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 10*10*10)
self.cleanup(tmpfile2)
def test_disable_column_type_detection(self):
tmpfile = self.create_file_with_data(six.b('''regular_text,text_with_digits1,text_with_digits2,float_number
"regular text 1",67,"67",12.3
"regular text 2",067,"067",22.3
"regular text 3",123,"123",33.4
"regular text 4",-123,"-123",0122.2
'''))
# Check original column type detection
cmd = Q_EXECUTABLE + ' -A -d , -H "select * from %s"' % (tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 8)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1], six.b(' Sources:'))
self.assertEqual(o[2], six.b(' source_type: file source: %s') % six.b(tmpfile.name))
self.assertEqual(o[3], six.b(' Fields:'))
self.assertEqual(o[4], six.b(' `regular_text` - text'))
self.assertEqual(o[5], six.b(' `text_with_digits1` - int'))
self.assertEqual(o[6], six.b(' `text_with_digits2` - int'))
self.assertEqual(o[7], six.b(' `float_number` - real'))
# Check column types detected when actual detection is disabled
cmd = Q_EXECUTABLE + ' -A -d , -H --as-text "select * from %s"' % (tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 8)
self.assertEqual(o[0],six.b('Table: %s' % tmpfile.name))
self.assertEqual(o[1],six.b(' Sources:'))
self.assertEqual(o[2],six.b(' source_type: file source: %s') % six.b(tmpfile.name))
self.assertEqual(o[3],six.b(' Fields:'))
self.assertEqual(o[4],six.b(' `regular_text` - text'))
self.assertEqual(o[5],six.b(' `text_with_digits1` - text'))
self.assertEqual(o[6],six.b(' `text_with_digits2` - text'))
self.assertEqual(o[7],six.b(' `float_number` - text'))
# Get actual data with regular detection
cmd = Q_EXECUTABLE + ' -d , -H "select * from %s"' % (tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 4)
self.assertEqual(o[0],six.b("regular text 1,67,67,12.3"))
self.assertEqual(o[1],six.b("regular text 2,67,67,22.3"))
self.assertEqual(o[2],six.b("regular text 3,123,123,33.4"))
self.assertEqual(o[3],six.b("regular text 4,-123,-123,122.2"))
# Get actual data without detection
cmd = Q_EXECUTABLE + ' -d , -H --as-text "select * from %s"' % (tmpfile.name)
retcode, o, e = run_command(cmd)
self.assertEqual(retcode, 0)
self.assertEqual(len(e), 0)
self.assertEqual(len(o), 4)
self.assertEqual(o[0],six.b("regular text 1,67,67,12.3"))
self.assertEqual(o[1],six.b("regular text 2,067,067,22.3"))
self.assertEqual(o[2],six.b("regular text 3,123,123,33.4"))
self.assertEqual(o[3],six.b("regular text 4,-123,-123,0122.2"))
self.cleanup(tmpfile)
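    # Simplified sketch (an assumption, not q's actual implementation) of the
    # per-column type inference that --as-text disables: try int, then float,
    # and fall back to text. Note that int('067') == 67 in Python, which is
    # why text_with_digits1 above is detected as int and loses its leading zero.
    def _naive_type_inference_sketch(self, value):
        for candidate in (int, float):
            try:
                candidate(value)
                return candidate.__name__
            except ValueError:
                pass
        return 'text'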
class BasicModuleTests(AbstractQTestCase):
def test_engine_isolation(self):
tmpfile1 = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
tmpfile2 = self.create_file_with_data(six.b("d e f\n10 20 30\n40 50 60"))
# Run file 1 on engine 1
q1 = QTextAsData(QInputParams(skip_header=True,delimiter=' '))
r = q1.execute('select * from %s' % tmpfile1.name)
print("QueryQuery",file=sys.stdout)
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),2)
self.assertEqual(r.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r.data,[(1,2,3),(4,5,6)])
self.assertTrue(tmpfile1.name in r.metadata.table_structures)
self.assertTrue(tmpfile1.name in r.metadata.new_table_structures)
self.assertEqual(r.metadata.table_structures[tmpfile1.name].atomic_fns,[tmpfile1.name])
self.assertEqual(r.metadata.table_structures[tmpfile1.name].source_type,'file')
self.assertEqual(r.metadata.table_structures[tmpfile1.name].source,tmpfile1.name)
# run file 1 on engine 2
q2 = QTextAsData(QInputParams(skip_header=True,delimiter=' '))
r2 = q2.execute('select * from %s' % tmpfile1.name)
print("QueryQuery",file=sys.stdout)
self.assertTrue(r2.status == 'ok')
self.assertEqual(len(r2.warnings),0)
self.assertEqual(len(r2.data),2)
self.assertEqual(r2.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r2.data,[(1,2,3),(4,5,6)])
self.assertTrue(tmpfile1.name in r2.metadata.table_structures)
self.assertTrue(tmpfile1.name in r2.metadata.new_table_structures)
self.assertEqual(r2.metadata.table_structures[tmpfile1.name].atomic_fns,[tmpfile1.name])
self.assertEqual(r2.metadata.table_structures[tmpfile1.name].source_type,'file')
self.assertEqual(r2.metadata.table_structures[tmpfile1.name].source,tmpfile1.name)
# run file 2 on engine 1
r3 = q1.execute('select * from %s' % tmpfile2.name)
print("QueryQuery",file=sys.stdout)
print(r3)
self.assertTrue(r3.status == 'ok')
self.assertEqual(len(r3.warnings),0)
self.assertEqual(len(r3.data),2)
self.assertEqual(r3.metadata.output_column_name_list,['d','e','f'])
self.assertEqual(r3.data,[(10,20,30),(40,50,60)])
self.assertTrue(tmpfile2.name in r3.metadata.table_structures)
self.assertTrue(tmpfile2.name in r3.metadata.new_table_structures)
self.assertEqual(r3.metadata.table_structures[tmpfile2.name].atomic_fns,[tmpfile2.name])
self.assertEqual(r3.metadata.table_structures[tmpfile2.name].source,tmpfile2.name)
self.assertEqual(r3.metadata.table_structures[tmpfile2.name].source_type,'file')
q1.done()
q2.done()
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_simple_query(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '))
r = q.execute('select * from %s' % tmpfile.name)
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),2)
self.assertEqual(r.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r.data,[(1,2,3),(4,5,6)])
self.assertTrue(tmpfile.name in r.metadata.table_structures)
self.assertTrue(tmpfile.name in r.metadata.new_table_structures)
self.assertEqual(r.metadata.table_structures[tmpfile.name].atomic_fns,[tmpfile.name])
self.assertEqual(r.metadata.table_structures[tmpfile.name].source_type,'file')
self.assertEqual(r.metadata.table_structures[tmpfile.name].source,tmpfile.name)
q.done()
self.cleanup(tmpfile)
def test_loaded_data_reuse(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '))
r1 = q.execute('select * from %s' % tmpfile.name)
r2 = q.execute('select * from %s' % tmpfile.name)
self.assertTrue(r1.status == 'ok')
self.assertEqual(len(r1.warnings),0)
self.assertEqual(len(r1.data),2)
self.assertEqual(r1.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r1.data,[(1,2,3),(4,5,6)])
self.assertTrue(tmpfile.name in r1.metadata.table_structures)
self.assertTrue(tmpfile.name in r1.metadata.new_table_structures)
self.assertEqual(r1.metadata.table_structures[tmpfile.name].atomic_fns,[tmpfile.name])
self.assertEqual(r1.metadata.table_structures[tmpfile.name].source_type,'file')
self.assertEqual(r1.metadata.table_structures[tmpfile.name].source,tmpfile.name)
self.assertTrue(r2.status == 'ok')
self.assertTrue(tmpfile.name in r2.metadata.table_structures)
self.assertTrue(tmpfile.name not in r2.metadata.new_table_structures)
self.assertEqual(r2.data,r1.data)
        self.assertEqual(r2.metadata.output_column_name_list,r1.metadata.output_column_name_list)
self.assertEqual(len(r2.warnings),0)
q.done()
self.cleanup(tmpfile)
def test_stdin_injection(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
data_streams_dict = {
'-': DataStream('stdin','-',codecs.open(tmpfile.name,'rb',encoding='utf-8'))
}
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '),data_streams_dict=data_streams_dict)
r = q.execute('select * from -')
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),2)
self.assertEqual(r.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r.data,[(1,2,3),(4,5,6)])
self.assertEqual(r.metadata.new_table_structures['-'],r.metadata.table_structures['-'])
self.assertEqual(r.metadata.table_structures['-'].column_names,['a','b','c'])
self.assertEqual(r.metadata.table_structures['-'].python_column_types,[int,int,int])
self.assertEqual(r.metadata.table_structures['-'].sqlite_column_types,['int','int','int'])
self.assertEqual(r.metadata.table_structures['-'].source_type,'data-stream')
self.assertEqual(r.metadata.table_structures['-'].source,'stdin')
q.done()
self.cleanup(tmpfile)
def test_named_stdin_injection(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
data_streams_dict = {
'my_stdin_data': DataStream('my_stdin_data','my_stdin_data',codecs.open(tmpfile.name,'rb',encoding='utf-8'))
}
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '),data_streams_dict=data_streams_dict)
r = q.execute('select a from my_stdin_data')
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),2)
self.assertEqual(r.metadata.output_column_name_list,['a'])
self.assertEqual(r.data,[(1,),(4,)])
self.assertTrue('my_stdin_data' in r.metadata.table_structures)
self.assertTrue('my_stdin_data' in r.metadata.new_table_structures)
self.assertEqual(r.metadata.table_structures['my_stdin_data'].qtable_name,'my_stdin_data')
q.done()
self.cleanup(tmpfile)
def test_data_stream_isolation(self):
tmpfile1 = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
tmpfile2 = self.create_file_with_data(six.b("d e f\n7 8 9\n10 11 12"))
data_streams_dict = {
'a-': DataStream('a-','a-',codecs.open(tmpfile1.name, 'rb', encoding='utf-8')),
'b-': DataStream('b-','b-',codecs.open(tmpfile2.name, 'rb', encoding='utf-8'))
}
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '),data_streams_dict=data_streams_dict)
r1 = q.execute('select * from a-')
self.assertTrue(r1.status == 'ok')
self.assertEqual(len(r1.warnings),0)
self.assertEqual(len(r1.data),2)
self.assertEqual(r1.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r1.data,[(1,2,3),(4,5,6)])
self.assertTrue('a-' in r1.metadata.table_structures)
self.assertEqual(len(r1.metadata.table_structures),1)
self.assertEqual(r1.metadata.table_structures['a-'].source_type, 'data-stream')
self.assertEqual(r1.metadata.table_structures['a-'].source, 'a-')
self.assertEqual(r1.metadata.table_structures['a-'].column_names, ['a','b','c'])
self.assertEqual(r1.metadata.table_structures['a-'].python_column_types, [int,int,int])
self.assertEqual(r1.metadata.table_structures['a-'].sqlite_column_types, ['int','int','int'])
r2 = q.execute('select * from b-')
self.assertTrue(r2.status == 'ok')
self.assertEqual(len(r2.warnings),0)
self.assertEqual(len(r2.data),2)
self.assertEqual(r2.metadata.output_column_name_list,['d','e','f'])
self.assertEqual(r2.data,[(7,8,9),(10,11,12)])
self.assertEqual(len(r1.metadata.table_structures),2)
self.assertTrue('b-' in r1.metadata.table_structures)
self.assertEqual(r1.metadata.table_structures['b-'].source_type, 'data-stream')
self.assertEqual(r1.metadata.table_structures['b-'].source, 'b-')
self.assertEqual(r1.metadata.table_structures['b-'].column_names, ['d','e','f'])
self.assertEqual(r1.metadata.table_structures['b-'].python_column_types, [int,int,int])
self.assertEqual(r1.metadata.table_structures['b-'].sqlite_column_types, ['int','int','int'])
q.done()
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_multiple_stdin_injection(self):
tmpfile1 = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
tmpfile2 = self.create_file_with_data(six.b("d e f\n7 8 9\n10 11 12"))
data_streams_dict = {
'my_stdin_data1': DataStream('my_stdin_data1','my_stdin_data1',codecs.open(tmpfile1.name,'rb',encoding='utf-8')),
'my_stdin_data2': DataStream('my_stdin_data2','my_stdin_data2',codecs.open(tmpfile2.name,'rb',encoding='utf-8'))
}
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '),data_streams_dict=data_streams_dict)
r1 = q.execute('select * from my_stdin_data1')
self.assertTrue(r1.status == 'ok')
self.assertEqual(len(r1.warnings),0)
self.assertEqual(len(r1.data),2)
self.assertEqual(r1.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r1.data,[(1,2,3),(4,5,6)])
self.assertTrue('my_stdin_data1' in r1.metadata.table_structures)
self.assertTrue('my_stdin_data1' in r1.metadata.new_table_structures)
self.assertEqual(r1.metadata.table_structures['my_stdin_data1'].qtable_name,'my_stdin_data1')
r2 = q.execute('select * from my_stdin_data2')
self.assertTrue(r2.status == 'ok')
self.assertEqual(len(r2.warnings),0)
self.assertEqual(len(r2.data),2)
self.assertEqual(r2.metadata.output_column_name_list,['d','e','f'])
self.assertEqual(r2.data,[(7,8,9),(10,11,12)])
# There should be another data load, even though it's the same 'filename' as before
self.assertTrue('my_stdin_data2' in r2.metadata.table_structures)
self.assertTrue('my_stdin_data2' in r2.metadata.new_table_structures)
self.assertEqual(r2.metadata.table_structures['my_stdin_data2'].qtable_name,'my_stdin_data2')
r3 = q.execute('select aa.*,bb.* from my_stdin_data1 aa join my_stdin_data2 bb')
self.assertTrue(r3.status == 'ok')
self.assertEqual(len(r3.warnings),0)
self.assertEqual(len(r3.data),4)
self.assertEqual(r3.metadata.output_column_name_list,['a','b','c','d','e','f'])
self.assertEqual(r3.data,[(1,2,3,7,8,9),(1,2,3,10,11,12),(4,5,6,7,8,9),(4,5,6,10,11,12)])
self.assertTrue('my_stdin_data1' in r3.metadata.table_structures)
self.assertTrue('my_stdin_data1' not in r3.metadata.new_table_structures)
q.done()
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_different_input_params_for_different_files(self):
tmpfile1 = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
tmpfile2 = self.create_file_with_data(six.b("7\t8\t9\n10\t11\t12"))
q = QTextAsData(QInputParams(skip_header=True,delimiter=' '))
q.load_data(tmpfile1.name,QInputParams(skip_header=True,delimiter=' '))
q.load_data(tmpfile2.name,QInputParams(skip_header=False,delimiter='\t'))
r = q.execute('select aa.*,bb.* from %s aa join %s bb' % (tmpfile1.name,tmpfile2.name))
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),4)
self.assertEqual(r.metadata.output_column_name_list,['a','b','c','c1','c2','c3'])
self.assertEqual(r.data,[(1,2,3,7,8,9),(1,2,3,10,11,12),(4,5,6,7,8,9),(4,5,6,10,11,12)])
self.assertTrue(tmpfile1.name not in r.metadata.new_table_structures)
self.assertTrue(tmpfile2.name not in r.metadata.new_table_structures)
q.done()
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_different_input_params_for_different_files_2(self):
tmpfile1 = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
tmpfile2 = self.create_file_with_data(six.b("7\t8\t9\n10\t11\t12"))
q = QTextAsData()
q.load_data(tmpfile1.name,QInputParams(skip_header=True,delimiter=' '))
q.load_data(tmpfile2.name,QInputParams(skip_header=False,delimiter='\t'))
r = q.execute('select aa.*,bb.* from %s aa join %s bb' % (tmpfile1.name,tmpfile2.name))
self.assertTrue(r.status == 'ok')
self.assertEqual(len(r.warnings),0)
self.assertEqual(len(r.data),4)
self.assertEqual(r.metadata.output_column_name_list,['a','b','c','c1','c2','c3'])
self.assertEqual(r.data,[(1,2,3,7,8,9),(1,2,3,10,11,12),(4,5,6,7,8,9),(4,5,6,10,11,12)])
self.assertTrue(tmpfile1.name not in r.metadata.new_table_structures)
self.assertTrue(tmpfile2.name not in r.metadata.new_table_structures)
q.done()
self.cleanup(tmpfile1)
self.cleanup(tmpfile2)
def test_input_params_override(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
default_input_params = QInputParams()
for k in default_input_params.__dict__.keys():
setattr(default_input_params,k,'GARBAGE')
q = QTextAsData(default_input_params)
r = q.execute('select * from %s' % tmpfile.name)
self.assertTrue(r.status == 'error')
overwriting_input_params = QInputParams(skip_header=True,delimiter=' ')
r2 = q.execute('select * from %s' % tmpfile.name,input_params=overwriting_input_params)
self.assertTrue(r2.status == 'ok')
self.assertEqual(len(r2.warnings),0)
self.assertEqual(len(r2.data),2)
self.assertEqual(r2.metadata.output_column_name_list,['a','b','c'])
self.assertEqual(r2.data,[(1,2,3),(4,5,6)])
self.assertTrue(tmpfile.name in r2.metadata.table_structures)
self.assertTrue(tmpfile.name in r2.metadata.new_table_structures)
self.assertEqual(r2.metadata.table_structures[tmpfile.name].atomic_fns,[tmpfile.name])
self.assertEqual(r2.metadata.table_structures[tmpfile.name].source,tmpfile.name)
self.assertEqual(r2.metadata.table_structures[tmpfile.name].source_type,'file')
q.done()
self.cleanup(tmpfile)
def test_input_params_merge(self):
input_params = QInputParams()
for k in input_params.__dict__.keys():
setattr(input_params,k,'GARBAGE')
merged_input_params = input_params.merged_with(QInputParams())
for k in merged_input_params.__dict__.keys():
self.assertTrue(getattr(merged_input_params,k) != 'GARBAGE')
for k in input_params.__dict__.keys():
self.assertTrue(getattr(merged_input_params,k) != 'GARBAGE')
def test_table_analysis_with_syntax_error(self):
q = QTextAsData()
q_output = q.analyze("bad syntax")
q.done()
self.assertTrue(q_output.status == 'error')
self.assertTrue(q_output.error.msg.startswith('query error'))
def test_execute_response(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
q = QTextAsData()
q_output = q.execute("select a,c from %s" % tmpfile.name,QInputParams(skip_header=True))
self.assertTrue(q_output.status == 'ok')
self.assertTrue(q_output.error is None)
self.assertEqual(len(q_output.warnings),0)
self.assertEqual(len(q_output.data),2)
self.assertEqual(q_output.data,[ (1,3),(4,6) ])
self.assertTrue(q_output.metadata is not None)
metadata = q_output.metadata
self.assertEqual(metadata.output_column_name_list, [ 'a','c'])
self.assertTrue(tmpfile.name in metadata.new_table_structures)
self.assertEqual(len(metadata.table_structures),1)
table_structure = metadata.new_table_structures[tmpfile.name]
self.assertEqual(table_structure.column_names,[ 'a','b','c'])
self.assertEqual(table_structure.python_column_types,[ int,int,int])
self.assertEqual(table_structure.sqlite_column_types,[ 'int','int','int'])
self.assertEqual(table_structure.qtable_name, tmpfile.name)
self.assertEqual(table_structure.atomic_fns,[tmpfile.name])
self.assertEqual(table_structure.source_type,'file')
self.assertEqual(table_structure.source,tmpfile.name)
q.done()
self.cleanup(tmpfile)
def test_analyze_response(self):
tmpfile = self.create_file_with_data(six.b("a b c\n1 2 3\n4 5 6"))
q = QTextAsData()
q_output = q.analyze("select a,c from %s" % tmpfile.name,QInputParams(skip_header=True))
self.assertTrue(q_output.status == 'ok')
self.assertTrue(q_output.error is None)
self.assertEqual(len(q_output.warnings),0)
self.assertEqual(len(q_output.data),2)
self.assertEqual(q_output.data,[ (1,3),(4,6) ])
self.assertTrue(q_output.metadata is not None)
metadata = q_output.metadata
self.assertEqual(metadata.output_column_name_list, [ 'a','c'])
self.assertEqual(len(metadata.table_structures),1)
self.assertTrue(tmpfile.name in metadata.new_table_structures)
table_structure = metadata.table_structures[tmpfile.name]
self.assertEqual(table_structure.column_names,[ 'a','b','c'])
self.assertEqual(table_structure.python_column_types,[ int,int,int])
self.assertEqual(table_structure.sqlite_column_types,[ 'int','int','int'])
self.assertEqual(table_structure.qtable_name, tmpfile.name)
self.assertEqual(table_structure.atomic_fns,[tmpfile.name])
self.assertEqual(table_structure.source_type,'file')
self.assertEqual(table_structure.source,tmpfile.name)
q.done()
self.cleanup(tmpfile)
def test_load_data_from_string_without_previous_data_load(self):
input_str = six.u('column1,column2,column3\n') + six.u('\n').join([six.u('value1,2.5,value3')] * 1000)
data_streams_dict = {
'my_data': DataStream('my_data_stream_id','my_data',six.StringIO(input_str))
}
q = QTextAsData(default_input_params=QInputParams(skip_header=True,delimiter=','),data_streams_dict=data_streams_dict)
q_output = q.execute('select column2,column3 from my_data')
self.assertTrue(q_output.status == 'ok')
self.assertTrue(q_output.error is None)
self.assertEqual(len(q_output.warnings),0)
        self.assertEqual(len(q_output.data),1000)
self.assertEqual(len(set(q_output.data)),1)
self.assertEqual(list(set(q_output.data))[0],(2.5,'value3'))
metadata = q_output.metadata
        self.assertEqual(metadata.output_column_name_list,['column2','column3'])
self.assertTrue('my_data' in metadata.new_table_structures)
self.assertEqual(len(metadata.table_structures),1)
table_structure = metadata.table_structures['my_data']
self.assertEqual(table_structure.column_names,['column1','column2','column3'])
self.assertEqual(table_structure.sqlite_column_types,['text','real','text'])
self.assertEqual(table_structure.python_column_types,[str,float,str])
self.assertEqual(table_structure.qtable_name, 'my_data')
self.assertEqual(table_structure.source_type, 'data-stream')
self.assertEqual(table_structure.source, 'my_data_stream_id')
q.done()
def test_load_data_from_string_with_previous_data_load(self):
input_str = six.u('column1,column2,column3\n') + six.u('\n').join([six.u('value1,2.5,value3')] * 1000)
data_streams_dict = {
'my_data': DataStream('a','my_data',six.StringIO(input_str))
}
q = QTextAsData(default_input_params=QInputParams(skip_header=True,delimiter=','),data_streams_dict=data_streams_dict)
        q.load_data('my_data',QInputParams(skip_header=True,delimiter=','))
q_output = q.execute('select column2,column3 from my_data')
self.assertTrue(q_output.status == 'ok')
self.assertTrue(q_output.error is None)
self.assertEqual(len(q_output.warnings),0)
        self.assertEqual(len(q_output.data),1000)
self.assertEqual(len(set(q_output.data)),1)
self.assertEqual(list(set(q_output.data))[0],(2.5,'value3'))
metadata = q_output.metadata
        self.assertEqual(metadata.output_column_name_list,['column2','column3'])
self.assertTrue('my_data' not in metadata.new_table_structures)
self.assertEqual(len(metadata.table_structures),1)
table_structure = metadata.table_structures['my_data']
self.assertEqual(table_structure.column_names,['column1','column2','column3'])
self.assertEqual(table_structure.sqlite_column_types,['text','real','text'])
self.assertEqual(table_structure.python_column_types,[str,float,str])
self.assertEqual(table_structure.qtable_name, 'my_data')
q.done()
class BenchmarkAttemptResults(object):
def __init__(self, attempt, lines, columns, duration,return_code):
self.attempt = attempt
self.lines = lines
self.columns = columns
self.duration = duration
self.return_code = return_code
def __str__(self):
return "{}".format(self.__dict__)
__repr__ = __str__
class BenchmarkResults(object):
def __init__(self, lines, columns, attempt_results, mean, stddev):
self.lines = lines
self.columns = columns
self.attempt_results = attempt_results
self.mean = mean
self.stddev = stddev
def __str__(self):
return "{}".format(self.__dict__)
__repr__ = __str__
@pytest.mark.benchmark
class BenchmarkTests(AbstractQTestCase):
BENCHMARK_DIR = os.environ.get('Q_BENCHMARK_DATA_DIR')
def _ensure_benchmark_data_dir_exists(self):
try:
os.mkdir(BenchmarkTests.BENCHMARK_DIR)
        except Exception:
            pass
def _create_benchmark_file_if_needed(self):
self._ensure_benchmark_data_dir_exists()
        if os.path.exists('{}/benchmark-file.csv'.format(BenchmarkTests.BENCHMARK_DIR)):
return
g = GzipFile('unit-file.csv.gz')
d = g.read().decode('utf-8')
f = open('{}/benchmark-file.csv'.format(BenchmarkTests.BENCHMARK_DIR), 'w')
for i in range(100):
f.write(d)
f.close()
def _prepare_test_file(self, lines, columns):
filename = '{}/_benchmark_data__lines_{}_columns_{}.csv'.format(BenchmarkTests.BENCHMARK_DIR,lines, columns)
if os.path.exists(filename):
return filename
c = ['c{}'.format(x + 1) for x in range(columns)]
# write a header line
ff = open(filename,'w')
ff.write(",".join(c))
ff.write('\n')
ff.close()
r, o, e = run_command('head -{} {}/benchmark-file.csv | {} -d , "select {} from -" >> {}'.format(lines, BenchmarkTests.BENCHMARK_DIR, Q_EXECUTABLE, ','.join(c), filename))
self.assertEqual(r, 0)
# Create file cache as part of preparation
r, o, e = run_command(Q_EXECUTABLE + ' -C readwrite -d , "select count(*) from %s"' % filename)
self.assertEqual(r, 0)
return filename
def _decide_result(self,attempt_results):
failed = list(filter(lambda a: a.return_code != 0,attempt_results))
if len(failed) == 0:
mean = sum([x.duration for x in attempt_results]) / len(attempt_results)
sum_squared = sum([(x.duration - mean)**2 for x in attempt_results])
ddof = 0
pvar = sum_squared / (len(attempt_results) - ddof)
stddev = pvar ** 0.5
else:
mean = None
stddev = None
return BenchmarkResults(
attempt_results[0].lines,
attempt_results[0].columns,
attempt_results,
mean,
stddev
)
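# Sanity check of the math above (illustrative values only, not part of the suite):
# for durations [1.0, 2.0, 3.0] the mean is 2.0 and, with ddof=0 (population variance),
# stddev = sqrt(((1-2)**2 + (2-2)**2 + (3-2)**2) / 3) ~= 0.8165. If any attempt has a
# non-zero return code, mean and stddev are deliberately left as None.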
def _perform_test_performance_matrix(self,name,generate_cmd_function):
results = []
benchmark_results_folder = os.environ.get("Q_BENCHMARK_RESULTS_FOLDER",'')
if benchmark_results_folder == "":
raise Exception("Q_BENCHMARK_RESULTS_FOLDER must be provided as an environment variable")
self._create_benchmark_file_if_needed()
for columns in [1, 5, 10, 20, 50, 100]:
for lines in [1, 10, 100, 1000, 10000, 100000, 1000000]:
attempt_results = []
for attempt in range(10):
filename = self._prepare_test_file(lines, columns)
if DEBUG:
print("Testing {}".format(filename))
t0 = time.time()
r, o, e = run_command(generate_cmd_function(filename,lines,columns))
duration = time.time() - t0
attempt_result = BenchmarkAttemptResults(attempt, lines, columns, duration, r)
attempt_results += [attempt_result]
if DEBUG:
print("Results: {}".format(attempt_result.__dict__))
final_result = self._decide_result(attempt_results)
results += [final_result]
series_fields = [six.u('lines'),six.u('columns')]
value_fields = [six.u('mean'),six.u('stddev')]
all_fields = series_fields + value_fields
output_filename = '{}/{}.benchmark-results'.format(benchmark_results_folder,name)
output_file = open(output_filename,'w')
for columns,g in itertools.groupby(sorted(results,key=lambda x:x.columns),key=lambda x:x.columns):
x = six.u("\t").join(series_fields + [six.u('{}_{}').format(name, f) for f in value_fields])
print(x,file = output_file)
for result in g:
print(six.u("\t").join(map(str,[getattr(result,f) for f in all_fields])),file=output_file)
output_file.close()
print("results have been written to : {}".format(output_filename))
if DEBUG:
print("RESULTS FOR {}".format(name))
print(open(output_filename,'r').read())
def test_q_matrix(self):
Q_BENCHMARK_NAME = os.environ.get('Q_BENCHMARK_NAME')
if Q_BENCHMARK_NAME is None:
raise Exception('Q_BENCHMARK_NAME must be provided as an env var')
def generate_q_cmd(data_filename, line_count, column_count):
Q_BENCHMARK_ADDITIONAL_PARAMS = os.environ.get('Q_BENCHMARK_ADDITIONAL_PARAMS') or ''
additional_params = ''
additional_params = additional_params + ' ' + Q_BENCHMARK_ADDITIONAL_PARAMS
return '{} -d , {} "select count(*) from {}"'.format(Q_EXECUTABLE,additional_params, data_filename)
self._perform_test_performance_matrix(Q_BENCHMARK_NAME,generate_q_cmd)
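# One plausible invocation of this benchmark (all paths and values below are
# hypothetical, shown only to illustrate the required environment variables):
#   Q_BENCHMARK_NAME=q-3.1.6 \
#   Q_BENCHMARK_RESULTS_FOLDER=/tmp/q-bench-results \
#   Q_BENCHMARK_DATA_DIR=/tmp/q-bench-data \
#   python3 -m pytest -m benchmark -k test_q_matrix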
def _get_textql_version(self):
r,o,e = run_command("textql --version")
if r != 0:
raise Exception("Could not find textql")
if len(e) != 0:
raise Exception("Errors while getting textql version")
return o[0]
def _get_octosql_version(self):
r,o,e = run_command("octosql --version")
if r != 0:
raise Exception("Could not find octosql")
if len(e) != 0:
raise Exception("Errors while getting octosql version")
version = re.findall('v[0-9]+\\.[0-9]+\\.[0-9]+',str(o[0],encoding='utf-8'))[0]
return version
def test_textql_matrix(self):
def generate_textql_cmd(data_filename,line_count,column_count):
return 'textql -dlm , -sql "select count(*)" {}'.format(data_filename)
name = 'textql_%s' % self._get_textql_version()
self._perform_test_performance_matrix(name,generate_textql_cmd)
def test_octosql_matrix(self):
config_fn = self.random_tmp_filename('octosql', 'config')
def generate_octosql_cmd(data_filename,line_count,column_count):
j = """
dataSources:
- name: bmdata
type: csv
config:
path: "{}"
headerRow: false
batchSize: 10000
""".format(data_filename)[1:]
f = open(config_fn,'w')
f.write(j)
f.close()
return 'octosql -c {} -o batch-csv "select count(*) from bmdata a"'.format(config_fn)
name = 'octosql_%s' % self._get_octosql_version()
self._perform_test_performance_matrix(name,generate_octosql_cmd)
def suite():
tl = unittest.TestLoader()
basic_stuff = tl.loadTestsFromTestCase(BasicTests)
parsing_mode = tl.loadTestsFromTestCase(ParsingModeTests)
sql = tl.loadTestsFromTestCase(SqlTests)
formatting = tl.loadTestsFromTestCase(FormattingTests)
basic_module_stuff = tl.loadTestsFromTestCase(BasicModuleTests)
save_db_to_disk_tests = tl.loadTestsFromTestCase(SaveDbToDiskTests)
user_functions_tests = tl.loadTestsFromTestCase(UserFunctionTests)
multi_header_tests = tl.loadTestsFromTestCase(MultiHeaderTests)
return unittest.TestSuite([basic_module_stuff, basic_stuff, parsing_mode, sql, formatting,save_db_to_disk_tests,multi_header_tests,user_functions_tests])
if __name__ == '__main__':
if len(sys.argv) > 1:
suite = unittest.TestSuite()
if '.' in sys.argv[1]:
c,m = sys.argv[1].split(".")
suite.addTest(globals()[c](m))
else:
tl = unittest.TestLoader()
tc = tl.loadTestsFromTestCase(globals()[sys.argv[1]])
suite = unittest.TestSuite([tc])
else:
suite = suite()
test_runner = unittest.TextTestRunner(verbosity=2)
result = test_runner.run(suite)
sys.exit(not result.wasSuccessful())
---- harelba_q/bin/q.py (repo: harelba/q, license: GPL-3.0) ----
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2021 Harel Ben-Attia
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details (doc/LICENSE contains
# a copy of it)
#
#
# Name : q (With respect to The Q Continuum)
# Author : Harel Ben-Attia - harelba@gmail.com, harelba @ github, @harelba on twitter
#
#
# q allows performing SQL-like statements on tabular text data.
#
# Its purpose is to bring SQL expressive power to manipulating text data using the Linux command line.
#
# Full Documentation and details in https://harelba.github.io/q/
#
# Run with --help for command line details
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import OrderedDict
from sqlite3.dbapi2 import OperationalError
from uuid import uuid4
q_version = '3.1.6'
#__all__ = [ 'QTextAsData' ]
import os
import sys
import sqlite3
import glob
from argparse import ArgumentParser
import codecs
import locale
import time
import re
from six.moves import configparser, range, filter
import traceback
import csv
import uuid
import math
import six
import io
import json
import datetime
import hashlib
if six.PY2:
assert False, 'Python 2 is no longer supported by q'
long = int
unicode = six.text_type
DEBUG = bool(os.environ.get('Q_DEBUG', None)) or '-V' in sys.argv
SQL_DEBUG = False
if DEBUG:
def xprint(*args,**kwargs):
print(datetime.datetime.utcnow().isoformat()," DEBUG ",*args,file=sys.stderr,**kwargs)
def iprint(*args,**kwargs):
print(datetime.datetime.utcnow().isoformat()," INFO ",*args,file=sys.stderr,**kwargs)
def sqlprint(*args,**kwargs):
pass
else:
def xprint(*args,**kwargs): pass
def iprint(*args,**kwargs): pass
def sqlprint(*args,**kwargs): pass
if SQL_DEBUG:
def sqlprint(*args,**kwargs):
print(datetime.datetime.utcnow().isoformat(), " SQL ", *args, file=sys.stderr, **kwargs)
def get_stdout_encoding(encoding_override=None):
if encoding_override is not None and encoding_override != 'none':
return encoding_override
if sys.stdout.isatty():
return sys.stdout.encoding
else:
return locale.getpreferredencoding()
SHOW_SQL = False
sha_algorithms = {
1 : hashlib.sha1,
224: hashlib.sha224,
256: hashlib.sha256,
384: hashlib.sha384,
512: hashlib.sha512
}
def sha(data,algorithm,encoding):
try:
f = sha_algorithms[algorithm]
return f(six.text_type(data).encode(encoding)).hexdigest()
except Exception as e:
print(e)
# For backward compatibility only (doesn't handle encoding well enough)
def sha1(data):
return hashlib.sha1(six.text_type(data).encode('utf-8')).hexdigest()
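# Illustrative usage only (file name hypothetical): the generic sha() above takes
# (data, algorithm, encoding), so inside a query it would be called as, e.g.:
#   q -d , "select sha(c1, 256, 'utf-8') from my-file.csv"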
# TODO Add caching of compiled regexps - Will be added after benchmarking capability is baked in
def regexp(regular_expression, data):
if data is not None:
if not isinstance(data, str) and not isinstance(data, unicode):
data = str(data)
return re.search(regular_expression, data) is not None
else:
return False
def regexp_extract(regular_expression, data,group_number):
if data is not None:
if not isinstance(data, str) and not isinstance(data, unicode):
data = str(data)
m = re.search(regular_expression, data)
if m is not None:
return m.groups()[group_number]
else:
return False
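# Illustrative usage only (file name hypothetical):
#   q -d , "select c1 from my-file.csv where regexp('^abc', c1)"
#   q -d , "select regexp_extract('v([0-9]+)\\.([0-9]+)', c2, 0) from my-file.csv"
# Note that the group number is zero-based, since m.groups() is indexed from 0.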
def md5(data,encoding):
m = hashlib.md5()
m.update(six.text_type(data).encode(encoding))
return m.hexdigest()
def sqrt(data):
return math.sqrt(data)
def power(data,p):
return data**p
def file_ext(data):
if data is None:
return None
return os.path.splitext(data)[1]
def file_folder(data):
if data is None:
return None
return os.path.split(data)[0]
def file_basename(data):
if data is None:
return None
return os.path.split(data)[1]
def file_basename_no_ext(data):
if data is None:
return None
return os.path.split(os.path.splitext(data)[0])[-1]
def percentile(l, p):
# TODO Alpha implementation, need to provide multiple interpolation methods, and add tests
if not l:
return None
k = p*(len(l) - 1)
f = math.floor(k)
c = math.ceil(k)
if c == f:
return l[int(k)]
return (c-k) * l[int(f)] + (k-f) * l[int(c)]
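# Worked example of the linear interpolation above: percentile([10, 20, 30, 40], 0.5)
# gives k = 0.5 * 3 = 1.5, f = 1, c = 2, so the result is
# (2 - 1.5) * 20 + (1.5 - 1) * 30 = 25.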
# TODO Streaming Percentile to prevent memory consumption blowup for large datasets
class StrictPercentile(object):
def __init__(self):
self.values = []
self.p = None
def step(self,value,p):
if self.p is None:
self.p = p
self.values.append(value)
def finalize(self):
if len(self.values) == 0 or (self.p < 0 or self.p > 1):
return None
else:
return percentile(sorted(self.values),self.p)
class StdevPopulation(object):
def __init__(self):
self.M = 0.0
self.S = 0.0
self.k = 0
def step(self, value):
try:
# Ignore nulls
if value is None:
return
val = float(value) # if fails, skips this iteration, which also ignores nulls
tM = self.M
self.k += 1
self.M += ((val - tM) / self.k)
self.S += ((val - tM) * (val - self.M))
except ValueError:
# TODO propagate udf errors to console
raise Exception("Data is not numeric when calculating stddev (%s)" % value)
def finalize(self):
if self.k <= 1: # avoid division by zero
return None
else:
return math.sqrt(self.S / (self.k))
class StdevSample(object):
def __init__(self):
self.M = 0.0
self.S = 0.0
self.k = 0
def step(self, value):
try:
# Ignore nulls
if value is None:
return
val = float(value) # if fails, skips this iteration, which also ignores nulls
tM = self.M
self.k += 1
self.M += ((val - tM) / self.k)
self.S += ((val - tM) * (val - self.M))
except ValueError:
# TODO propagate udf errors to console
raise Exception("Data is not numeric when calculating stddev (%s)" % value)
def finalize(self):
if self.k <= 1: # avoid division by zero
return None
else:
return math.sqrt(self.S / (self.k-1))
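# Both stddev aggregates above use Welford's online algorithm (M is the running mean,
# S the running sum of squared deviations). A quick illustrative check, not part of q:
#   s = StdevPopulation()
#   for v in [2, 4, 4, 4, 5, 5, 7, 9]: s.step(v)
#   s.finalize()  # -> 2.0 (population); StdevSample would give sqrt(32/7) ~= 2.138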
class FunctionType(object):
REGULAR = 1
AGG = 2
class UserFunctionDef(object):
def __init__(self,func_type,name,usage,description,func_or_obj,param_count):
self.func_type = func_type
self.name = name
self.usage = usage
self.description = description
self.func_or_obj = func_or_obj
self.param_count = param_count
user_functions = [
UserFunctionDef(FunctionType.REGULAR,
"regexp","regexp(<regular_expression>,<expr>) = <1|0>",
"Find regexp in string expression. Returns 1 if found or 0 if not",
regexp,
2),
UserFunctionDef(FunctionType.REGULAR,
"regexp_extract","regexp_extract(<regular_expression>,<expr>,group_number) = <substring|null>",
"Get regexp capture group content",
regexp_extract,
3),
UserFunctionDef(FunctionType.REGULAR,
"sha","sha(<expr>,<encoding>,<algorithm>) = <hex-string-of-sha>",
"Calculate sha of some expression. Algorithm can be one of 1,224,256,384,512. For now encoding must be manually provided. Will use the input encoding automatically in the future.",
sha,
3),
UserFunctionDef(FunctionType.REGULAR,
"sha1","sha1(<expr>) = <hex-string-of-sha>",
"Exists for backward compatibility only, since it doesn't handle encoding properly. Calculates sha1 of some expression",
sha1,
1),
UserFunctionDef(FunctionType.REGULAR,
"md5","md5(<expr>,<encoding>) = <hex-string-of-md5>",
"Calculate md5 of expression. Returns a hex-string of the result. Currently requires to manually provide the encoding of the data. Will be taken automatically from the input encoding in the future.",
md5,
2),
UserFunctionDef(FunctionType.REGULAR,
"sqrt","sqrt(<expr>) = <square-root>",
"Calculate the square root of the expression",
sqrt,
1),
UserFunctionDef(FunctionType.REGULAR,
"power","power(<expr1>,<expr2>) = <expr1-to-the-power-of-expr2>",
"Raise expr1 to the power of expr2",
power,
2),
UserFunctionDef(FunctionType.REGULAR,
"file_ext","file_ext(<expr>) = <filename-extension-or-empty-string>",
"Get the extension of a filename",
file_ext,
1),
UserFunctionDef(FunctionType.REGULAR,
"file_folder","file_folder(<expr>) = <folder-name-of-filename>",
"Get the folder part of a filename",
file_folder,
1),
UserFunctionDef(FunctionType.REGULAR,
"file_basename","file_basename(<expr>) = <basename-of-filename-including-extension>",
"Get the basename of a filename, including extension if any",
file_basename,
1),
UserFunctionDef(FunctionType.REGULAR,
"file_basename_no_ext","file_basename_no_ext(<expr>) = <basename-of-filename-without-extension>",
"Get the basename of a filename, without the extension if there is one",
file_basename_no_ext,
1),
UserFunctionDef(FunctionType.AGG,
"percentile","percentile(<expr>,<percentile-in-the-range-0-to-1>) = <percentile-value>",
"Calculate the strict percentile of a set of a values.",
StrictPercentile,
2),
UserFunctionDef(FunctionType.AGG,
"stddev_pop","stddev_pop(<expr>) = <stddev-value>",
"Calculate the population standard deviation of a set of values",
StdevPopulation,
1),
UserFunctionDef(FunctionType.AGG,
"stddev_sample","stddev_sample(<expr>) = <stddev-value>",
"Calculate the sample standard deviation of a set of values",
StdevSample,
1)
]
def print_user_functions():
for udf in user_functions:
print("Function: %s" % udf.name)
print(" Usage: %s" % udf.usage)
print(" Description: %s" % udf.description)
class Sqlite3DBResults(object):
def __init__(self,query_column_names,results):
self.query_column_names = query_column_names
self.results = results
def __str__(self):
return "Sqlite3DBResults<result_count=%d,query_column_names=%s>" % (len(self.results),str(self.query_column_names))
__repr__ = __str__
def get_sqlite_type_affinity(sqlite_type):
sqlite_type = sqlite_type.upper()
if 'INT' in sqlite_type:
return 'INTEGER'
elif 'CHAR' in sqlite_type or 'TEXT' in sqlite_type or 'CLOB' in sqlite_type:
return 'TEXT'
elif 'BLOB' in sqlite_type:
return 'BLOB'
elif 'REAL' in sqlite_type or 'FLOA' in sqlite_type or 'DOUB' in sqlite_type:
return 'REAL'
else:
return 'NUMERIC'
def sqlite_type_to_python_type(sqlite_type):
SQLITE_AFFINITY_TO_PYTHON_TYPE_NAMES = {
'INTEGER': long,
'TEXT': unicode,
'BLOB': bytes,
'REAL': float,
'NUMERIC': float
}
return SQLITE_AFFINITY_TO_PYTHON_TYPE_NAMES[get_sqlite_type_affinity(sqlite_type)]
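# Illustrative mappings, following the sqlite type-affinity rules implemented above:
#   'BIGINT'       -> 'INTEGER' affinity -> long (int on python 3)
#   'VARCHAR(100)' -> 'TEXT' affinity    -> unicode (str on python 3)
#   'DOUBLE'       -> 'REAL' affinity    -> float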
class Sqlite3DB(object):
# TODO Add metadata table with qsql file version
QCATALOG_TABLE_NAME = '_qcatalog'
NUMERIC_COLUMN_TYPES = {int, long, float}
PYTHON_TO_SQLITE_TYPE_NAMES = { str: 'TEXT', int: 'INT', long : 'INT' , float: 'REAL', None: 'TEXT' }
def __str__(self):
return "Sqlite3DB<url=%s>" % self.sqlite_db_url
__repr__ = __str__
def __init__(self, db_id, sqlite_db_url, sqlite_db_filename, create_qcatalog, show_sql=SHOW_SQL):
self.show_sql = show_sql
self.create_qcatalog = create_qcatalog
self.db_id = db_id
# TODO Is this needed anymore?
self.sqlite_db_filename = sqlite_db_filename
self.sqlite_db_url = sqlite_db_url
self.conn = sqlite3.connect(self.sqlite_db_url, uri=True)
self.last_temp_table_id = 10000
self.cursor = self.conn.cursor()
self.add_user_functions()
if create_qcatalog:
self.create_qcatalog_table()
else:
xprint('Not creating qcatalog for db_id %s' % db_id)
def retrieve_all_table_names(self):
return [x[0] for x in self.execute_and_fetch("select tbl_name from sqlite_master where type='table'").results]
def get_sqlite_table_info(self,table_name):
return self.execute_and_fetch('PRAGMA table_info(%s)' % table_name).results
def get_sqlite_database_list(self):
return self.execute_and_fetch('pragma database_list').results
def find_new_table_name(self,planned_table_name):
existing_table_names = self.retrieve_all_table_names()
possible_indices = range(1,1000)
for index in possible_indices:
if index == 1:
suffix = ''
else:
suffix = '_%s' % index
table_name_attempt = '%s%s' % (planned_table_name,suffix)
if table_name_attempt not in existing_table_names:
xprint("Found free table name %s in db %s for planned table name %s" % (table_name_attempt,self.db_id,planned_table_name))
return table_name_attempt
# TODO Add test for this
raise Exception('Cannot find free table name in db %s for planned table name %s' % (self.db_id,planned_table_name))
def create_qcatalog_table(self):
if not self.qcatalog_table_exists():
xprint("qcatalog table does not exist. Creating it")
r = self.conn.execute("""CREATE TABLE %s (
qcatalog_entry_id text not null primary key,
content_signature_key text,
temp_table_name text,
content_signature text,
creation_time text,
source_type text,
source text)""" % self.QCATALOG_TABLE_NAME).fetchall()
else:
xprint("qcatalog table already exists. No need to create it")
def qcatalog_table_exists(self):
return sqlite_table_exists(self.conn,self.QCATALOG_TABLE_NAME)
def calculate_content_signature_key(self,content_signature):
assert type(content_signature) == OrderedDict
pp = json.dumps(content_signature,sort_keys=True)
xprint("Calculating content signature for:",pp,six.b(pp))
return hashlib.sha1(six.b(pp)).hexdigest()
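# The key is stable regardless of dict insertion order, because json.dumps(...,
# sort_keys=True) canonicalizes the payload first. Illustrative check (db is a
# hypothetical Sqlite3DB instance, values are made up):
#   a = OrderedDict([('skip_header', True), ('delimiter', ',')])
#   b = OrderedDict([('delimiter', ','), ('skip_header', True)])
#   db.calculate_content_signature_key(a) == db.calculate_content_signature_key(b)  # -> True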
def add_to_qcatalog_table(self, temp_table_name, content_signature, creation_time,source_type, source):
assert source is not None
assert source_type is not None
content_signature_key = self.calculate_content_signature_key(content_signature)
xprint("db_id: %s Adding to qcatalog table: %s. Calculated signature key %s" % (self.db_id, temp_table_name,content_signature_key))
r = self.execute_and_fetch(
'INSERT INTO %s (qcatalog_entry_id,content_signature_key, temp_table_name,content_signature,creation_time,source_type,source) VALUES (?,?,?,?,?,?,?)' % self.QCATALOG_TABLE_NAME,
(str(uuid4()),content_signature_key,temp_table_name,json.dumps(content_signature),creation_time,source_type,source))
# Ensure transaction is completed
self.conn.commit()
def get_from_qcatalog(self, content_signature):
content_signature_key = self.calculate_content_signature_key(content_signature)
xprint("Finding table in db_id %s that matches content signature key %s" % (self.db_id,content_signature_key))
field_names = ["content_signature_key", "temp_table_name", "content_signature", "creation_time","source_type","source","qcatalog_entry_id"]
q = "SELECT %s FROM %s where content_signature_key = ?" % (",".join(field_names),self.QCATALOG_TABLE_NAME)
r = self.execute_and_fetch(q,(content_signature_key,))
if r is None:
return None
if len(r.results) == 0:
return None
if len(r.results) > 1:
raise Exception("Bug - Exactly one result should have been provided: %s" % str(r.results))
d = dict(zip(field_names,r.results[0]))
return d
def get_from_qcatalog_using_table_name(self, temp_table_name):
xprint("getting from qcatalog using table name")
field_names = ["content_signature", "temp_table_name","creation_time","source_type","source","content_signature_key","qcatalog_entry_id"]
q = "SELECT %s FROM %s where temp_table_name = ?" % (",".join(field_names),self.QCATALOG_TABLE_NAME)
xprint("Query from qcatalog %s params %s" % (q,str(temp_table_name,)))
r = self.execute_and_fetch(q,(temp_table_name,))
xprint("results: ",r.results)
if r is None:
return None
if len(r.results) == 0:
return None
if len(r.results) > 1:
raise Exception("Bug - Exactly one result should have been provided: %s" % str(r.results))
d = dict(zip(field_names,r.results[0]))
# content_signature should be the first in the list of field_names
cs = OrderedDict(json.loads(r.results[0][0]))
if self.calculate_content_signature_key(cs) != d['content_signature_key']:
raise Exception('Table contains an invalid entry - content signature key is not matching the actual content signature')
return d
def get_all_from_qcatalog(self):
xprint("getting from qcatalog using table name")
field_names = ["temp_table_name", "content_signature", "creation_time","source_type","source","qcatalog_entry_id"]
q = "SELECT %s FROM %s" % (",".join(field_names),self.QCATALOG_TABLE_NAME)
xprint("Query from qcatalog %s" % q)
r = self.execute_and_fetch(q)
if r is None:
return None
def convert(res):
d = dict(zip(field_names, res))
cs = OrderedDict(json.loads(res[1]))
d['content_signature_key'] = self.calculate_content_signature_key(cs)
return d
rr = [convert(row) for row in r.results]
return rr
def done(self):
xprint("Closing database %s" % self.db_id)
try:
self.conn.commit()
self.conn.close()
xprint("Database %s closed" % self.db_id)
except Exception as e:
xprint("Could not close database %s" % self.db_id)
raise
def add_user_functions(self):
for udf in user_functions:
if type(udf.func_or_obj) == type(object):
self.conn.create_aggregate(udf.name,udf.param_count,udf.func_or_obj)
elif type(udf.func_or_obj) == type(md5):
self.conn.create_function(udf.name,udf.param_count,udf.func_or_obj)
else:
raise Exception("Invalid user function definition %s" % str(udf))
def is_numeric_type(self, column_type):
return column_type in Sqlite3DB.NUMERIC_COLUMN_TYPES
def update_many(self, sql, params):
try:
sqlprint(sql, " params: " + str(params))
self.cursor.executemany(sql, params)
_ = self.cursor.fetchall()
finally:
pass # cursor.close()
def execute_and_fetch(self, q,params = None):
try:
try:
if self.show_sql:
print(repr(q))
if params is None:
r = self.cursor.execute(q)
else:
r = self.cursor.execute(q,params)
if self.cursor.description is not None:
# we decode the column names, so they can be encoded to any output format later on
query_column_names = [c[0] for c in self.cursor.description]
else:
query_column_names = None
result = self.cursor.fetchall()
finally:
pass # cursor.close()
except OperationalError as e:
raise SqliteOperationalErrorException("Failed executing sqlite query %s with params %s . error: %s" % (q,params,str(e)),e)
return Sqlite3DBResults(query_column_names,result)
def _get_as_list_str(self, l):
return ",".join(['"%s"' % x.replace('"', '""') for x in l])
def generate_insert_row(self, table_name, column_names):
col_names_str = self._get_as_list_str(column_names)
question_marks = ", ".join(["?" for i in range(0, len(column_names))])
return 'INSERT INTO %s (%s) VALUES (%s)' % (table_name, col_names_str, question_marks)
# Get a list of column names so order will be preserved (Could have used OrderedDict, but
# then we would need python 2.7)
def generate_create_table(self, table_name, column_names, column_dict):
# Convert dict from python types to db types
column_name_to_db_type = dict(
(n, Sqlite3DB.PYTHON_TO_SQLITE_TYPE_NAMES[t]) for n, t in six.iteritems(column_dict))
column_defs = ','.join(['"%s" %s' % (
n.replace('"', '""'), column_name_to_db_type[n]) for n in column_names])
return 'CREATE TABLE %s (%s)' % (table_name, column_defs)
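# Illustrative output (hypothetical names): with table_name='temp_table_10001',
# column_names=['c1', 'c2'] and column_dict={'c1': str, 'c2': float}, this returns:
#   CREATE TABLE temp_table_10001 ("c1" TEXT,"c2" REAL)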
def generate_temp_table_name(self):
# WTF - From my own past mutable-self
self.last_temp_table_id += 1
tn = "temp_table_%s" % self.last_temp_table_id
return tn
def generate_drop_table(self, table_name):
return "DROP TABLE %s" % table_name
def drop_table(self, table_name):
return self.execute_and_fetch(self.generate_drop_table(table_name))
def attach_and_copy_table(self, from_db, relevant_table,stop_after_analysis):
xprint("Attaching %s into db %s and copying table %s into it" % (from_db,self,relevant_table))
temp_db_id = 'temp_db_id'
q = "attach '%s' as %s" % (from_db.sqlite_db_url,temp_db_id)
xprint("Attach query: %s" % q)
c = self.execute_and_fetch(q)
new_temp_table_name = 'temp_table_%s' % (self.last_temp_table_id + 1)
fully_qualified_table_name = '%s.%s' % (temp_db_id,relevant_table)
if stop_after_analysis:
limit = ' limit 100'
else:
limit = ''
copy_query = 'create table %s as select * from %s %s' % (new_temp_table_name,fully_qualified_table_name,limit)
copy_results = self.execute_and_fetch(copy_query)
xprint("Copied %s.%s into %s in db_id %s. Results %s" % (temp_db_id,relevant_table,new_temp_table_name,self.db_id,copy_results))
self.last_temp_table_id += 1
xprint("Copied table into %s. Detaching db that was attached temporarily" % self.db_id)
q = "detach database %s" % temp_db_id
xprint("detach query: %s" % q)
c = self.execute_and_fetch(q)
xprint(c)
return new_temp_table_name
class CouldNotConvertStringToNumericValueException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class SqliteOperationalErrorException(Exception):
def __init__(self, msg,original_error):
self.msg = msg
self.original_error = original_error
def __str__(self):
return repr(self.msg) + "//" + repr(self.original_error)
class IncorrectDefaultValueException(Exception):
def __init__(self, option_type,option,actual_value):
self.option_type = option_type
self.option = option
self.actual_value = actual_value
def __str__(self):
return "IncorrectDefaultValueException(option_type=%s,option=%s,actual_value=%s)" % (self.option_type,self.option,self.actual_value)
class NonExistentTableNameInQsql(Exception):
def __init__(self, qsql_filename,table_name,existing_table_names):
self.qsql_filename = qsql_filename
self.table_name = table_name
self.existing_table_names = existing_table_names
class NonExistentTableNameInSqlite(Exception):
def __init__(self, qsql_filename,table_name,existing_table_names):
self.qsql_filename = qsql_filename
self.table_name = table_name
self.existing_table_names = existing_table_names
class TooManyTablesInQsqlException(Exception):
def __init__(self, qsql_filename,existing_table_names):
self.qsql_filename = qsql_filename
self.existing_table_names = existing_table_names
class NoTableInQsqlExcption(Exception):
def __init__(self, qsql_filename):
self.qsql_filename = qsql_filename
class TooManyTablesInSqliteException(Exception):
def __init__(self, qsql_filename,existing_table_names):
self.qsql_filename = qsql_filename
self.existing_table_names = existing_table_names
class NoTablesInSqliteException(Exception):
def __init__(self, sqlite_filename):
self.sqlite_filename = sqlite_filename
class ColumnMaxLengthLimitExceededException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class CouldNotParseInputException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class BadHeaderException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class EncodedQueryException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class CannotUnzipDataStreamException(Exception):
def __init__(self):
pass
class UniversalNewlinesExistException(Exception):
def __init__(self):
pass
class EmptyDataException(Exception):
def __init__(self):
pass
class MissingHeaderException(Exception):
def __init__(self,msg):
self.msg = msg
class InvalidQueryException(Exception):
def __init__(self,msg):
self.msg = msg
class TooManyAttachedDatabasesException(Exception):
def __init__(self,msg):
self.msg = msg
class FileNotFoundException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class UnknownFileTypeException(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
class ColumnCountMismatchException(Exception):
def __init__(self, msg):
self.msg = msg
class ContentSignatureNotFoundException(Exception):
def __init__(self, msg):
self.msg = msg
class StrictModeColumnCountMismatchException(Exception):
def __init__(self,atomic_fn, expected_col_count,actual_col_count,lines_read):
self.atomic_fn = atomic_fn
self.expected_col_count = expected_col_count
self.actual_col_count = actual_col_count
self.lines_read = lines_read
class FluffyModeColumnCountMismatchException(Exception):
def __init__(self,atomic_fn, expected_col_count,actual_col_count,lines_read):
self.atomic_fn = atomic_fn
self.expected_col_count = expected_col_count
self.actual_col_count = actual_col_count
self.lines_read = lines_read
class ContentSignatureDiffersException(Exception):
def __init__(self,original_filename, other_filename, filenames_str,key,source_value,signature_value):
self.original_filename = original_filename
self.other_filename = other_filename
self.filenames_str = filenames_str
self.key = key
self.source_value = source_value
self.signature_value = signature_value
class ContentSignatureDataDiffersException(Exception):
def __init__(self,msg):
self.msg = msg
class InvalidQSqliteFileException(Exception):
def __init__(self,msg):
self.msg = msg
class MaximumSourceFilesExceededException(Exception):
def __init__(self,msg):
self.msg = msg
# Simplistic Sql "parsing" class... We'll eventually require a real SQL parser which will provide us with a parse tree
#
# A "qtable" is a filename which behaves like an SQL table...
class Sql(object):
def __init__(self, sql, data_streams):
# Currently supports only standard SELECT statements
# Holds original SQL
self.sql = sql
# Holds sql parts
self.sql_parts = sql.split()
self.data_streams = data_streams
self.qtable_metadata_dict = OrderedDict()
# Set of qtable names
self.qtable_names = []
# Dict from qtable names to their positions in sql_parts. Value here is a *list* of positions,
# since it is possible that the same qtable_name (file) is referenced in multiple positions
# and we don't want the database table to be recreated for each
# reference
self.qtable_name_positions = {}
# Dict from qtable names to their effective (actual database) table
# names
self.qtable_name_effective_table_names = {}
self.query_column_names = None
# Go over all sql parts
idx = 0
while idx < len(self.sql_parts):
# Get the part string
part = self.sql_parts[idx]
# If it's a FROM or a JOIN
if part.upper() in ['FROM', 'JOIN']:
# and there is nothing after it,
if idx == len(self.sql_parts) - 1:
# Just fail
raise InvalidQueryException(
'FROM/JOIN is missing a table name after it')
qtable_name = self.sql_parts[idx + 1]
# Otherwise, the next part contains the qtable name. In most cases the next part will be only the qtable name.
# We handle one special case here, where this is a subquery as a column: "SELECT (SELECT ... FROM qtable),100 FROM ...".
# In that case, there will be a closing parenthesis as part of the name, and we want to handle this case gracefully.
# This is obviously a hack of a hack :) Just until we have
# complete parsing capabilities
if ')' in qtable_name:
leftover = qtable_name[qtable_name.index(')'):]
self.sql_parts.insert(idx + 2, leftover)
qtable_name = qtable_name[:qtable_name.index(')')]
self.sql_parts[idx + 1] = qtable_name
if qtable_name[0] != '(':
normalized_qtable_name = self.normalize_qtable_name(qtable_name)
xprint("Normalized qtable name for %s is %s" % (qtable_name,normalized_qtable_name))
self.qtable_names += [normalized_qtable_name]
if normalized_qtable_name not in self.qtable_name_positions.keys():
self.qtable_name_positions[normalized_qtable_name] = []
self.qtable_name_positions[normalized_qtable_name].append(idx + 1)
self.sql_parts[idx + 1] = normalized_qtable_name
idx += 2
else:
idx += 1
else:
idx += 1
xprint("Final sql parts: %s" % self.sql_parts)
def normalize_qtable_name(self,qtable_name):
if self.data_streams.is_data_stream(qtable_name):
return qtable_name
if ':::' in qtable_name:
qsql_filename, table_name = qtable_name.split(":::", 1)
return '%s:::%s' % (os.path.realpath(os.path.abspath(qsql_filename)),table_name)
else:
return os.path.realpath(os.path.abspath(qtable_name))
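# Illustrative normalizations (paths hypothetical): 'mydb.qsql:::mytable' becomes
# '/abs/path/to/mydb.qsql:::mytable', a plain 'data.csv' becomes '/abs/path/to/data.csv',
# and data-stream names are returned unchanged.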
def set_effective_table_name(self, qtable_name, effective_table_name):
if qtable_name in self.qtable_name_effective_table_names.keys():
if self.qtable_name_effective_table_names[qtable_name] != effective_table_name:
raise Exception(
"Already set effective table name for qtable %s. Trying to change the effective table name from %s to %s" %
(qtable_name,self.qtable_name_effective_table_names[qtable_name],effective_table_name))
xprint("Setting effective table name for %s - effective table name is set to %s" % (qtable_name,effective_table_name))
self.qtable_name_effective_table_names[
qtable_name] = effective_table_name
def get_effective_sql(self,table_name_mapping=None):
if any(qtable_name not in self.qtable_name_effective_table_names for qtable_name in self.qtable_names):
assert False, 'There are qtables without effective tables'
effective_sql = [x for x in self.sql_parts]
xprint("Effective table names",self.qtable_name_effective_table_names)
for qtable_name, positions in six.iteritems(self.qtable_name_positions):
xprint("Positions for qtable name %s are %s" % (qtable_name,positions))
for pos in positions:
if table_name_mapping is not None:
x = self.qtable_name_effective_table_names[qtable_name]
effective_sql[pos] = table_name_mapping[x]
else:
effective_sql[pos] = self.qtable_name_effective_table_names[qtable_name]
return " ".join(effective_sql)
def get_qtable_name_effective_table_names(self):
return self.qtable_name_effective_table_names
def execute_and_fetch(self, db):
x = self.get_effective_sql()
xprint("Final query: %s" % x)
db_results_obj = db.execute_and_fetch(x)
return db_results_obj
def materialize_using(self,loaded_table_structures_dict):
xprint("Materializing sql object: %s" % str(self.qtable_names))
xprint("loaded table structures dict %s" % loaded_table_structures_dict)
for qtable_name in self.qtable_names:
table_structure = loaded_table_structures_dict[qtable_name]
table_name_in_disk_db = table_structure.get_table_name_for_querying()
effective_table_name = '%s.%s' % (table_structure.db_id, table_name_in_disk_db)
# for a single file - no need to create a union, just use the table name
self.set_effective_table_name(qtable_name, effective_table_name)
xprint("Materialized filename %s to effective table name %s" % (qtable_name,effective_table_name))
class TableColumnInferer(object):
def __init__(self, input_params):
self.inferred = False
self.mode = input_params.parsing_mode
self.rows = []
self.skip_header = input_params.skip_header
self.header_row = None
self.header_row_filename = None
self.expected_column_count = input_params.expected_column_count
self.input_delimiter = input_params.delimiter
self.disable_column_type_detection = input_params.disable_column_type_detection
def _generate_content_signature(self):
return OrderedDict({
"inferred": self.inferred,
"mode": self.mode,
"rows": "\n".join([",".join(x) for x in self.rows]),
"skip_header": self.skip_header,
"header_row": self.header_row,
"expected_column_count": self.expected_column_count,
"input_delimiter": self.input_delimiter,
"disable_column_type_detection": self.disable_column_type_detection
})
def analyze(self, filename, col_vals):
if self.inferred:
assert False, "Already inferred columns"
if self.skip_header and self.header_row is None:
self.header_row = col_vals
self.header_row_filename = filename
else:
self.rows.append(col_vals)
if len(self.rows) < 100:
return False
self.do_analysis()
return True
def force_analysis(self):
# This method is called whenever there is no more data, and an analysis needs
# to be performed immediately, regardless of the amount of sample data that has
# been collected
self.do_analysis()
def determine_type_of_value(self, value):
if self.disable_column_type_detection:
return str
if value is not None:
value = value.strip()
if value == '' or value is None:
return None
try:
i = int(value)
if type(i) == long:
return long
else:
return int
except:
pass
try:
f = float(value)
return float
except:
pass
return str
def determine_type_of_value_list(self, value_list):
type_list = [self.determine_type_of_value(v) for v in value_list]
all_types = set(type_list)
if len(set(type_list)) == 1:
# all the sample lines are of the same type
return type_list[0]
else:
# check for the number of types without nulls,
type_list_without_nulls = list(filter(
lambda x: x is not None, type_list))
# If all the sample lines are of the same type,
if len(set(type_list_without_nulls)) == 1:
# return it
return type_list_without_nulls[0]
else:
# If there are only two types, one float and one int, then choose a float type
if len(set(type_list_without_nulls)) == 2 and float in type_list_without_nulls and int in type_list_without_nulls:
return float
return str
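# Worked example: for sample values ['1', '2.5', '', '3'] the per-value types are
# [int, float, None, int]; dropping the null leaves {int, float}, which is exactly the
# int/float pair handled above, so the inferred column type is float.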
def do_analysis(self):
if self.mode == 'strict':
self._do_strict_analysis()
elif self.mode in ['relaxed']:
self._do_relaxed_analysis()
else:
raise Exception('Unknown parsing mode %s' % self.mode)
if self.column_count == 1 and self.expected_column_count != 1 and self.expected_column_count is not None:
print(f"Warning: column count is one (expected column count is {self.expected_column_count} - did you provide the correct delimiter?", file=sys.stderr)
self.infer_column_types()
self.infer_column_names()
self.inferred = True
def validate_column_names(self, value_list):
column_name_errors = []
for v in value_list:
if v is None:
# we allow column names to be None, in relaxed mode it'll be filled with default names.
# RLRL
continue
if ',' in v:
column_name_errors.append(
(v, "Column name cannot contain commas"))
continue
if self.input_delimiter in v:
column_name_errors.append(
(v, "Column name cannot contain the input delimiter. Please make sure you've set the correct delimiter"))
continue
if '\n' in v:
column_name_errors.append(
(v, "Column name cannot contain newline"))
continue
if v != v.strip():
column_name_errors.append(
(v, "Column name contains leading/trailing spaces"))
continue
try:
v.encode("utf-8", "strict").decode("utf-8")
except:
column_name_errors.append(
(v, "Column name must be UTF-8 Compatible"))
continue
# We're checking for column duplication for each field in order to be able to still provide it along with other errors
if len(list(filter(lambda x: x == v,value_list))) > 1:
entry = (v, "Column name is duplicated")
# Don't duplicate the error report itself
if entry not in column_name_errors:
column_name_errors.append(entry)
continue
nul_index = v.find("\x00")
if nul_index >= 0:
column_name_errors.append(
(v, "Column name cannot contain NUL"))
continue
t = self.determine_type_of_value(v)
if t != str:
column_name_errors.append((v, "Column name must be a string"))
return column_name_errors
def infer_column_names(self):
if self.header_row is not None:
column_name_errors = self.validate_column_names(self.header_row)
if len(column_name_errors) > 0:
raise BadHeaderException("Header must contain only strings and not numbers or empty strings: '%s'\n%s" % (
",".join(self.header_row), "\n".join(["'%s': %s" % (x, y) for x, y in column_name_errors])))
# use header row in order to name columns
if len(self.header_row) < self.column_count:
if self.mode == 'strict':
raise ColumnCountMismatchException("Strict mode. Header row contains less columns than expected column count(%s vs %s)" % (
len(self.header_row), self.column_count))
elif self.mode in ['relaxed']:
# in relaxed mode, add columns to fill the missing ones
self.header_row = self.header_row + \
['c%s' % (x + len(self.header_row) + 1)
for x in range(self.column_count - len(self.header_row))]
elif len(self.header_row) > self.column_count:
if self.mode == 'strict':
raise ColumnCountMismatchException("Strict mode. Header row contains more columns than expected column count (%s vs %s)" % (
len(self.header_row), self.column_count))
elif self.mode in ['relaxed']:
# In relaxed mode, just cut the extra column names
self.header_row = self.header_row[:self.column_count]
self.column_names = self.header_row
else:
# Column names are cX starting from 1
self.column_names = ['c%s' % (i + 1)
for i in range(self.column_count)]
def _do_relaxed_analysis(self):
column_count_list = [len(col_vals) for col_vals in self.rows]
if len(self.rows) == 0:
if self.header_row is None:
self.column_count = 0
else:
self.column_count = len(self.header_row)
else:
if self.expected_column_count is not None:
self.column_count = self.expected_column_count
else:
# If not specified, we'll take the largest row in the sample rows
self.column_count = max(column_count_list)
def get_column_count_summary(self, column_count_list):
counts = {}
for column_count in column_count_list:
counts[column_count] = counts.get(column_count, 0) + 1
return six.u(", ").join([six.u("{} rows with {} columns".format(v, k)) for k, v in six.iteritems(counts)])
def _do_strict_analysis(self):
column_count_list = [len(col_vals) for col_vals in self.rows]
if len(set(column_count_list)) != 1:
raise ColumnCountMismatchException('Strict mode. Column count is expected to be identical. Multiple column counts exist at the first part of the file. Try to check your delimiter, or change to relaxed mode. Details: %s' % (
self.get_column_count_summary(column_count_list)))
self.column_count = len(self.rows[0])
if self.expected_column_count is not None and self.column_count != self.expected_column_count:
raise ColumnCountMismatchException('Strict mode. Column count is expected to be %s but is %s' % (
self.expected_column_count, self.column_count))
self.infer_column_types()
def infer_column_types(self):
assert self.column_count > -1
self.column_types = []
self.column_types2 = []
for column_number in range(self.column_count):
column_value_list = [
row[column_number] if column_number < len(row) else None for row in self.rows]
column_type = self.determine_type_of_value_list(column_value_list)
self.column_types.append(column_type)
column_value_list2 = [row[column_number] if column_number < len(
row) else None for row in self.rows[1:]]
column_type2 = self.determine_type_of_value_list(
column_value_list2)
self.column_types2.append(column_type2)
comparison = map(
lambda x: x[0] == x[1], zip(self.column_types, self.column_types2))
if False in comparison and not self.skip_header:
number_of_column_types = len(set(self.column_types))
if number_of_column_types == 1 and list(set(self.column_types))[0] == str:
print('Warning - There seems to be a header line in the file, but -H has not been specified. All fields will be detected as text fields, and the header line will appear as part of the data', file=sys.stderr)
def get_column_dict(self):
return OrderedDict(zip(self.column_names, self.column_types))
def get_column_count(self):
return self.column_count
def get_column_names(self):
return self.column_names
def get_column_types(self):
return self.column_types
def py3_encoded_csv_reader(encoding, f, dialect,row_data_only=False,**kwargs):
try:
xprint("f is %s" % str(f))
xprint("dialect is %s" % dialect)
csv_reader = csv.reader(f, dialect, **kwargs)
if row_data_only:
for row in csv_reader:
yield row
else:
for row in csv_reader:
yield (f.filename(),f.isfirstline(),row)
except UnicodeDecodeError as e1:
raise CouldNotParseInputException(e1)
except ValueError as e:
# TODO Add test for this
if str(e) is not None and str(e).startswith('could not convert string to'):
raise CouldNotConvertStringToNumericValueException(str(e))
else:
raise CouldNotParseInputException(str(e))
except Exception as e:
if str(e).startswith("field larger than field limit"):
raise ColumnMaxLengthLimitExceededException(str(e))
elif 'universal-newline' in str(e):
raise UniversalNewlinesExistException()
else:
raise
encoded_csv_reader = py3_encoded_csv_reader
def normalized_filename(filename):
return filename
class TableCreatorState(object):
INITIALIZED = 'INITIALIZED'
ANALYZED = 'ANALYZED'
FULLY_READ = 'FULLY_READ'
class MaterializedStateType(object):
UNKNOWN = 'unknown'
DELIMITED_FILE = 'delimited-file'
QSQL_FILE = 'qsql-file'
SQLITE_FILE = 'sqlite-file'
DATA_STREAM = 'data-stream'
class TableSourceType(object):
DELIMITED_FILE = 'file'
DELIMITED_FILE_WITH_UNUSED_QSQL = 'file-with-unused-qsql'
QSQL_FILE = 'qsql-file'
QSQL_FILE_WITH_ORIGINAL = 'qsql-file-with-original'
SQLITE_FILE = 'sqlite-file'
DATA_STREAM = 'data-stream'
def skip_BOM(f):
try:
BOM = f.buffer.read(3)
if BOM != six.b('\xef\xbb\xbf'):
# TODO Add test for this (propagates to try:except)
raise Exception('Value of BOM is not as expected - Value is "%s"' % str(BOM))
except Exception as e:
# TODO Add a test for this
raise Exception('Tried to skip BOM for "utf-8-sig" encoding and failed. Error message is ' + str(e))
def detect_qtable_name_source_info(qtable_name,data_streams,read_caching_enabled):
data_stream = data_streams.get_for_filename(qtable_name)
xprint("Found data stream %s" % data_stream)
if data_stream is not None:
return MaterializedStateType.DATA_STREAM, TableSourceType.DATA_STREAM,(data_stream,)
if ':::' in qtable_name:
qsql_filename, table_name = qtable_name.split(":::", 1)
if not os.path.exists(qsql_filename):
raise FileNotFoundException("Could not find file %s" % qsql_filename)
if is_qsql_file(qsql_filename):
return MaterializedStateType.QSQL_FILE, TableSourceType.QSQL_FILE, (qsql_filename, table_name,)
if is_sqlite_file(qsql_filename):
return MaterializedStateType.SQLITE_FILE, TableSourceType.SQLITE_FILE, (qsql_filename, table_name,)
raise UnknownFileTypeException("Cannot detect the type of table %s" % qtable_name)
else:
if is_qsql_file(qtable_name):
return MaterializedStateType.QSQL_FILE, TableSourceType.QSQL_FILE, (qtable_name, None)
if is_sqlite_file(qtable_name):
return MaterializedStateType.SQLITE_FILE, TableSourceType.SQLITE_FILE, (qtable_name, None)
matching_qsql_file_candidate = qtable_name + '.qsql'
table_source_type = TableSourceType.DELIMITED_FILE
if is_qsql_file(matching_qsql_file_candidate):
if read_caching_enabled:
xprint("Found matching qsql file for original file %s (matching file %s) and read caching is enabled. Using it" % (qtable_name,matching_qsql_file_candidate))
return MaterializedStateType.QSQL_FILE, TableSourceType.QSQL_FILE_WITH_ORIGINAL, (matching_qsql_file_candidate, None)
else:
xprint("Found matching qsql file for original file %s (matching file %s), but read caching is disabled. Not using it" % (qtable_name,matching_qsql_file_candidate))
table_source_type = TableSourceType.DELIMITED_FILE_WITH_UNUSED_QSQL
return MaterializedStateType.DELIMITED_FILE,table_source_type ,(qtable_name, None)
def is_sqlite_file(filename):
if not os.path.exists(filename):
return False
f = open(filename,'rb')
magic = f.read(16)
f.close()
return magic == six.b("SQLite format 3\x00")
def sqlite_table_exists(cursor,table_name):
results = cursor.execute("select count(*) from sqlite_master where type='table' and tbl_name == '%s'" % table_name).fetchall()
return results[0][0] == 1
def is_qsql_file(filename):
if not is_sqlite_file(filename):
return False
db = Sqlite3DB('check_qsql_db',filename,filename,create_qcatalog=False)
qcatalog_exists = db.qcatalog_table_exists()
db.done()
return qcatalog_exists
def normalize_filename_to_table_name(filename):
xprint("Normalizing filename %s" % filename)
if filename[0].isdigit():
xprint("Filename starts with a digit, adding prefix")
filename = 't_%s' % filename
if filename.lower().endswith(".qsql"):
filename = filename[:-5]
elif filename.lower().endswith('.sqlite'):
filename = filename[:-7]
elif filename.lower().endswith('.sqlite3'):
filename = filename[:-8]
return filename.replace("-","_dash_").replace(".","_dot_").replace('?','_qm_').replace("/","_slash_").replace("\\","_backslash_").replace(":","_colon_").replace(" ","_space_").replace("+","_plus_")
def validate_content_signature(original_filename, source_signature,other_filename, content_signature,scope=None,dump=False):
if dump:
xprint("Comparing: source value: %s target value: %s" % (source_signature,content_signature))
s = "%s vs %s:" % (original_filename,other_filename)
if scope is None:
scope = []
for k in source_signature:
if type(source_signature[k]) == OrderedDict:
validate_content_signature(original_filename, source_signature[k],other_filename, content_signature[k],scope + [k])
else:
if k not in content_signature:
raise ContentSignatureDataDiffersException("%s Content Signatures differ. %s is missing from content signature" % (s,k))
if source_signature[k] != content_signature[k]:
if k == 'rows':
raise ContentSignatureDataDiffersException("%s Content Signatures differ at %s.%s (actual analysis data differs)" % (s,".".join(scope),k))
else:
raise ContentSignatureDiffersException(original_filename, other_filename, original_filename,".".join(scope + [k]),source_signature[k],content_signature[k])
class DelimitedFileReader(object):
def __init__(self,atomic_fns, input_params, dialect, f = None,external_f_name = None):
if f is not None:
assert len(atomic_fns) == 0
self.atomic_fns = atomic_fns
self.input_params = input_params
self.dialect = dialect
self.f = f
self.lines_read = 0
self.file_number = -1
self.skipped_bom = False
self.is_open = f is not None
self.external_f = f is not None
self.external_f_name = external_f_name
def get_lines_read(self):
return self.lines_read
def get_size_hash(self):
if self.atomic_fns is None or len(self.atomic_fns) == 0:
return "data-stream-size"
else:
return ",".join(map(str,[os.stat(atomic_fn).st_size for atomic_fn in self.atomic_fns]))
def get_last_modification_time_hash(self):
if self.atomic_fns is None or len(self.atomic_fns) == 0:
return "data stream-lmt"
else:
x = ",".join(map(lambda x: ':%s:' % x,[os.stat(x).st_mtime_ns for x in self.atomic_fns]))
res = hashlib.sha1(six.b(x)).hexdigest() + '///' + x
xprint("Hash of last modification time is %s" % res)
return res
def open_file(self):
if self.external_f:
xprint("External f has been provided. No need to open the file")
return
# TODO Support universal newlines for gzipped and stdin data as well
xprint("XX Opening file %s" % ",".join(self.atomic_fns))
import fileinput
def q_openhook(filename, mode):
if self.input_params.gzipped_input or filename.endswith('.gz'):
import gzip
f = gzip.open(filename,mode='rt',encoding=self.input_params.input_encoding)
else:
if six.PY3:
if self.input_params.with_universal_newlines:
f = io.open(filename, 'rU', newline=None, encoding=self.input_params.input_encoding)
else:
f = io.open(filename, 'r', newline=None, encoding=self.input_params.input_encoding)
else:
if self.input_params.with_universal_newlines:
file_opening_mode = 'rbU'
else:
file_opening_mode = 'rb'
f = open(filename, file_opening_mode)
if self.input_params.input_encoding == 'utf-8-sig' and not self.skipped_bom:
skip_BOM(f)
return f
f = fileinput.input(self.atomic_fns,mode='rb',openhook=q_openhook)
self.f = f
self.is_open = True
xprint("Actually opened file %s" % self.f)
return f
def close_file(self):
if not self.is_open:
# TODO Convert to assertion
raise Exception("Bug - file should already be open: %s" % ",".join(self.atomic_fns))
self.f.close()
xprint("XX Closed file %s" % ",".join(self.atomic_fns))
def generate_rows(self):
csv_reader = encoded_csv_reader(self.input_params.input_encoding, self.f, dialect=self.dialect,row_data_only=self.external_f)
try:
# TODO Some order with regard to separating data-streams for actual files
if self.external_f:
for col_vals in csv_reader:
self.lines_read += 1
yield self.external_f_name,0, self.lines_read == 0, col_vals
else:
for file_name,is_first_line,col_vals in csv_reader:
if is_first_line:
self.file_number = self.file_number + 1
self.lines_read += 1
yield file_name,self.file_number,is_first_line,col_vals
except ColumnMaxLengthLimitExceededException as e:
msg = "Column length is larger than the maximum. Offending file is '%s' - Line is %s, counting from 1 (encoding %s). The line number is the raw line number of the file, ignoring whether there's a header or not" % (",".join(self.atomic_fns),self.lines_read + 1,self.input_params.input_encoding)
raise ColumnMaxLengthLimitExceededException(msg)
except UniversalNewlinesExistException as e2:
# No need to translate the exception, but we want it to be explicitly defined here for clarity
raise UniversalNewlinesExistException()
class MaterializedState(object):
def __init__(self, table_source_type,qtable_name, engine_id):
xprint("Creating new MS: %s %s" % (id(self), qtable_name))
self.table_source_type = table_source_type
self.qtable_name = qtable_name
self.engine_id = engine_id
self.db_to_use = None
self.db_id = None
self.source_type = None
self.source = None
self.mfs_structure = None
self.start_time = None
self.end_time = None
self.duration = None
self.effective_table_name = None
def get_materialized_state_type(self):
return MaterializedStateType.UNKNOWN
def get_planned_table_name(self):
assert False, 'not implemented'
def autodetect_table_name(self):
xprint("Autodetecting table name. db_to_use=%s" % self.db_to_use)
existing_table_names = self.db_to_use.retrieve_all_table_names()
xprint("Existing table names: %s" % existing_table_names)
possible_indices = range(1,1000)
for index in possible_indices:
if index == 1:
suffix = ''
else:
suffix = '_%s' % index
table_name_attempt = '%s%s' % (self.get_planned_table_name(),suffix)
xprint("Table name attempt: index=%s name=%s" % (index,table_name_attempt))
if table_name_attempt not in existing_table_names:
xprint("Found free table name %s for source type %s source %s" % (table_name_attempt,self.source_type,self.source))
return table_name_attempt
raise Exception('Cannot find free table name for source type %s source %s' % (self.source_type,self.source))
def initialize(self):
self.start_time = time.time()
def finalize(self):
self.end_time = time.time()
self.duration = self.end_time - self.start_time
def choose_db_to_use(self,forced_db_to_use=None,stop_after_analysis=False):
assert False, 'not implemented'
def make_data_available(self,stop_after_analysis):
assert False, 'not implemented'
class MaterializedDelimitedFileState(MaterializedState):
def __init__(self, table_source_type,qtable_name, input_params, dialect_id,engine_id,target_table_name=None):
super().__init__(table_source_type,qtable_name,engine_id)
self.input_params = input_params
self.dialect_id = dialect_id
self.target_table_name = target_table_name
self.content_signature = None
self.atomic_fns = None
self.can_store_as_cached = None
def get_materialized_state_type(self):
return MaterializedStateType.DELIMITED_FILE
def initialize(self):
super(MaterializedDelimitedFileState, self).initialize()
self.atomic_fns = self.materialize_file_list(self.qtable_name)
self.delimited_file_reader = DelimitedFileReader(self.atomic_fns,self.input_params,self.dialect_id)
self.source_type = self.table_source_type
self.source = ",".join(self.atomic_fns)
return
def materialize_file_list(self,qtable_name):
materialized_file_list = []
unfound_files = []
# First check if the file exists without globbing. This will ensure that we don't support non-existent files
if os.path.exists(qtable_name):
# If it exists, then just use it
found_files = [qtable_name]
else:
# If not, then try with globs (and sort for predictability)
found_files = list(sorted(glob.glob(qtable_name)))
# If no files
if len(found_files) == 0:
unfound_files += [qtable_name]
materialized_file_list += found_files
# If there are no files to go over,
if len(unfound_files) == 1:
raise FileNotFoundException(
"No files matching '%s' have been found" % unfound_files[0])
elif len(unfound_files) > 1:
# TODO Add test for this
raise FileNotFoundException(
"The following files have not been found for table %s: %s" % (qtable_name,",".join(unfound_files)))
# deduplicate with matching qsql files
filtered_file_list = list(filter(lambda x: not x.endswith('.qsql'),materialized_file_list))
xprint("Filtered qsql files from glob search. Original file count: %s new file count: %s" % (len(materialized_file_list),len(filtered_file_list)))
l = len(filtered_file_list)
# If this proves to be a problem for users in terms of usability, then we'll just materialize the files
# into the adhoc db, as with the db attach limit of sqlite
if l > 500:
msg = "Maximum source files for table must be 500. Table is name is %s Number of actual files is %s" % (qtable_name,l)
raise MaximumSourceFilesExceededException(msg)
absolute_path_list = [os.path.abspath(x) for x in filtered_file_list]
return absolute_path_list
def choose_db_to_use(self,forced_db_to_use=None,stop_after_analysis=False):
if forced_db_to_use is not None:
self.db_id = forced_db_to_use.db_id
self.db_to_use = forced_db_to_use
self.can_store_as_cached = False
assert self.target_table_name is None
self.target_table_name = self.autodetect_table_name()
return
self.can_store_as_cached = True
self.db_id = '%s' % self._generate_db_name(self.atomic_fns[0])
xprint("Database id is %s" % self.db_id)
self.db_to_use = Sqlite3DB(self.db_id, 'file:%s?mode=memory&cache=shared' % self.db_id, 'memory<%s>' % self.db_id,create_qcatalog=True)
if self.target_table_name is None:
self.target_table_name = self.autodetect_table_name()
def __analyze_delimited_file(self,database_info):
xprint("Analyzing delimited file")
if self.target_table_name is not None:
target_sqlite_table_name = self.target_table_name
else:
assert False
xprint("Target sqlite table name is %s" % target_sqlite_table_name)
# Create the matching database table and populate it
table_creator = TableCreator(self.qtable_name, self.delimited_file_reader,self.input_params, sqlite_db=database_info.sqlite_db,
target_sqlite_table_name=target_sqlite_table_name)
table_creator.perform_analyze(self.dialect_id)
xprint("after perform_analyze")
self.content_signature = table_creator._generate_content_signature()
now = datetime.datetime.utcnow().isoformat()
database_info.sqlite_db.add_to_qcatalog_table(target_sqlite_table_name,
self.content_signature,
now,
self.source_type,
self.source)
return table_creator
def _generate_disk_db_filename(self, filenames_str):
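        # The cache file lives next to the source file, with a .qsql suffix;
        # '+' characters are mapped to '__', e.g. "a+b.csv" -> "/abs/path/a__b.csv.qsql"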
fn = '%s.qsql' % (os.path.abspath(filenames_str).replace("+","__"))
return fn
def _get_should_read_from_cache(self, disk_db_filename):
disk_db_file_exists = os.path.exists(disk_db_filename)
should_read_from_cache = self.input_params.read_caching and disk_db_file_exists
return should_read_from_cache
def calculate_should_read_from_cache(self):
# TODO cache filename is chosen according to first filename only, which makes multi-file (glob) caching difficult
# cache writing is blocked for now in these cases. Will be added in the future (see save_cache_to_disk_if_needed)
disk_db_filename = self._generate_disk_db_filename(self.atomic_fns[0])
should_read_from_cache = self._get_should_read_from_cache(disk_db_filename)
xprint("should read from cache %s" % should_read_from_cache)
return disk_db_filename,should_read_from_cache
def get_planned_table_name(self):
return normalize_filename_to_table_name(os.path.basename(self.atomic_fns[0]))
def make_data_available(self,stop_after_analysis):
xprint("In make_data_available. db_id %s db_to_use %s" % (self.db_id,self.db_to_use))
assert self.db_id is not None
disk_db_filename, should_read_from_cache = self.calculate_should_read_from_cache()
xprint("disk_db_filename=%s should_read_from_cache=%s" % (disk_db_filename,should_read_from_cache))
database_info = DatabaseInfo(self.db_id,self.db_to_use, needs_closing=True)
xprint("db %s (%s) has been added to the database list" % (self.db_id, self.db_to_use))
self.delimited_file_reader.open_file()
table_creator = self.__analyze_delimited_file(database_info)
self.mfs_structure = MaterializedStateTableStructure(self.qtable_name, self.atomic_fns, self.db_id,
table_creator.column_inferer.get_column_names(),
table_creator.column_inferer.get_column_types(),
None,
self.target_table_name,
self.source_type,
self.source,
self.get_planned_table_name())
content_signature = table_creator.content_signature
content_signature_key = self.db_to_use.calculate_content_signature_key(content_signature)
xprint("table creator signature key: %s" % content_signature_key)
relevant_table = self.db_to_use.get_from_qcatalog(content_signature)['temp_table_name']
if not stop_after_analysis:
table_creator.perform_read_fully(self.dialect_id)
self.save_cache_to_disk_if_needed(disk_db_filename, table_creator)
self.delimited_file_reader.close_file()
return database_info, relevant_table
def save_cache_to_disk_if_needed(self, disk_db_filename, table_creator):
if len(self.atomic_fns) > 1:
xprint("Cannot save cache for multi-files for now, deciding auto-naming for cache is challenging. Will be added in the future.")
return
effective_write_caching = self.input_params.write_caching
if effective_write_caching:
if self.can_store_as_cached:
assert self.table_source_type != TableSourceType.DELIMITED_FILE_WITH_UNUSED_QSQL
xprint("Going to write file cache for %s. Disk filename is %s" % (",".join(self.atomic_fns), disk_db_filename))
self._store_qsql(table_creator.sqlite_db, disk_db_filename)
else:
xprint("Database has been provided externally. Skipping storing a cached version of the data")
def _store_qsql(self, source_sqlite_db, disk_db_filename):
xprint("Storing data as disk db")
disk_db_conn = sqlite3.connect(disk_db_filename)
with disk_db_conn:
source_sqlite_db.conn.backup(disk_db_conn)
xprint("Written db to disk: disk db filename %s" % (disk_db_filename))
disk_db_conn.close()
def _generate_db_name(self, qtable_name):
return 'e_%s_fn_%s' % (self.engine_id,normalize_filename_to_table_name(qtable_name))
class MaterialiedDataStreamState(MaterializedDelimitedFileState):
def __init__(self, table_source_type, qtable_name, input_params, dialect_id, engine_id, data_stream, stream_target_db): ## should pass adhoc_db
assert data_stream is not None
super().__init__(table_source_type, qtable_name, input_params, dialect_id, engine_id,target_table_name=None)
self.data_stream = data_stream
self.stream_target_db = stream_target_db
self.target_table_name = None
def get_planned_table_name(self):
return 'data_stream_%s' % (normalize_filename_to_table_name(self.source))
def get_materialized_state_type(self):
return MaterializedStateType.DATA_STREAM
def initialize(self):
self.start_time = time.time()
if self.input_params.gzipped_input:
raise CannotUnzipDataStreamException()
self.source_type = self.table_source_type
self.source = self.data_stream.stream_id
self.delimited_file_reader = DelimitedFileReader([], self.input_params, self.dialect_id, f=self.data_stream.stream,external_f_name=self.source)
def choose_db_to_use(self,forced_db_to_use=None,stop_after_analysis=False):
assert forced_db_to_use is None
self.db_id = self.stream_target_db.db_id
self.db_to_use = self.stream_target_db
self.target_table_name = self.autodetect_table_name()
return
def calculate_should_read_from_cache(self):
# No disk_db_filename, and no reading from cache when reading a datastream
return None, False
def finalize(self):
super(MaterialiedDataStreamState, self).finalize()
def save_cache_to_disk_if_needed(self, disk_db_filename, table_creator):
xprint("Saving to cache is disabled for data streams")
return
class MaterializedSqliteState(MaterializedState):
def __init__(self,table_source_type,qtable_name,sqlite_filename,table_name, engine_id):
super(MaterializedSqliteState, self).__init__(table_source_type,qtable_name,engine_id)
self.sqlite_filename = sqlite_filename
self.table_name = table_name
self.table_name_autodetected = None
def initialize(self):
super(MaterializedSqliteState, self).initialize()
self.table_name_autodetected = False
if self.table_name is None:
self.table_name = self.autodetect_table_name()
self.table_name_autodetected = True
return
self.validate_table_name()
def get_planned_table_name(self):
if self.table_name_autodetected:
return normalize_filename_to_table_name(os.path.basename(self.qtable_name))
else:
return self.table_name
def autodetect_table_name(self):
db = Sqlite3DB('temp_db','file:%s?immutable=1' % self.sqlite_filename,self.sqlite_filename,create_qcatalog=False)
try:
table_names = list(sorted(db.retrieve_all_table_names()))
if len(table_names) == 1:
return table_names[0]
elif len(table_names) == 0:
raise NoTablesInSqliteException(self.sqlite_filename)
else:
raise TooManyTablesInSqliteException(self.sqlite_filename,table_names)
finally:
db.done()
def validate_table_name(self):
db = Sqlite3DB('temp_db', 'file:%s?immutable=1' % self.sqlite_filename, self.sqlite_filename,
create_qcatalog=False)
try:
table_names = list(db.retrieve_all_table_names())
if self.table_name.lower() not in map(lambda x:x.lower(),table_names):
raise NonExistentTableNameInSqlite(self.sqlite_filename, self.table_name, table_names)
finally:
db.done()
def finalize(self):
super(MaterializedSqliteState, self).finalize()
def get_materialized_state_type(self):
return MaterializedStateType.SQLITE_FILE
def _generate_qsql_only_db_name__temp(self, filenames_str):
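        # Derive a stable, engine-scoped db id from the filename,
        # e.g. 'e_<engine_id>_fn_<40-hex-char sha1 of the filename>'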
return 'e_%s_fn_%s' % (self.engine_id,hashlib.sha1(six.b(filenames_str)).hexdigest())
def choose_db_to_use(self,forced_db_to_use=None,stop_after_analysis=False):
self.source = self.sqlite_filename
self.source_type = self.table_source_type
self.db_id = '%s' % self._generate_qsql_only_db_name__temp(self.qtable_name)
x = 'file:%s?immutable=1' % self.sqlite_filename
self.db_to_use = Sqlite3DB(self.db_id, x, self.sqlite_filename,create_qcatalog=False)
if forced_db_to_use:
xprint("Forced sqlite db_to_use %s" % forced_db_to_use)
new_table_name = forced_db_to_use.attach_and_copy_table(self.db_to_use,self.table_name,stop_after_analysis)
self.table_name = new_table_name
self.db_id = forced_db_to_use.db_id
self.db_to_use = forced_db_to_use
return
def make_data_available(self,stop_after_analysis):
xprint("db %s (%s) has been added to the database list" % (self.db_id, self.db_to_use))
database_info,relevant_table = DatabaseInfo(self.db_id,self.db_to_use, needs_closing=True), self.table_name
column_names, column_types, sqlite_column_types = self._extract_information()
self.mfs_structure = MaterializedStateTableStructure(self.qtable_name, [self.qtable_name], self.db_id,
column_names, column_types, sqlite_column_types,
self.table_name,
self.source_type,self.source,
self.get_planned_table_name())
return database_info, relevant_table
def _extract_information(self):
table_list = self.db_to_use.retrieve_all_table_names()
if len(table_list) == 1:
            table_name = table_list[0]
xprint("Only one table in sqlite database, choosing it: %s" % table_name)
else:
            # self.table_name has either been autodetected or validated as an existing table up the stack
table_name = self.table_name
xprint("Multiple tables in sqlite file. Using provided table name %s" % self.table_name)
table_info = self.db_to_use.get_sqlite_table_info(table_name)
xprint('Table info is %s' % table_info)
column_names = list(map(lambda x: x[1], table_info))
sqlite_column_types = list(map(lambda x: x[2].lower(),table_info))
column_types = list(map(lambda x: sqlite_type_to_python_type(x[2]), table_info))
xprint("Column names and types for table %s: %s" % (table_name, list(zip(column_names, zip(sqlite_column_types,column_types)))))
self.content_signature = OrderedDict()
return column_names, column_types, sqlite_column_types
class MaterializedQsqlState(MaterializedState):
def __init__(self,table_source_type,qtable_name,qsql_filename,table_name, engine_id,input_params,dialect_id):
super(MaterializedQsqlState, self).__init__(table_source_type,qtable_name,engine_id)
self.qsql_filename = qsql_filename
self.table_name = table_name
# These are for cases where the qsql file is just a cache and the original is still there, used for content
# validation
self.input_params = input_params
self.dialect_id = dialect_id
self.table_name_autodetected = None
def initialize(self):
super(MaterializedQsqlState, self).initialize()
self.table_name_autodetected = False
if self.table_name is None:
self.table_name = self.autodetect_table_name()
self.table_name_autodetected = True
return
self.validate_table_name()
def get_planned_table_name(self):
if self.table_name_autodetected:
return normalize_filename_to_table_name(os.path.basename(self.qtable_name))
else:
return self.table_name
def autodetect_table_name(self):
db = Sqlite3DB('temp_db','file:%s?immutable=1' % self.qsql_filename,self.qsql_filename,create_qcatalog=False)
assert db.qcatalog_table_exists()
try:
qcatalog_entries = db.get_all_from_qcatalog()
if len(qcatalog_entries) == 0:
raise NoTableInQsqlExcption(self.qsql_filename)
elif len(qcatalog_entries) == 1:
return qcatalog_entries[0]['temp_table_name']
else:
# TODO Add a test for this
table_names = list(sorted([x['temp_table_name'] for x in qcatalog_entries]))
raise TooManyTablesInQsqlException(self.qsql_filename,table_names)
finally:
db.done()
def validate_table_name(self):
db = Sqlite3DB('temp_db', 'file:%s?immutable=1' % self.qsql_filename, self.qsql_filename,
create_qcatalog=False)
assert db.qcatalog_table_exists()
try:
entry = db.get_from_qcatalog_using_table_name(self.table_name)
if entry is None:
qcatalog_entries = db.get_all_from_qcatalog()
table_names = list(sorted([x['temp_table_name'] for x in qcatalog_entries]))
raise NonExistentTableNameInQsql(self.qsql_filename,self.table_name,table_names)
finally:
db.done()
def finalize(self):
super(MaterializedQsqlState, self).finalize()
def get_materialized_state_type(self):
return MaterializedStateType.QSQL_FILE
def _generate_qsql_only_db_name__temp(self, filenames_str):
return 'e_%s_fn_%s' % (self.engine_id,hashlib.sha1(six.b(filenames_str)).hexdigest())
def choose_db_to_use(self,forced_db_to_use=None,stop_after_analysis=False):
self.source = self.qsql_filename
self.source_type = self.table_source_type
self.db_id = '%s' % self._generate_qsql_only_db_name__temp(self.qtable_name)
x = 'file:%s?immutable=1' % self.qsql_filename
self.db_to_use = Sqlite3DB(self.db_id, x, self.qsql_filename,create_qcatalog=False)
if forced_db_to_use:
xprint("Forced qsql to use forced_db: %s" % forced_db_to_use)
# TODO RLRL Move query to Sqlite3DB
all_table_names = [(x[0],x[1]) for x in self.db_to_use.execute_and_fetch("select content_signature_key,temp_table_name from %s" % self.db_to_use.QCATALOG_TABLE_NAME).results]
csk,t = list(filter(lambda x: x[1] == self.table_name,all_table_names))[0]
xprint("Copying table %s from db_id %s" % (t,self.db_id))
d = self.db_to_use.get_from_qcatalog_using_table_name(t)
new_table_name = forced_db_to_use.attach_and_copy_table(self.db_to_use,self.table_name,stop_after_analysis)
xprint("CS",d['content_signature'])
cs = OrderedDict(json.loads(d['content_signature']))
forced_db_to_use.add_to_qcatalog_table(new_table_name, cs, d['creation_time'],
d['source_type'], d['source'])
self.table_name = new_table_name
self.db_id = forced_db_to_use.db_id
self.db_to_use = forced_db_to_use
return
def make_data_available(self,stop_after_analysis):
xprint("db %s (%s) has been added to the database list" % (self.db_id, self.db_to_use))
database_info,relevant_table = self._read_table_from_cache(stop_after_analysis)
column_names, column_types, sqlite_column_types = self._extract_information()
self.mfs_structure = MaterializedStateTableStructure(self.qtable_name, [self.qtable_name], self.db_id,
column_names, column_types, sqlite_column_types,
self.table_name,
self.source_type,self.source,
self.get_planned_table_name())
return database_info, relevant_table
def _extract_information(self):
assert self.db_to_use.qcatalog_table_exists()
table_info = self.db_to_use.get_sqlite_table_info(self.table_name)
xprint('table_name=%s Table info is %s' % (self.table_name,table_info))
x = self.db_to_use.get_from_qcatalog_using_table_name(self.table_name)
column_names = list(map(lambda x: x[1], table_info))
sqlite_column_types = list(map(lambda x: x[2].lower(),table_info))
column_types = list(map(lambda x: sqlite_type_to_python_type(x[2]), table_info))
self.content_signature = OrderedDict(
**json.loads(x['content_signature']))
xprint('Inferred column names and types from qsql: %s' % list(zip(column_names, zip(sqlite_column_types,column_types))))
return column_names, column_types, sqlite_column_types
def _backing_original_file_exists(self):
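        # True when the qsql file is a cache of a still-existing delimited file,
        # e.g. qtable "data.csv" that was resolved to the cache file "data.csv.qsql"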
return '%s.qsql' % self.qtable_name == self.qsql_filename
def _read_table_from_cache(self, stop_after_analysis):
if self._backing_original_file_exists():
xprint("Found a matching source file for qsql file with qtable name %s. Checking content signature by creating a temp MFDS + analysis" % self.qtable_name)
mdfs = MaterializedDelimitedFileState(TableSourceType.DELIMITED_FILE,self.qtable_name,self.input_params,self.dialect_id,self.engine_id,target_table_name=None)
mdfs.initialize()
mdfs.choose_db_to_use(forced_db_to_use=None,stop_after_analysis=stop_after_analysis)
_,_ = mdfs.make_data_available(stop_after_analysis=True)
original_file_content_signature = mdfs.content_signature
original_file_content_signature_key = self.db_to_use.calculate_content_signature_key(original_file_content_signature)
qcatalog_entry = self.db_to_use.get_from_qcatalog_using_table_name(self.table_name)
if qcatalog_entry is None:
raise Exception('missing content signature!')
xprint("Actual Signature Key: %s Expected Signature Key: %s" % (qcatalog_entry['content_signature_key'],original_file_content_signature_key))
actual_content_signature = json.loads(qcatalog_entry['content_signature'])
xprint("Validating content signatures: original %s vs qsql %s" % (original_file_content_signature,actual_content_signature))
validate_content_signature(self.qtable_name, original_file_content_signature, self.qsql_filename, actual_content_signature,dump=True)
mdfs.finalize()
return DatabaseInfo(self.db_id,self.db_to_use, needs_closing=True), self.table_name
class MaterializedStateTableStructure(object):
def __init__(self,qtable_name, atomic_fns, db_id, column_names, python_column_types, sqlite_column_types, table_name_for_querying,source_type,source,planned_table_name):
self.qtable_name = qtable_name
self.atomic_fns = atomic_fns
self.db_id = db_id
self.column_names = column_names
self.python_column_types = python_column_types
self.table_name_for_querying = table_name_for_querying
self.source_type = source_type
self.source = source
self.planned_table_name = planned_table_name
if sqlite_column_types is not None:
self.sqlite_column_types = sqlite_column_types
else:
self.sqlite_column_types = [Sqlite3DB.PYTHON_TO_SQLITE_TYPE_NAMES[t].lower() for t in python_column_types]
def get_table_name_for_querying(self):
return self.table_name_for_querying
def __str__(self):
return "MaterializedStateTableStructure<%s>" % self.__dict__
__repr__ = __str__
class TableCreator(object):
def __str__(self):
return "TableCreator<%s>" % str(self)
__repr__ = __str__
def __init__(self, qtable_name, delimited_file_reader,input_params,sqlite_db=None,target_sqlite_table_name=None):
self.qtable_name = qtable_name
self.delimited_file_reader = delimited_file_reader
self.db_id = sqlite_db.db_id
self.sqlite_db = sqlite_db
self.target_sqlite_table_name = target_sqlite_table_name
self.skip_header = input_params.skip_header
self.gzipped = input_params.gzipped_input
self.table_created = False
self.encoding = input_params.input_encoding
self.mode = input_params.parsing_mode
self.expected_column_count = input_params.expected_column_count
self.input_delimiter = input_params.delimiter
self.with_universal_newlines = input_params.with_universal_newlines
self.column_inferer = TableColumnInferer(input_params)
self.pre_creation_rows = []
self.buffered_inserts = []
self.effective_column_names = None
# Column type indices for columns that contain numeric types. Lazily initialized
# so column inferer can do its work before this information is needed
self.numeric_column_indices = None
self.state = TableCreatorState.INITIALIZED
self.content_signature = None
def _generate_content_signature(self):
if self.state != TableCreatorState.ANALYZED:
# TODO Change to assertion
raise Exception('Bug - Wrong state %s. Table needs to be analyzed before a content signature can be calculated' % self.state)
size = self.delimited_file_reader.get_size_hash()
last_modification_time = self.delimited_file_reader.get_last_modification_time_hash()
m = OrderedDict({
"_signature_version": "v1",
"skip_header": self.skip_header,
"gzipped": self.gzipped,
"with_universal_newlines": self.with_universal_newlines,
"encoding": self.encoding,
"mode": self.mode,
"expected_column_count": self.expected_column_count,
"input_delimiter": self.input_delimiter,
"inferer": self.column_inferer._generate_content_signature(),
"original_file_size": size,
"last_modification_time": last_modification_time
})
return m
def validate_extra_header_if_needed(self, file_number, filename,col_vals):
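        # When multiple files are globbed into a single table, each file after the
        # first may repeat the header line; if so, it must match the first file's
        # header exactly, otherwise a BadHeaderException is raised.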
xprint("HHX validate",file_number,filename,col_vals)
if not self.skip_header:
xprint("No need to validate header")
return False
if file_number == 0:
xprint("First file, no need to validate extra header")
return False
header_already_exists = self.column_inferer.header_row is not None
if header_already_exists:
xprint("Validating extra header")
if tuple(self.column_inferer.header_row) != tuple(col_vals):
raise BadHeaderException("Extra header '{}' in file '{}' mismatches original header '{}' from file '{}'. Table name is '{}'".format(
",".join(col_vals),filename,
",".join(self.column_inferer.header_row),
self.column_inferer.header_row_filename,
self.qtable_name))
xprint("header already exists: %s" % self.column_inferer.header_row)
else:
xprint("Header doesn't already exist")
return header_already_exists
def _populate(self,dialect,stop_after_analysis=False):
total_data_lines_read = 0
try:
try:
for file_name,file_number,is_first_line,col_vals in self.delimited_file_reader.generate_rows():
if is_first_line:
if self.validate_extra_header_if_needed(file_number,file_name,col_vals):
continue
self._insert_row(file_name, col_vals)
if stop_after_analysis:
if self.column_inferer.inferred:
xprint("Stopping after analysis")
return
if self.delimited_file_reader.get_lines_read() == 0 and self.skip_header:
raise MissingHeaderException("Header line is expected but missing in file %s" % ",".join(self.delimited_file_reader.atomic_fns))
total_data_lines_read += self.delimited_file_reader.lines_read - (1 if self.skip_header else 0)
xprint("Total Data lines read %s" % total_data_lines_read)
except StrictModeColumnCountMismatchException as e:
raise ColumnCountMismatchException(
'Strict mode - Expected %s columns instead of %s columns in file %s row %s. Either use relaxed modes or check your delimiter' % (
e.expected_col_count, e.actual_col_count, normalized_filename(e.atomic_fn), e.lines_read))
except FluffyModeColumnCountMismatchException as e:
raise ColumnCountMismatchException(
'Deprecated fluffy mode - Too many columns in file %s row %s (%s fields instead of %s fields). Consider moving to either relaxed or strict mode' % (
normalized_filename(e.atomic_fn), e.lines_read, e.actual_col_count, e.expected_col_count))
finally:
self._flush_inserts()
if not self.table_created:
self.column_inferer.force_analysis()
self._do_create_table(self.qtable_name)
self.sqlite_db.conn.commit()
def perform_analyze(self, dialect):
xprint("Analyzing... %s" % dialect)
if self.state == TableCreatorState.INITIALIZED:
self._populate(dialect,stop_after_analysis=True)
self.state = TableCreatorState.ANALYZED
self.content_signature = self._generate_content_signature()
content_signature_key = self.sqlite_db.calculate_content_signature_key(self.content_signature)
xprint("Setting content signature after analysis: %s" % content_signature_key)
else:
# TODO Convert to assertion
raise Exception('Bug - Wrong state %s' % self.state)
def perform_read_fully(self, dialect):
if self.state == TableCreatorState.ANALYZED:
self._populate(dialect,stop_after_analysis=False)
self.state = TableCreatorState.FULLY_READ
else:
# TODO Convert to assertion
raise Exception('Bug - Wrong state %s' % self.state)
def _flush_pre_creation_rows(self, filename):
for i, col_vals in enumerate(self.pre_creation_rows):
if self.skip_header and i == 0:
# skip header line
continue
self._insert_row(filename, col_vals)
self._flush_inserts()
self.pre_creation_rows = []
def _insert_row(self, filename, col_vals):
# If table has not been created yet
if not self.table_created:
# Try to create it along with another "example" line of data
self.try_to_create_table(filename, col_vals)
# If the table is still not created, then we don't have enough data, just
# store the data and return
if not self.table_created:
self.pre_creation_rows.append(col_vals)
return
# The table already exists, so we can just add a new row
self._insert_row_i(col_vals)
def initialize_numeric_column_indices_if_needed(self):
# Lazy initialization of numeric column indices
if self.numeric_column_indices is None:
column_types = self.column_inferer.get_column_types()
self.numeric_column_indices = [idx for idx, column_type in enumerate(
column_types) if self.sqlite_db.is_numeric_type(column_type)]
def nullify_values_if_needed(self, col_vals):
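        # Replace empty strings in numeric columns with None, so that sqlite
        # stores a proper NULL instead of an empty string.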
new_vals = col_vals[:]
col_count = len(col_vals)
for i in self.numeric_column_indices:
if i >= col_count:
continue
v = col_vals[i]
if v == '':
new_vals[i] = None
return new_vals
def normalize_col_vals(self, col_vals):
        # Make sure that numeric column indices are initialized
self.initialize_numeric_column_indices_if_needed()
col_vals = self.nullify_values_if_needed(col_vals)
expected_col_count = self.column_inferer.get_column_count()
actual_col_count = len(col_vals)
if self.mode == 'strict':
if actual_col_count != expected_col_count:
raise StrictModeColumnCountMismatchException(",".join(self.delimited_file_reader.atomic_fns), expected_col_count,actual_col_count,self.delimited_file_reader.get_lines_read())
return col_vals
# in all non strict mode, we add dummy data to missing columns
if actual_col_count < expected_col_count:
col_vals = col_vals + \
[None for x in range(expected_col_count - actual_col_count)]
# in relaxed mode, we merge all extra columns to the last column value
if self.mode == 'relaxed':
if actual_col_count > expected_col_count:
xxx = col_vals[:expected_col_count - 1] + \
[self.input_delimiter.join([v if v is not None else '' for v in
col_vals[expected_col_count - 1:]])]
return xxx
else:
return col_vals
assert False, "Unidentified parsing mode %s" % self.mode
def _insert_row_i(self, col_vals):
col_vals = self.normalize_col_vals(col_vals)
if self.effective_column_names is None:
self.effective_column_names = self.column_inferer.column_names[:len(col_vals)]
if len(self.effective_column_names) > 0:
self.buffered_inserts.append(col_vals)
else:
self.buffered_inserts.append([""])
if len(self.buffered_inserts) < 5000:
return
self._flush_inserts()
def _flush_inserts(self):
# If the table is still not created, then we don't have enough data
if not self.table_created:
return
if len(self.buffered_inserts) > 0:
insert_row_stmt = self.sqlite_db.generate_insert_row(
self.target_sqlite_table_name, self.effective_column_names)
self.sqlite_db.update_many(insert_row_stmt, self.buffered_inserts)
self.buffered_inserts = []
def try_to_create_table(self, filename, col_vals):
if self.table_created:
# TODO Convert to assertion
raise Exception('Table is already created')
# Add that line to the column inferer
result = self.column_inferer.analyze(filename, col_vals)
# If inferer succeeded,
if result:
self._do_create_table(filename)
else:
pass # We don't have enough information for creating the table yet
def _do_create_table(self,filename):
# Get the column definition dict from the inferer
column_dict = self.column_inferer.get_column_dict()
# Guard against empty tables (instead of preventing the creation, just create with a dummy column)
if len(column_dict) == 0:
column_dict = { 'dummy_column_for_empty_tables' : str }
ordered_column_names = [ 'dummy_column_for_empty_tables' ]
else:
ordered_column_names = self.column_inferer.get_column_names()
# Create the CREATE TABLE statement
create_table_stmt = self.sqlite_db.generate_create_table(
self.target_sqlite_table_name, ordered_column_names, column_dict)
# And create the table itself
self.sqlite_db.execute_and_fetch(create_table_stmt)
# Mark the table as created
self.table_created = True
self._flush_pre_creation_rows(filename)
def determine_max_col_lengths(m,output_field_quoting_func,output_delimiter):
if len(m) == 0:
return []
max_lengths = [0 for x in range(0, len(m[0]))]
for row_index in range(0, len(m)):
for col_index in range(0, len(m[0])):
# TODO Optimize this
new_len = len("{}".format(output_field_quoting_func(output_delimiter,m[row_index][col_index])))
if new_len > max_lengths[col_index]:
max_lengths[col_index] = new_len
return max_lengths
def print_credentials():
print("q version %s" % q_version, file=sys.stderr)
print("Python: %s" % " // ".join([str(x).strip() for x in sys.version.split("\n")]), file=sys.stderr)
print("Copyright (C) 2012-2021 Harel Ben-Attia (harelba@gmail.com, @harelba on twitter)", file=sys.stderr)
print("https://harelba.github.io/q/", file=sys.stderr)
print(file=sys.stderr)
class QWarning(object):
def __init__(self,exception,msg):
self.exception = exception
self.msg = msg
class QError(object):
def __init__(self,exception,msg,errorcode):
self.exception = exception
self.msg = msg
self.errorcode = errorcode
self.traceback = traceback.format_exc()
def __str__(self):
return "QError<errorcode=%s,msg=%s,exception=%s,traceback=%s>" % (self.errorcode,self.msg,self.exception,str(self.traceback))
__repr__ = __str__
class QMetadata(object):
def __init__(self,table_structures={},new_table_structures={},output_column_name_list=None):
self.table_structures = table_structures
self.new_table_structures = new_table_structures
self.output_column_name_list = output_column_name_list
def __str__(self):
return "QMetadata<%s" % (self.__dict__)
__repr__ = __str__
class QOutput(object):
def __init__(self,data=None,metadata=None,warnings=[],error=None):
self.data = data
self.metadata = metadata
self.warnings = warnings
self.error = error
if error is None:
self.status = 'ok'
else:
self.status = 'error'
def __str__(self):
s = []
s.append('status=%s' % self.status)
if self.error is not None:
s.append("error=%s" % self.error.msg)
if len(self.warnings) > 0:
s.append("warning_count=%s" % len(self.warnings))
if self.data is not None:
s.append("row_count=%s" % len(self.data))
else:
s.append("row_count=None")
if self.metadata is not None:
s.append("metadata=<%s>" % self.metadata)
else:
s.append("metadata=None")
return "QOutput<%s>" % ",".join(s)
__repr__ = __str__
class QInputParams(object):
def __init__(self,skip_header=False,
delimiter=' ',input_encoding='UTF-8',gzipped_input=False,with_universal_newlines=False,parsing_mode='relaxed',
expected_column_count=None,keep_leading_whitespace_in_values=False,
disable_double_double_quoting=False,disable_escaped_double_quoting=False,
disable_column_type_detection=False,
input_quoting_mode='minimal',stdin_file=None,stdin_filename='-',
max_column_length_limit=131072,
read_caching=False,
write_caching=False,
max_attached_sqlite_databases = 10):
self.skip_header = skip_header
self.delimiter = delimiter
self.input_encoding = input_encoding
self.gzipped_input = gzipped_input
self.with_universal_newlines = with_universal_newlines
self.parsing_mode = parsing_mode
self.expected_column_count = expected_column_count
self.keep_leading_whitespace_in_values = keep_leading_whitespace_in_values
self.disable_double_double_quoting = disable_double_double_quoting
self.disable_escaped_double_quoting = disable_escaped_double_quoting
self.input_quoting_mode = input_quoting_mode
self.disable_column_type_detection = disable_column_type_detection
self.max_column_length_limit = max_column_length_limit
self.read_caching = read_caching
self.write_caching = write_caching
self.max_attached_sqlite_databases = max_attached_sqlite_databases
def merged_with(self,input_params):
params = QInputParams(**self.__dict__)
if input_params is not None:
params.__dict__.update(**input_params.__dict__)
return params
def __str__(self):
return "QInputParams<%s>" % str(self.__dict__)
def __repr__(self):
return "QInputParams(...)"
class DataStream(object):
# TODO Can stream-id be removed?
def __init__(self,stream_id,filename,stream):
self.stream_id = stream_id
self.filename = filename
self.stream = stream
def __str__(self):
return "QDataStream<stream_id=%s,filename=%s,stream=%s>" % (self.stream_id,self.filename,self.stream)
__repr__ = __str__
class DataStreams(object):
def __init__(self, data_streams_dict):
assert type(data_streams_dict) == dict
self.validate(data_streams_dict)
self.data_streams_dict = data_streams_dict
def validate(self,d):
for k in d:
v = d[k]
if type(k) != str or type(v) != DataStream:
raise Exception('Bug - Invalid dict: %s' % str(d))
def get_for_filename(self, filename):
xprint("Data streams dict is %s. Trying to find %s" % (self.data_streams_dict,filename))
x = self.data_streams_dict.get(filename)
return x
def is_data_stream(self,filename):
return filename in self.data_streams_dict
class DatabaseInfo(object):
def __init__(self,db_id,sqlite_db,needs_closing):
self.db_id = db_id
self.sqlite_db = sqlite_db
self.needs_closing = needs_closing
def __str__(self):
return "DatabaseInfo<sqlite_db=%s,needs_closing=%s>" % (self.sqlite_db,self.needs_closing)
__repr__ = __str__
class QTextAsData(object):
def __init__(self,default_input_params=QInputParams(),data_streams_dict=None):
self.engine_id = str(uuid.uuid4()).replace("-","_")
self.default_input_params = default_input_params
xprint("Default input params: %s" % self.default_input_params)
self.loaded_table_structures_dict = OrderedDict()
self.databases = OrderedDict()
if data_streams_dict is not None:
self.data_streams = DataStreams(data_streams_dict)
else:
self.data_streams = DataStreams({})
# Create DB object
self.query_level_db_id = 'query_e_%s' % self.engine_id
self.query_level_db = Sqlite3DB(self.query_level_db_id,
'file:%s?mode=memory&cache=shared' % self.query_level_db_id,'<query-level-db>',create_qcatalog=True)
self.adhoc_db_id = 'adhoc_e_%s' % self.engine_id
self.adhoc_db_name = 'file:%s?mode=memory&cache=shared' % self.adhoc_db_id
self.adhoc_db = Sqlite3DB(self.adhoc_db_id,self.adhoc_db_name,'<adhoc-db>',create_qcatalog=True)
self.query_level_db.conn.execute("attach '%s' as %s" % (self.adhoc_db_name,self.adhoc_db_id))
self.add_db_to_database_list(DatabaseInfo(self.query_level_db_id,self.query_level_db,needs_closing=True))
self.add_db_to_database_list(DatabaseInfo(self.adhoc_db_id,self.adhoc_db,needs_closing=True))
def done(self):
xprint("Inside done: Database list is %s" % self.databases)
for db_id in reversed(self.databases.keys()):
database_info = self.databases[db_id]
if database_info.needs_closing:
xprint("Gonna close database %s - %s" % (db_id,self.databases[db_id]))
self.databases[db_id].sqlite_db.done()
xprint("Database %s has been closed" % db_id)
else:
xprint("No need to close database %s" % db_id)
xprint("Closed all databases")
input_quoting_modes = { 'minimal' : csv.QUOTE_MINIMAL,
'all' : csv.QUOTE_ALL,
# nonnumeric is not supported for input quoting modes, since we determine the data types
# ourselves instead of letting the csv module try to identify the types
'none' : csv.QUOTE_NONE }
def determine_proper_dialect(self,input_params):
input_quoting_mode_csv_numeral = QTextAsData.input_quoting_modes[input_params.input_quoting_mode]
if input_params.keep_leading_whitespace_in_values:
skip_initial_space = False
else:
skip_initial_space = True
dialect = {'skipinitialspace': skip_initial_space,
'delimiter': input_params.delimiter, 'quotechar': '"' }
dialect['quoting'] = input_quoting_mode_csv_numeral
dialect['doublequote'] = input_params.disable_double_double_quoting
if input_params.disable_escaped_double_quoting:
dialect['escapechar'] = '\\'
return dialect
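    # For the default QInputParams this yields a dialect such as
    # {'skipinitialspace': True, 'delimiter': ' ', 'quotechar': '"',
    #  'quoting': csv.QUOTE_MINIMAL, ...}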
def get_dialect_id(self,filename):
return 'q_dialect_%s' % filename
def _open_files_and_get_mfss(self,qtable_name,input_params,dialect):
materialized_file_dict = OrderedDict()
materialized_state_type,table_source_type,source_info = detect_qtable_name_source_info(qtable_name,self.data_streams,read_caching_enabled=input_params.read_caching)
xprint("Detected source type %s source info %s" % (materialized_state_type,source_info))
if materialized_state_type == MaterializedStateType.DATA_STREAM:
(data_stream,) = source_info
ms = MaterialiedDataStreamState(table_source_type,qtable_name,input_params,dialect,self.engine_id,data_stream,stream_target_db=self.adhoc_db)
effective_qtable_name = data_stream.stream_id
elif materialized_state_type == MaterializedStateType.QSQL_FILE:
(qsql_filename,table_name) = source_info
ms = MaterializedQsqlState(table_source_type,qtable_name, qsql_filename=qsql_filename, table_name=table_name,
engine_id=self.engine_id, input_params=input_params, dialect_id=dialect)
effective_qtable_name = '%s:::%s' % (qsql_filename, table_name)
elif materialized_state_type == MaterializedStateType.SQLITE_FILE:
(sqlite_filename,table_name) = source_info
ms = MaterializedSqliteState(table_source_type,qtable_name, sqlite_filename=sqlite_filename, table_name=table_name,
engine_id=self.engine_id)
effective_qtable_name = '%s:::%s' % (sqlite_filename, table_name)
elif materialized_state_type == MaterializedStateType.DELIMITED_FILE:
(source_qtable_name,_) = source_info
ms = MaterializedDelimitedFileState(table_source_type,source_qtable_name, input_params, dialect, self.engine_id)
effective_qtable_name = source_qtable_name
else:
assert False, "Unknown file type for qtable %s should have exited with an exception" % (qtable_name)
assert effective_qtable_name not in materialized_file_dict
materialized_file_dict[effective_qtable_name] = ms
xprint("MS dict: %s" % str(materialized_file_dict))
return list([item for item in materialized_file_dict.values()])
def _load_mfs(self,mfs,input_params,dialect_id,stop_after_analysis):
xprint("Loading MFS:", mfs)
materialized_state_type = mfs.get_materialized_state_type()
xprint("Detected materialized state type for %s: %s" % (mfs.qtable_name,materialized_state_type))
mfs.initialize()
if not materialized_state_type in [MaterializedStateType.DATA_STREAM]:
if stop_after_analysis or self.should_copy_instead_of_attach(input_params):
xprint("Should copy instead of attaching. Forcing db to use to adhoc db")
forced_db_to_use = self.adhoc_db
else:
forced_db_to_use = None
else:
forced_db_to_use = None
mfs.choose_db_to_use(forced_db_to_use,stop_after_analysis)
xprint("Chosen db to use: source %s source_type %s db_id %s db_to_use %s" % (mfs.source,mfs.source_type,mfs.db_id,mfs.db_to_use))
database_info,relevant_table = mfs.make_data_available(stop_after_analysis)
if not self.is_adhoc_db(mfs.db_to_use) and not self.should_copy_instead_of_attach(input_params):
if not self.already_attached_to_query_level_db(mfs.db_to_use):
self.attach_to_db(mfs.db_to_use, self.query_level_db)
self.add_db_to_database_list(database_info)
else:
xprint("DB %s is already attached to query level db. No need to attach it again.")
mfs.finalize()
xprint("MFS Loaded")
return mfs.source,mfs.source_type
def add_db_to_database_list(self,database_info):
db_id = database_info.db_id
assert db_id is not None
assert database_info.sqlite_db is not None
if db_id in self.databases:
# TODO Convert to assertion
if id(database_info.sqlite_db) != id(self.databases[db_id].sqlite_db):
raise Exception('Bug - database already in database list: db_id %s: old %s new %s' % (db_id,self.databases[db_id],database_info))
else:
return
self.databases[db_id] = database_info
def is_adhoc_db(self,db_to_use):
return db_to_use.db_id == self.adhoc_db_id
def should_copy_instead_of_attach(self,input_params):
attached_database_count = len(self.query_level_db.get_sqlite_database_list())
x = attached_database_count >= input_params.max_attached_sqlite_databases
xprint("should_copy_instead_of_attach: attached_database_count=%s should_copy=%s" % (attached_database_count,x))
return x
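    # sqlite caps the number of concurrently attached databases (the compiled-in
    # default for SQLITE_MAX_ATTACHED is 10), so beyond the configured limit the
    # data is copied into the adhoc db instead of attaching yet another database.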
def _load_data(self,qtable_name,input_params=QInputParams(),stop_after_analysis=False):
xprint("Attempting to load data for materialized file names %s" % qtable_name)
q_dialect = self.determine_proper_dialect(input_params)
xprint("Dialect is %s" % q_dialect)
dialect_id = self.get_dialect_id(qtable_name)
csv.register_dialect(dialect_id, **q_dialect)
xprint("qtable metadata for loading is %s" % qtable_name)
mfss = self._open_files_and_get_mfss(qtable_name,
input_params,
dialect_id)
        assert len(mfss) == 1, "a single materialized state now encapsulates an entire table"
mfs = mfss[0]
xprint("MFS to load: %s" % mfs)
if qtable_name in self.loaded_table_structures_dict.keys():
xprint("Atomic filename %s found. no need to load" % qtable_name)
return None
xprint("qtable %s not found - loading" % qtable_name)
self._load_mfs(mfs, input_params, dialect_id, stop_after_analysis)
xprint("Loaded: source-type %s source %s mfs_structure %s" % (mfs.source_type, mfs.source, mfs.mfs_structure))
assert qtable_name not in self.loaded_table_structures_dict, "loaded_table_structures_dict has been changed to have a non-list value"
self.loaded_table_structures_dict[qtable_name] = mfs.mfs_structure
return mfs.mfs_structure
def already_attached_to_query_level_db(self,db_to_attach):
attached_dbs = list(map(lambda x:x[1],self.query_level_db.get_sqlite_database_list()))
return db_to_attach.db_id in attached_dbs
def attach_to_db(self, target_db, source_db):
q = "attach '%s' as %s" % (target_db.sqlite_db_url,target_db.db_id)
xprint("Attach query: %s" % q)
        try:
            source_db.execute_and_fetch(q)
        except SqliteOperationalErrorException as e:
            if 'too many attached databases' in str(e):
                raise TooManyAttachedDatabasesException('There are too many attached databases. Use a proper --max-attached-sqlite-databases parameter which is below the maximum. Original error: %s' % str(e))
            # Re-raise any other operational error instead of silently swallowing it
            raise
def detach_from_db(self, target_db, source_db):
q = "detach %s" % (target_db.db_id)
xprint("Detach query: %s" % q)
        source_db.execute_and_fetch(q)
def load_data(self,filename,input_params=QInputParams(),stop_after_analysis=False):
return self._load_data(filename,input_params,stop_after_analysis=stop_after_analysis)
def _ensure_data_is_loaded_for_sql(self,sql_object,input_params,data_streams=None,stop_after_analysis=False):
xprint("Ensuring Data load")
new_table_structures = OrderedDict()
# For each "table name"
for qtable_name in sql_object.qtable_names:
tss = self._load_data(qtable_name,input_params,stop_after_analysis=stop_after_analysis)
if tss is not None:
xprint("New Table Structures:",new_table_structures)
assert qtable_name not in new_table_structures, "new_table_structures was changed not to contain a list as a value"
new_table_structures[qtable_name] = tss
return new_table_structures
def materialize_query_level_db(self,save_db_to_disk_filename,sql_object):
# TODO More robust creation - Create the file in a separate folder and move it to the target location only after success
materialized_db = Sqlite3DB("materialized","file:%s" % save_db_to_disk_filename,save_db_to_disk_filename,create_qcatalog=False)
table_name_mapping = OrderedDict()
# For each table in the query
effective_table_names = sql_object.get_qtable_name_effective_table_names()
for i, qtable_name in enumerate(effective_table_names):
# table name, in the format db_id.table_name
effective_table_name_for_qtable_name = effective_table_names[qtable_name]
source_db_id, actual_table_name_in_db = effective_table_name_for_qtable_name.split(".", 1)
# The DatabaseInfo instance for this db
source_database = self.databases[source_db_id]
if source_db_id != self.query_level_db_id:
self.attach_to_db(source_database.sqlite_db,materialized_db)
ts = self.loaded_table_structures_dict[qtable_name]
proposed_new_table_name = ts.planned_table_name
xprint("Proposed table name is %s" % proposed_new_table_name)
new_table_name = materialized_db.find_new_table_name(proposed_new_table_name)
xprint("Materializing",source_db_id,actual_table_name_in_db,"as",new_table_name)
# Copy the table into the materialized database
xx = materialized_db.execute_and_fetch('CREATE TABLE %s AS SELECT * FROM %s' % (new_table_name,effective_table_name_for_qtable_name))
table_name_mapping[effective_table_name_for_qtable_name] = new_table_name
# TODO RLRL Preparation for writing materialized database as a qsql file
# if source_database.sqlite_db.qcatalog_table_exists():
# qcatalog_entry = source_database.sqlite_db.get_from_qcatalog_using_table_name(actual_table_name_in_db)
# # TODO RLRL Encapsulate dictionary transform inside qcatalog access methods
# materialized_db.add_to_qcatalog_table(new_table_name,OrderedDict(json.loads(qcatalog_entry['content_signature'])),
# qcatalog_entry['creation_time'],
# qcatalog_entry['source_type'],
# qcatalog_entry['source_type'])
# xprint("PQX Added to qcatalog",source_db_id,actual_table_name_in_db,'as',new_table_name)
# else:
# xprint("PQX Skipped adding to qcatalog",source_db_id,actual_table_name_in_db)
            if source_db_id != self.query_level_db_id:
self.detach_from_db(source_database.sqlite_db,materialized_db)
return table_name_mapping
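    # e.g. materialize_query_level_db('out.sqlite', sql_object) copies every table
    # referenced by the query into out.sqlite and returns a mapping such as
    # {'<db_id>.<table>': '<new_table_name>'}, later used to rewrite the query.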
def validate_query(self,sql_object,table_structures):
for qtable_name in sql_object.qtable_names:
relevant_table_structures = [table_structures[qtable_name]]
column_names = None
column_types = None
for ts in relevant_table_structures:
names = ts.column_names
types = ts.python_column_types
xprint("Comparing column names: %s with %s" % (column_names,names))
if column_names is None:
column_names = names
else:
if column_names != names:
raise BadHeaderException("Column names differ for table %s: %s vs %s" % (
qtable_name, ",".join(column_names), ",".join(names)))
xprint("Comparing column types: %s with %s" % (column_types,types))
if column_types is None:
column_types = types
else:
if column_types != types:
raise BadHeaderException("Column types differ for table %s: %s vs %s" % (
qtable_name, ",".join(column_types), ",".join(types)))
xprint("All column names match for qtable name %s: column names: %s column types: %s" % (ts.qtable_name,column_names,column_types))
xprint("Query validated")
def _execute(self,query_str,input_params=None,data_streams=None,stop_after_analysis=False,save_db_to_disk_filename=None):
warnings = []
error = None
table_structures = []
db_results_obj = None
effective_input_params = self.default_input_params.merged_with(input_params)
if type(query_str) != unicode:
try:
# Heuristic attempt to auto convert the query to unicode before failing
query_str = query_str.decode('utf-8')
except:
                error = QError(EncodedQueryException(''),"Query should be in unicode. Please make sure to provide a unicode literal string or decode it using the proper character encoding.",91)
return QOutput(error = error)
try:
# Create SQL statement
sql_object = Sql('%s' % query_str, self.data_streams)
load_start_time = time.time()
iprint("Going to ensure data is loaded. Currently loaded tables: %s" % str(self.loaded_table_structures_dict))
new_table_structures = self._ensure_data_is_loaded_for_sql(sql_object,effective_input_params,data_streams,stop_after_analysis=stop_after_analysis)
iprint("Ensured data is loaded. loaded tables: %s" % self.loaded_table_structures_dict)
self.validate_query(sql_object,self.loaded_table_structures_dict)
iprint("Query validated")
sql_object.materialize_using(self.loaded_table_structures_dict)
iprint("Materialized sql object")
if save_db_to_disk_filename is not None:
xprint("Saving query data to disk")
dump_start_time = time.time()
table_name_mapping = self.materialize_query_level_db(save_db_to_disk_filename,sql_object)
print("Data has been saved into %s . Saving has taken %4.3f seconds" % (save_db_to_disk_filename,time.time()-dump_start_time), file=sys.stderr)
effective_sql = sql_object.get_effective_sql(table_name_mapping)
print("Query to run on the database: %s;" % effective_sql, file=sys.stderr)
command_line = 'echo "%s" | sqlite3 %s' % (effective_sql,save_db_to_disk_filename)
print("You can run the query directly from the command line using the following command: %s" % command_line, file=sys.stderr)
# TODO Propagate dump results using a different output class instead of an empty one
return QOutput()
# Ensure that adhoc db is not in the middle of a transaction
self.adhoc_db.conn.commit()
all_databases = self.query_level_db.get_sqlite_database_list()
xprint("Query level db: databases %s" % all_databases)
# Execute the query and fetch the data
db_results_obj = sql_object.execute_and_fetch(self.query_level_db)
iprint("Query executed")
if len(db_results_obj.results) == 0:
warnings.append(QWarning(None, "Warning - data is empty"))
return QOutput(
data = db_results_obj.results,
metadata = QMetadata(
table_structures=self.loaded_table_structures_dict,
new_table_structures=new_table_structures,
output_column_name_list=db_results_obj.query_column_names),
warnings = warnings,
error = error)
except InvalidQueryException as e:
error = QError(e,str(e),118)
except MissingHeaderException as e:
error = QError(e,e.msg,117)
except FileNotFoundException as e:
error = QError(e,e.msg,30)
except SqliteOperationalErrorException as e:
xprint("Sqlite Operational error: %s" % e)
msg = str(e.original_error)
error = QError(e,"query error: %s" % msg,1)
if "no such column" in msg and effective_input_params.skip_header:
                warnings.append(QWarning(e,'Warning - There seems to be a "no such column" error, and -H (header line) exists. Please make sure that you are using the column names from the header line and not the default (cXX) column names. Another issue might be that the file contains a BOM. Files that are encoded with UTF8 and contain a BOM can be read by specifying `-e utf-8-sig` in the command line. Support for non-UTF8 encoding will be provided in the future.'))
except ColumnCountMismatchException as e:
error = QError(e,e.msg,2)
except (UnicodeDecodeError, UnicodeError) as e:
error = QError(e,"Cannot decode data. Try to change the encoding by setting it using the -e parameter. Error:%s" % e,3)
except BadHeaderException as e:
error = QError(e,"Bad header row: %s" % e.msg,35)
except CannotUnzipDataStreamException as e:
error = QError(e,"Cannot decompress standard input. Pipe the input through zcat in order to decompress.",36)
except UniversalNewlinesExistException as e:
error = QError(e,"Data contains universal newlines. Run q with -U to use universal newlines. Please note that q still doesn't support universal newlines for .gz files or for stdin. Route the data through a regular file to use -U.",103)
# deprecated, but shouldn't be used: error = QError(e,"Standard Input must be provided in order to use it as a table",61)
except CouldNotConvertStringToNumericValueException as e:
error = QError(e,"Could not convert string to a numeric value. Did you use `-w nonnumeric` with unquoted string values? Error: %s" % e.msg,58)
except CouldNotParseInputException as e:
error = QError(e,"Could not parse the input. Please make sure to set the proper -w input-wrapping parameter for your input, and that you use the proper input encoding (-e). Error: %s" % e.msg,59)
except ColumnMaxLengthLimitExceededException as e:
error = QError(e,e.msg,31)
# deprecated, but shouldn't be used: error = QError(e,e.msg,79)
except ContentSignatureDiffersException as e:
error = QError(e,"%s vs %s: Content Signatures for table %s differ at %s (source value '%s' disk signature value '%s')" %
(e.original_filename,e.other_filename,e.filenames_str,e.key,e.source_value,e.signature_value),80)
except ContentSignatureDataDiffersException as e:
error = QError(e,e.msg,81)
except MaximumSourceFilesExceededException as e:
error = QError(e,e.msg,82)
except ContentSignatureNotFoundException as e:
error = QError(e,e.msg,83)
except NonExistentTableNameInQsql as e:
msg = "Table %s could not be found in qsql file %s . Existing table names: %s" % (e.table_name,e.qsql_filename,",".join(e.existing_table_names))
error = QError(e,msg,84)
except NonExistentTableNameInSqlite as e:
msg = "Table %s could not be found in sqlite file %s . Existing table names: %s" % (e.table_name,e.qsql_filename,",".join(e.existing_table_names))
error = QError(e,msg,85)
except TooManyTablesInQsqlException as e:
msg = "Could not autodetect table name in qsql file. Existing Tables %s" % ",".join(e.existing_table_names)
error = QError(e,msg,86)
except NoTableInQsqlExcption as e:
msg = "Could not autodetect table name in qsql file. File contains no record of a table"
error = QError(e,msg,97)
except TooManyTablesInSqliteException as e:
msg = "Could not autodetect table name in sqlite file %s . Existing tables: %s" % (e.qsql_filename,",".join(e.existing_table_names))
error = QError(e,msg,87)
except NoTablesInSqliteException as e:
msg = "sqlite file %s has no tables" % e.sqlite_filename
error = QError(e,msg,88)
except TooManyAttachedDatabasesException as e:
msg = str(e)
error = QError(e,msg,89)
except UnknownFileTypeException as e:
msg = str(e)
error = QError(e,msg,95)
except KeyboardInterrupt as e:
warnings.append(QWarning(e,"Interrupted"))
except Exception as e:
global DEBUG
if DEBUG:
xprint(traceback.format_exc())
error = QError(e,repr(e),199)
return QOutput(data=None,warnings = warnings,error = error , metadata=QMetadata(table_structures=self.loaded_table_structures_dict,new_table_structures=self.loaded_table_structures_dict,output_column_name_list=[]))
def execute(self,query_str,input_params=None,save_db_to_disk_filename=None):
r = self._execute(query_str,input_params,stop_after_analysis=False,save_db_to_disk_filename=save_db_to_disk_filename)
return r
def unload(self):
# TODO This would fail, since table structures are just value objects now. Will be fixed as part of making q a full python module
for qtable_name,table_creator in six.iteritems(self.loaded_table_structures_dict):
try:
table_creator.drop_table()
except:
# Support no-table select queries
pass
self.loaded_table_structures_dict = OrderedDict()
def analyze(self,query_str,input_params=None,data_streams=None):
q_output = self._execute(query_str,input_params,data_streams=data_streams,stop_after_analysis=True)
return q_output
def escape_double_quotes_if_needed(v):
x = v.replace(six.u('"'), six.u('""'))
return x
def quote_none_func(output_delimiter,v):
return v
def quote_minimal_func(output_delimiter,v):
if v is None:
return v
t = type(v)
if (t == str or t == unicode) and ((output_delimiter in v) or ('\n' in v) or ('"' in v)):
return six.u('"{}"').format(escape_double_quotes_if_needed(v))
return v
def quote_nonnumeric_func(output_delimiter,v):
if v is None:
return v
if type(v) == str or type(v) == unicode:
return six.u('"{}"').format(escape_double_quotes_if_needed(v))
return v
def quote_all_func(output_delimiter,v):
if type(v) == str or type(v) == unicode:
return six.u('"{}"').format(escape_double_quotes_if_needed(v))
else:
return six.u('"{}"').format(v)
class QOutputParams(object):
def __init__(self,
delimiter=' ',
beautify=False,
output_quoting_mode='minimal',
formatting=None,
output_header=False,
encoding=None):
self.delimiter = delimiter
self.beautify = beautify
self.output_quoting_mode = output_quoting_mode
self.formatting = formatting
self.output_header = output_header
self.encoding = encoding
def __str__(self):
return "QOutputParams<%s>" % str(self.__dict__)
def __repr__(self):
return "QOutputParams(...)"
class QOutputPrinter(object):
output_quoting_modes = { 'minimal' : quote_minimal_func,
'all' : quote_all_func,
'nonnumeric' : quote_nonnumeric_func,
'none' : quote_none_func }
def __init__(self,output_params,show_tracebacks=False):
self.output_params = output_params
self.show_tracebacks = show_tracebacks
self.output_field_quoting_func = QOutputPrinter.output_quoting_modes[output_params.output_quoting_mode]
def print_errors_and_warnings(self,f,results):
if results.status == 'error':
error = results.error
print(error.msg, file=f)
if self.show_tracebacks:
print(error.traceback, file=f)
for warning in results.warnings:
print("%s" % warning.msg, file=f)
def print_analysis(self,f_out,f_err,results):
self.print_errors_and_warnings(f_err,results)
if results.metadata is None:
return
if results.metadata.table_structures is None:
return
for qtable_name in results.metadata.table_structures:
table_structures = results.metadata.table_structures[qtable_name]
print("Table: %s" % qtable_name,file=f_out)
print(" Sources:",file=f_out)
dl = results.metadata.new_table_structures[qtable_name]
print(" source_type: %s source: %s" % (dl.source_type,dl.source),file=f_out)
print(" Fields:",file=f_out)
for n,t in zip(table_structures.column_names,table_structures.sqlite_column_types):
print(" `%s` - %s" % (n,t), file=f_out)
def print_output(self,f_out,f_err,results):
try:
self._print_output(f_out,f_err,results)
except (UnicodeEncodeError, UnicodeError) as e:
print("Cannot encode data. Error:%s" % e, file=f_err)
sys.exit(3)
except IOError as e:
if e.errno == 32:
# broken pipe, that's ok
pass
else:
# don't miss other problems for now
raise
except KeyboardInterrupt:
pass
def _print_output(self,f_out,f_err,results):
self.print_errors_and_warnings(f_err,results)
data = results.data
if data is None:
return
# If the user requested beautifying the output
if self.output_params.beautify:
if self.output_params.output_header:
data_with_possible_headers = data + [tuple(results.metadata.output_column_name_list)]
else:
data_with_possible_headers = data
max_lengths = determine_max_col_lengths(data_with_possible_headers,self.output_field_quoting_func,self.output_params.delimiter)
if self.output_params.formatting:
formatting_dict = dict(
[(x.split("=")[0], x.split("=")[1]) for x in self.output_params.formatting.split(",")])
else:
formatting_dict = {}
try:
if self.output_params.output_header and results.metadata.output_column_name_list is not None:
data.insert(0,results.metadata.output_column_name_list)
for rownum, row in enumerate(data):
row_str = []
skip_formatting = rownum == 0 and self.output_params.output_header
for i, col in enumerate(row):
if str(i + 1) in formatting_dict and not skip_formatting:
fmt_str = formatting_dict[str(i + 1)]
else:
if self.output_params.beautify:
fmt_str = six.u("{{0:<{}}}").format(max_lengths[i])
else:
fmt_str = six.u("{}")
if col is not None:
quoted_col = self.output_field_quoting_func(self.output_params.delimiter,col)
row_str.append(fmt_str.format(quoted_col))
else:
row_str.append(fmt_str.format(""))
output_line = six.u(self.output_params.delimiter).join(row_str) + six.u("\n")
f_out.write(output_line)
except (UnicodeEncodeError, UnicodeError) as e:
print("Cannot encode data. Error:%s" % e, file=sys.stderr)
sys.exit(3)
except TypeError as e:
print("Error while formatting output: %s" % e, file=sys.stderr)
sys.exit(4)
except IOError as e:
if e.errno == 32:
# broken pipe, that's ok
pass
else:
# don't miss other problems for now
raise
except KeyboardInterrupt:
pass
try:
# Prevent python bug when order of pipe shutdowns is reversed
f_out.flush()
except IOError:
pass
def get_option_with_default(p, option_type, option, default):
try:
if not p.has_option('options', option):
return default
if p.get('options',option) == 'None':
return None
if option_type == 'boolean':
r = p.getboolean('options', option)
return r
elif option_type == 'int':
r = p.getint('options', option)
return r
elif option_type == 'string':
r = p.get('options', option)
return r
else:
raise Exception("Unknown option type %s " % option_type)
except ValueError:
raise IncorrectDefaultValueException(option_type,option,p.get("options",option))
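# Illustrative: given a .qrc file containing
#   [options]
#   delimiter=,
#   verbose=True
# get_option_with_default(p, 'string', 'delimiter', None) returns ',' and
# get_option_with_default(p, 'boolean', 'verbose', False) returns True; a literal
# 'None' value in the file is mapped to Python None.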
QRC_FILENAME_ENVVAR = 'QRC_FILENAME'
def dump_default_values_as_qrc(parser,exclusions):
m = parser.parse_args([]).__dict__
m.pop('leftover')
print("[options]",file=sys.stdout)
for k in sorted(m.keys()):
if k not in exclusions:
print("%s=%s" % (k,m[k]),file=sys.stdout)
USAGE_TEXT = """
q <flags> <query>
Example Execution for a delimited file:
q "select * from myfile.csv"
Example Execution for an sqlite3 database:
q "select * from mydatabase.sqlite:::my_table_name"
or
q "select * from mydatabase.sqlite"
if the database file contains only one table
Auto-caching of delimited files can be activated through `-C readwrite` (writes new caches if needed) or `-C read` (only reads existing cache files)
Setting the default caching mode (`-C`) can be done by writing a `~/.qrc` file. See docs for more info.
q's purpose is to bring SQL expressive power to the Linux command line and to provide easy access to text as actual data.
q allows the following:
* Performing SQL-like statements directly on tabular text data, auto-caching the data in order to accelerate additional querying on the same file
* Performing SQL statements directly on multi-file sqlite3 databases, without having to merge them or load them into memory
Changing the default values for parameters can be done by creating a `~/.qrc` file. Run q with `--dump-defaults` in order to dump a default `.qrc` file into stdout.
See https://github.com/harelba/q for more details.
"""
def run_standalone():
sqlite3.enable_callback_tracebacks(True)
p, qrc_filename = parse_qrc_file()
args, options, parser = initialize_command_line_parser(p, qrc_filename)
dump_defaults_and_stop__if_needed(options, parser)
dump_version_and_stop__if_needed(options)
STDOUT, default_input_params, q_output_printer, query_strs = parse_options(args, options)
data_streams_dict = initialize_default_data_streams()
q_engine = QTextAsData(default_input_params=default_input_params,data_streams_dict=data_streams_dict)
execute_queries(STDOUT, options, q_engine, q_output_printer, query_strs)
q_engine.done()
sys.exit(0)
def dump_version_and_stop__if_needed(options):
if options.version:
print_credentials()
sys.exit(0)
def dump_defaults_and_stop__if_needed(options, parser):
if options.dump_defaults:
dump_default_values_as_qrc(parser, ['dump-defaults', 'version'])
sys.exit(0)
def execute_queries(STDOUT, options, q_engine, q_output_printer, query_strs):
for query_str in query_strs:
if options.analyze_only:
q_output = q_engine.analyze(query_str)
q_output_printer.print_analysis(STDOUT, sys.stderr, q_output)
else:
q_output = q_engine.execute(query_str, save_db_to_disk_filename=options.save_db_to_disk_filename)
q_output_printer.print_output(STDOUT, sys.stderr, q_output)
if q_output.status == 'error':
sys.exit(q_output.error.errorcode)
def initialize_command_line_parser(p, qrc_filename):
try:
default_verbose = get_option_with_default(p, 'boolean', 'verbose', False)
default_save_db_to_disk = get_option_with_default(p, 'string', 'save_db_to_disk_filename', None)
default_caching_mode = get_option_with_default(p, 'string', 'caching_mode', 'none')
default_skip_header = get_option_with_default(p, 'boolean', 'skip_header', False)
default_delimiter = get_option_with_default(p, 'string', 'delimiter', None)
default_pipe_delimited = get_option_with_default(p, 'boolean', 'pipe_delimited', False)
default_tab_delimited = get_option_with_default(p, 'boolean', 'tab_delimited', False)
default_encoding = get_option_with_default(p, 'string', 'encoding', 'UTF-8')
default_gzipped = get_option_with_default(p, 'boolean', 'gzipped', False)
default_analyze_only = get_option_with_default(p, 'boolean', 'analyze_only', False)
default_mode = get_option_with_default(p, 'string', 'mode', "relaxed")
default_column_count = get_option_with_default(p, 'string', 'column_count', None)
default_keep_leading_whitespace_in_values = get_option_with_default(p, 'boolean',
'keep_leading_whitespace_in_values', False)
default_disable_double_double_quoting = get_option_with_default(p, 'boolean', 'disable_double_double_quoting',
True)
default_disable_escaped_double_quoting = get_option_with_default(p, 'boolean', 'disable_escaped_double_quoting',
True)
default_disable_column_type_detection = get_option_with_default(p, 'boolean', 'disable_column_type_detection',
False)
default_input_quoting_mode = get_option_with_default(p, 'string', 'input_quoting_mode', 'minimal')
default_max_column_length_limit = get_option_with_default(p, 'int', 'max_column_length_limit', 131072)
default_with_universal_newlines = get_option_with_default(p, 'boolean', 'with_universal_newlines', False)
default_output_delimiter = get_option_with_default(p, 'string', 'output_delimiter', None)
default_pipe_delimited_output = get_option_with_default(p, 'boolean', 'pipe_delimited_output', False)
default_tab_delimited_output = get_option_with_default(p, 'boolean', 'tab_delimited_output', False)
default_output_header = get_option_with_default(p, 'boolean', 'output_header', False)
default_beautify = get_option_with_default(p, 'boolean', 'beautify', False)
default_formatting = get_option_with_default(p, 'string', 'formatting', None)
default_output_encoding = get_option_with_default(p, 'string', 'output_encoding', 'none')
default_output_quoting_mode = get_option_with_default(p, 'string', 'output_quoting_mode', 'minimal')
default_list_user_functions = get_option_with_default(p, 'boolean', 'list_user_functions', False)
default_overwrite_qsql = get_option_with_default(p, 'boolean', 'overwrite_qsql', False)
default_query_filename = get_option_with_default(p, 'string', 'query_filename', None)
default_query_encoding = get_option_with_default(p, 'string', 'query_encoding', locale.getpreferredencoding())
default_max_attached_sqlite_databases = get_option_with_default(p,'int','max_attached_sqlite_databases', 10)
except IncorrectDefaultValueException as e:
print("Incorrect value '%s' for option %s in .qrc file %s (option type is %s)" % (
e.actual_value, e.option, qrc_filename, e.option_type))
sys.exit(199)
parser = ArgumentParser(prog="q",usage=USAGE_TEXT)
parser.add_argument("-v", "--version", action="store_true", help="Print version")
parser.add_argument("-V", "--verbose", default=default_verbose, action="store_true",
help="Print debug info in case of problems")
parser.add_argument("-S", "--save-db-to-disk", dest="save_db_to_disk_filename", default=default_save_db_to_disk,
help="Save database to an sqlite database file")
parser.add_argument("-C", "--caching-mode", default=default_caching_mode,
help="Choose the autocaching mode (none/read/readwrite). Autocaches files to disk db so further queries will be faster. Caching is done to a side-file with the same name of the table, but with an added extension .qsql")
parser.add_argument("--dump-defaults", action="store_true",
help="Dump all default values for parameters and exit. Can be used in order to make sure .qrc file content is being read properly.")
parser.add_argument("--max-attached-sqlite-databases", default=default_max_attached_sqlite_databases,type=int,
help="Set the maximum number of concurrently-attached sqlite dbs. This is a compile time definition of sqlite. q's performance will slow down once this limit is reached for a query, since it will perform table copies in order to avoid that limit.")
# -----------------------------------------------
input_data_option_group = parser.add_argument_group("Input Data Options")
input_data_option_group.add_argument("-H", "--skip-header", default=default_skip_header,
action="store_true",
help="Skip header row. This has been changed from earlier version - Only one header row is supported, and the header row is used for column naming")
input_data_option_group.add_argument("-d", "--delimiter", default=default_delimiter,
help="Field delimiter. If none specified, then space is used as the delimiter.")
input_data_option_group.add_argument("-p", "--pipe-delimited", default=default_pipe_delimited,
action="store_true",
help="Same as -d '|'. Added for convenience and readability")
input_data_option_group.add_argument("-t", "--tab-delimited", default=default_tab_delimited,
action="store_true",
help="Same as -d <tab>. Just a shorthand for handling standard tab delimited file You can use $'\\t' if you want (this is how Linux expects to provide tabs in the command line")
input_data_option_group.add_argument("-e", "--encoding", default=default_encoding,
help="Input file encoding. Defaults to UTF-8. set to none for not setting any encoding - faster, but at your own risk...")
input_data_option_group.add_argument("-z", "--gzipped", default=default_gzipped, action="store_true",
help="Data is gzipped. Useful for reading from stdin. For files, .gz means automatic gunzipping")
input_data_option_group.add_argument("-A", "--analyze-only", default=default_analyze_only,
action='store_true',
help="Analyze sample input and provide information about data types")
input_data_option_group.add_argument("-m", "--mode", default=default_mode,
help="Data parsing mode. fluffy, relaxed and strict. In strict mode, the -c column-count parameter must be supplied as well")
input_data_option_group.add_argument("-c", "--column-count", default=default_column_count,
help="Specific column count when using relaxed or strict mode")
input_data_option_group.add_argument("-k", "--keep-leading-whitespace", dest="keep_leading_whitespace_in_values",
default=default_keep_leading_whitespace_in_values, action="store_true",
help="Keep leading whitespace in values. Default behavior strips leading whitespace off values, in order to provide out-of-the-box usability for simple use cases. If you need to preserve whitespace, use this flag.")
input_data_option_group.add_argument("--disable-double-double-quoting",
default=default_disable_double_double_quoting, action="store_false",
help="Disable support for double double-quoting for escaping the double quote character. By default, you can use \"\" inside double quoted fields to escape double quotes. Mainly for backward compatibility.")
input_data_option_group.add_argument("--disable-escaped-double-quoting",
default=default_disable_escaped_double_quoting, action="store_false",
help="Disable support for escaped double-quoting for escaping the double quote character. By default, you can use \\\" inside double quoted fields to escape double quotes. Mainly for backward compatibility.")
input_data_option_group.add_argument("--as-text", dest="disable_column_type_detection",
default=default_disable_column_type_detection, action="store_true",
help="Don't detect column types - All columns will be treated as text columns")
input_data_option_group.add_argument("-w", "--input-quoting-mode",
default=default_input_quoting_mode,
help="Input quoting mode. Possible values are all, minimal and none. Note the slightly misleading parameter name, and see the matching -W parameter for output quoting.")
input_data_option_group.add_argument("-M", "--max-column-length-limit",
default=default_max_column_length_limit,
help="Sets the maximum column length.")
input_data_option_group.add_argument("-U", "--with-universal-newlines",
default=default_with_universal_newlines, action="store_true",
help="Expect universal newlines in the data. Limitation: -U works only with regular files for now, stdin or .gz files are not supported yet.")
# -----------------------------------------------
output_data_option_group = parser.add_argument_group("Output Options")
output_data_option_group.add_argument("-D", "--output-delimiter",
default=default_output_delimiter,
help="Field delimiter for output. If none specified, then the -d delimiter is used if present, or space if no delimiter is specified")
output_data_option_group.add_argument("-P", "--pipe-delimited-output",
default=default_pipe_delimited_output, action="store_true",
help="Same as -D '|'. Added for convenience and readability.")
output_data_option_group.add_argument("-T", "--tab-delimited-output",
default=default_tab_delimited_output, action="store_true",
help="Same as -D <tab>. Just a shorthand for outputting tab delimited output. You can use -D $'\\t' if you want.")
output_data_option_group.add_argument("-O", "--output-header", default=default_output_header,
action="store_true",
help="Output header line. Output column-names are determined from the query itself. Use column aliases in order to set your column names in the query. For example, 'select name FirstName,value1/value2 MyCalculation from ...'. This can be used even if there was no header in the input.")
output_data_option_group.add_argument("-b", "--beautify", default=default_beautify,
action="store_true",
help="Beautify output according to actual values. Might be slow...")
output_data_option_group.add_argument("-f", "--formatting", default=default_formatting,
help="Output-level formatting, in the format X=fmt,Y=fmt etc, where X,Y are output column numbers (e.g. 1 for first SELECT column etc.")
output_data_option_group.add_argument("-E", "--output-encoding",
default=default_output_encoding,
help="Output encoding. Defaults to 'none', leading to selecting the system/terminal encoding")
output_data_option_group.add_argument("-W", "--output-quoting-mode",
default=default_output_quoting_mode,
help="Output quoting mode. Possible values are all, minimal, nonnumeric and none. Note the slightly misleading parameter name, and see the matching -w parameter for input quoting.")
output_data_option_group.add_argument("-L", "--list-user-functions",
default=default_list_user_functions, action="store_true",
help="List all user functions")
parser.add_argument("--overwrite-qsql", default=default_overwrite_qsql,
help="When used, qsql files (both caches and store-to-db) will be overwritten if they already exist. Use with care.")
# -----------------------------------------------
query_option_group = parser.add_argument_group("Query Related Options")
query_option_group.add_argument("-q", "--query-filename", default=default_query_filename,
help="Read query from the provided filename instead of the command line, possibly using the provided query encoding (using -Q).")
query_option_group.add_argument("-Q", "--query-encoding", default=default_query_encoding,
help="query text encoding. Experimental. Please send your feedback on this")
# -----------------------------------------------
parser.add_argument('leftover', nargs='*')
args = parser.parse_args()
return args.leftover, args, parser
def parse_qrc_file():
p = configparser.ConfigParser()
if QRC_FILENAME_ENVVAR in os.environ:
qrc_filename = os.environ[QRC_FILENAME_ENVVAR]
if qrc_filename != 'None':
xprint("qrc filename is %s" % qrc_filename)
if os.path.exists(qrc_filename):
p.read([os.environ[QRC_FILENAME_ENVVAR]])
else:
print('QRC_FILENAME env var exists, but cannot find qrc file at %s' % qrc_filename, file=sys.stderr)
sys.exit(244)
else:
pass # special handling of 'None' env var value for QRC_FILENAME. Allows to eliminate the default ~/.qrc reading
else:
qrc_filename = os.path.expanduser('~/.qrc')
p.read([qrc_filename, '.qrc'])
return p, qrc_filename
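# Illustrative qrc resolution (paths are examples):
#   QRC_FILENAME=/etc/q/qrc q "select 1"   -> reads /etc/q/qrc, exits 244 if the file is missing
#   QRC_FILENAME=None q "select 1"         -> skips ~/.qrc entirely
#   (no env var)                           -> reads ~/.qrc and then ./.qrc if present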
def initialize_default_data_streams():
data_streams_dict = {
'-': DataStream('stdin', '-', sys.stdin)
}
return data_streams_dict
def parse_options(args, options):
if options.list_user_functions:
print_user_functions()
sys.exit(0)
if len(args) == 0 and options.query_filename is None:
print_credentials()
print("Must provide at least one query in the command line, or through a file with the -q parameter",
file=sys.stderr)
sys.exit(1)
if options.query_filename is not None:
if len(args) != 0:
print("Can't provide both a query file and a query on the command line", file=sys.stderr)
sys.exit(1)
try:
f = open(options.query_filename, 'rb')
query_strs = [f.read()]
f.close()
except Exception:
print("Could not read query from file %s" % options.query_filename, file=sys.stderr)
sys.exit(1)
else:
if sys.stdin.encoding is not None:
query_strs = [x.encode(sys.stdin.encoding) for x in args]
else:
query_strs = args
if options.query_encoding is not None and options.query_encoding != 'none':
try:
for idx in range(len(query_strs)):
query_strs[idx] = query_strs[idx].decode(options.query_encoding).strip()
if len(query_strs[idx]) == 0:
print("Query cannot be empty (query number %s)" % (idx + 1), file=sys.stderr)
sys.exit(1)
except Exception as e:
print("Could not decode query number %s using the provided query encoding (%s)" % (
idx + 1, options.query_encoding), file=sys.stderr)
sys.exit(3)
###
if options.mode not in ['relaxed', 'strict']:
print("Parsing mode can either be relaxed or strict", file=sys.stderr)
sys.exit(13)
output_encoding = get_stdout_encoding(options.output_encoding)
try:
if six.PY3:
STDOUT = codecs.getwriter(output_encoding)(sys.stdout.buffer)
else:
STDOUT = codecs.getwriter(output_encoding)(sys.stdout)
except Exception:
print("Could not create output stream using output encoding %s" % (output_encoding), file=sys.stderr)
sys.exit(200)
# If the user flagged for a tab-delimited file then set the delimiter to tab
if options.tab_delimited:
if options.delimiter is not None and options.delimiter != '\t':
print("Warning: -t parameter overrides -d parameter (%s)" % options.delimiter, file=sys.stderr)
options.delimiter = '\t'
# If the user flagged for a pipe-delimited file then set the delimiter to pipe
if options.pipe_delimited:
if options.delimiter is not None and options.delimiter != '|':
print("Warning: -p parameter overrides -d parameter (%s)" % options.delimiter, file=sys.stderr)
options.delimiter = '|'
if options.delimiter is None:
options.delimiter = ' '
elif len(options.delimiter) != 1:
print("Delimiter must be one character only", file=sys.stderr)
sys.exit(5)
if options.tab_delimited_output:
if options.output_delimiter is not None and options.output_delimiter != '\t':
print("Warning: -T parameter overrides -D parameter (%s)" % options.output_delimiter, file=sys.stderr)
options.output_delimiter = '\t'
if options.pipe_delimited_output:
if options.output_delimiter is not None and options.output_delimiter != '|':
print("Warning: -P parameter overrides -D parameter (%s)" % options.output_delimiter, file=sys.stderr)
options.output_delimiter = '|'
if not options.output_delimiter:
if options.delimiter:
# if an input delimiter is specified, use it for the output as well
options.output_delimiter = options.delimiter
else:
# if no input delimiter is specified, use space as the default
# (since no input delimiter means any whitespace)
options.output_delimiter = " "
try:
max_column_length_limit = int(options.max_column_length_limit)
except (ValueError, TypeError):
print("Max column length limit must be an integer larger than 2 (%s)" % options.max_column_length_limit,
file=sys.stderr)
sys.exit(31)
if max_column_length_limit < 3:
print("Maximum column length must be larger than 2",file=sys.stderr)
sys.exit(31)
csv.field_size_limit(max_column_length_limit)
xprint("Max column length limit is %s" % options.max_column_length_limit)
if options.input_quoting_mode not in list(QTextAsData.input_quoting_modes.keys()):
print("Input quoting mode can only be one of %s. It cannot be set to '%s'" % (
",".join(sorted(QTextAsData.input_quoting_modes.keys())), options.input_quoting_mode), file=sys.stderr)
sys.exit(55)
if options.output_quoting_mode not in list(QOutputPrinter.output_quoting_modes.keys()):
print("Output quoting mode can only be one of %s. It cannot be set to '%s'" % (
",".join(sorted(QOutputPrinter.output_quoting_modes.keys())), options.output_quoting_mode), file=sys.stderr)
sys.exit(56)
if options.column_count is not None:
expected_column_count = int(options.column_count)
if expected_column_count < 1 or expected_column_count > int(options.max_column_length_limit):
print("Column count must be between 1 and %s" % int(options.max_column_length_limit),file=sys.stderr)
sys.exit(90)
else:
# infer automatically
expected_column_count = None
if options.encoding != 'none':
try:
codecs.lookup(options.encoding)
except LookupError:
print("Encoding %s could not be found" % options.encoding, file=sys.stderr)
sys.exit(10)
if options.save_db_to_disk_filename is not None:
if options.analyze_only:
print("Cannot save database to disk when running with -A (analyze-only) option.", file=sys.stderr)
sys.exit(119)
print("Going to save data into a disk database: %s" % options.save_db_to_disk_filename, file=sys.stderr)
if os.path.exists(options.save_db_to_disk_filename):
print("Disk database file %s already exists." % options.save_db_to_disk_filename, file=sys.stderr)
sys.exit(77)
# sys.exit(78) Deprecated, but shouldn't be reused
if options.caching_mode not in ['none', 'read', 'readwrite']:
print("caching mode must be none,read or readwrite",file=sys.stderr)
sys.exit(85)
read_caching = options.caching_mode in ['read', 'readwrite']
write_caching = options.caching_mode in ['readwrite']
if options.max_attached_sqlite_databases <= 3:
print("Max attached sqlite databases must be larger than 3")
sys.exit(99)
default_input_params = QInputParams(skip_header=options.skip_header,
delimiter=options.delimiter,
input_encoding=options.encoding,
gzipped_input=options.gzipped,
with_universal_newlines=options.with_universal_newlines,
parsing_mode=options.mode,
expected_column_count=expected_column_count,
keep_leading_whitespace_in_values=options.keep_leading_whitespace_in_values,
disable_double_double_quoting=options.disable_double_double_quoting,
disable_escaped_double_quoting=options.disable_escaped_double_quoting,
input_quoting_mode=options.input_quoting_mode,
disable_column_type_detection=options.disable_column_type_detection,
max_column_length_limit=max_column_length_limit,
read_caching=read_caching,
write_caching=write_caching,
max_attached_sqlite_databases=options.max_attached_sqlite_databases)
output_params = QOutputParams(
delimiter=options.output_delimiter,
beautify=options.beautify,
output_quoting_mode=options.output_quoting_mode,
formatting=options.formatting,
output_header=options.output_header,
encoding=output_encoding)
q_output_printer = QOutputPrinter(output_params, show_tracebacks=DEBUG)
return STDOUT, default_input_params, q_output_printer, query_strs
if __name__ == '__main__':
run_standalone()
| 162,794 | Python | .py | 2,937 | 43.956418 | 471 | 0.62439 | harelba/q | 10,180 | 421 | 117 | GPL-3.0 | 9/5/2024, 5:11:42 PM (Europe/Amsterdam) |
| 12,333 | pyoxidizer.bzl | harelba_q/pyoxidizer.bzl |
# This file defines how PyOxidizer application building and packaging is
# performed. See PyOxidizer's documentation at
# https://pyoxidizer.readthedocs.io/en/stable/ for details of this
# configuration file format.
PYTHON_VERSION = VARS.get("PYTHON_VERSION","3.8")
Q_VERSION = VARS.get("Q_VERSION","0.0.1")
# Configuration files consist of functions which define build "targets."
# This function creates a Python executable and installs it in a destination
# directory.
def make_exe():
dist = default_python_distribution(python_version=PYTHON_VERSION)
policy = dist.make_python_packaging_policy()
policy.set_resource_handling_mode("classify")
policy.resources_location = "in-memory"
policy.resources_location_fallback = "filesystem-relative:Lib"
policy.allow_in_memory_shared_library_loading = False
python_config = dist.make_python_interpreter_config()
python_config.run_module = "bin.q"
exe = dist.to_python_executable(
name="q",
packaging_policy=policy,
config=python_config,
)
exe.pip_install(["wheel"])
exe.add_python_resources(exe.pip_install(["-r", "requirements.txt"]))
exe.add_python_resources(exe.pip_install(["-e", "."]))
exe.add_python_resources(exe.read_package_root(
path="./",
packages=["bin"],
))
return exe
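# Illustrative build invocations (version values are examples only):
#   pyoxidizer build --var PYTHON_VERSION 3.8 --var Q_VERSION 2.0.19
#   pyoxidizer build msi_installer --var Q_VERSION 2.0.19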
def make_embedded_resources(exe):
return exe.to_embedded_resources()
def make_install(exe):
# Create an object that represents our installed application file layout.
files = FileManifest()
# Add the generated executable to our install layout in the root directory.
files.add_python_resource(".", exe)
return files
def make_msi(exe):
# See the full docs for more. But this will convert your Python executable
# into a `WiXMSIBuilder` Starlark type, which will be converted to a Windows
# .msi installer when it is built.
builder = exe.to_wix_msi_builder(
# Simple identifier of your app.
"q",
# The name of your application.
"q-text-as-data",
# The version of your application.
Q_VERSION,
# The author/manufacturer of your application.
"Harel Ben-Attia"
)
return builder
# Dynamically enable automatic code signing.
def register_code_signers():
# You will need to run with `pyoxidizer build --var ENABLE_CODE_SIGNING 1` for
# this if block to be evaluated.
if not VARS.get("ENABLE_CODE_SIGNING"):
return
# Use a code signing certificate in a .pfx/.p12 file, prompting the
# user for its path and password to open.
# pfx_path = prompt_input("path to code signing certificate file")
# pfx_password = prompt_password(
# "password for code signing certificate file",
# confirm = True
# )
# signer = code_signer_from_pfx_file(pfx_path, pfx_password)
# Use a code signing certificate in the Windows certificate store, specified
# by its SHA-1 thumbprint. (This allows you to use YubiKeys and other
# hardware tokens if they speak to the Windows certificate APIs.)
# sha1_thumbprint = prompt_input(
# "SHA-1 thumbprint of code signing certificate in Windows store"
# )
# signer = code_signer_from_windows_store_sha1_thumbprint(sha1_thumbprint)
# Choose a code signing certificate automatically from the Windows
# certificate store.
# signer = code_signer_from_windows_store_auto()
# Activate your signer so it gets called automatically.
# signer.activate()
# Call our function to set up automatic code signers.
register_code_signers()
# Tell PyOxidizer about the build targets defined above.
register_target("exe", make_exe)
register_target("resources", make_embedded_resources, depends=["exe"], default_build_script=True)
register_target("install", make_install, depends=["exe"], default=True)
register_target("msi_installer", make_msi, depends=["exe"])
# Resolve whatever targets the invoker of this configuration file is requesting
# be resolved.
resolve_targets()
| 4,040 | Python | .bzl | 90 | 40.077778 | 97 | 0.717563 | harelba/q | 10,180 | 421 | 117 | GPL-3.0 | 9/5/2024, 5:11:42 PM (Europe/Amsterdam) |
| 12,334 | PullRequestReview.py | PyGithub_PyGithub/github/PullRequestReview.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Aaron Levine <allevin@sandia.gov> #
# Copyright 2017 Mike Miller <github@mikeage.net> #
# Copyright 2018 Darragh Bailey <daragh.bailey@gmail.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Claire Johns <42869556+johnsc1@users.noreply.github.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Gael Colas <gael.colas@plus.ai> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class PullRequestReview(NonCompletableGithubObject):
"""
This class represents PullRequestReviews.
The reference can be found here
https://docs.github.com/en/rest/reference/pulls#reviews
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._user: Attribute[github.NamedUser.NamedUser] = NotSet
self._body: Attribute[str] = NotSet
self._commit_id: Attribute[str] = NotSet
self._state: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._pull_request_url: Attribute[str] = NotSet
self._submitted_at: Attribute[datetime] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "user": self._user.value})
@property
def id(self) -> int:
return self._id.value
@property
def user(self) -> github.NamedUser.NamedUser:
return self._user.value
@property
def body(self) -> str:
return self._body.value
@property
def commit_id(self) -> str:
return self._commit_id.value
@property
def state(self) -> str:
return self._state.value
@property
def html_url(self) -> str:
return self._html_url.value
@property
def pull_request_url(self) -> str:
return self._pull_request_url.value
@property
def submitted_at(self) -> datetime:
return self._submitted_at.value
def dismiss(self, message: str) -> None:
"""
:calls: `PUT /repos/{owner}/{repo}/pulls/{number}/reviews/{review_id}/dismissals <https://docs.github.com/en/rest/reference/pulls#reviews>`_
"""
post_parameters = {"message": message}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
f"{self.pull_request_url}/reviews/{self.id}/dismissals",
input=post_parameters,
)
self._useAttributes(data)
def delete(self) -> None:
"""
:calls: `DELETE /repos/:owner/:repo/pulls/:number/reviews/:review_id <https://developer.github.com/v3/pulls/reviews/>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.pull_request_url}/reviews/{self.id}")
def edit(self, body: str) -> None:
"""
:calls: `PUT /repos/{owner}/{repo}/pulls/{number}/reviews/{review_id}
<https://docs.github.com/en/rest/pulls/reviews#update-a-review-for-a-pull-request>`_
"""
assert isinstance(body, str), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck(
"PUT",
f"{self.pull_request_url}/reviews/{self.id}",
input=post_parameters,
)
self._useAttributes(data)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "pull_request_url" in attributes: # pragma no branch
self._pull_request_url = self._makeStringAttribute(attributes["pull_request_url"])
if "submitted_at" in attributes: # pragma no branch
self._submitted_at = self._makeDatetimeAttribute(attributes["submitted_at"])
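# Illustrative usage sketch (assumes an authenticated Github client `g`; the repo
# name and PR number are examples only):
#   repo = g.get_repo("owner/repo")
#   review = repo.get_pull(123).get_reviews()[0]
#   review.dismiss("superseded by a newer commit")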
| 7,685 | Python | .py | 138 | 49.724638 | 148 | 0.556737 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,335 | IssuePullRequest.py | PyGithub_PyGithub/github/IssuePullRequest.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class IssuePullRequest(NonCompletableGithubObject):
"""
This class represents IssuePullRequests.
"""
def _initAttributes(self) -> None:
self._diff_url: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._patch_url: Attribute[str] = NotSet
@property
def diff_url(self) -> str:
return self._diff_url.value
@property
def html_url(self) -> str:
return self._html_url.value
@property
def patch_url(self) -> str:
return self._patch_url.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "diff_url" in attributes: # pragma no branch
self._diff_url = self._makeStringAttribute(attributes["diff_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "patch_url" in attributes: # pragma no branch
self._patch_url = self._makeStringAttribute(attributes["patch_url"])
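# Illustrative usage sketch (assumes an Issue object `issue` that is backed by a
# pull request; attribute names follow the class above):
#   if issue.pull_request is not None:
#       print(issue.pull_request.html_url)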
| 3,971 | Python | .py | 61 | 61.442623 | 80 | 0.501282 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,336 | GitTree.py | PyGithub_PyGithub/github/GitTree.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.GitTreeElement
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.GitTreeElement import GitTreeElement
class GitTree(CompletableGithubObject):
"""
This class represents GitTrees.
The reference can be found here
https://docs.github.com/en/rest/reference/git#trees
"""
def _initAttributes(self) -> None:
self._sha: Attribute[str] = NotSet
self._tree: Attribute[list[GitTreeElement]] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value})
@property
def sha(self) -> str:
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def tree(self) -> list[GitTreeElement]:
self._completeIfNotSet(self._tree)
return self._tree.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def _identity(self) -> str:
return self.sha
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "tree" in attributes: # pragma no branch
self._tree = self._makeListOfClassesAttribute(github.GitTreeElement.GitTreeElement, attributes["tree"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
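# Illustrative usage sketch (assumes a Repository object `repo`; "main" is an example ref):
#   tree = repo.get_git_tree("main", recursive=True)
#   for element in tree.tree:
#       print(element.path, element.type)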
| 4,706 | Python | .py | 78 | 56.371795 | 115 | 0.525038 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,337 | StatsCodeFrequency.py | PyGithub_PyGithub/github/StatsCodeFrequency.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class StatsCodeFrequency(NonCompletableGithubObject):
"""
This class represents statistics of StatsCodeFrequencies.
The reference can be found here
https://docs.github.com/en/rest/metrics/statistics?apiVersion=2022-11-28#get-the-weekly-commit-activity
"""
def _initAttributes(self) -> None:
self._week: Attribute[datetime] = NotSet
self._additions: Attribute[int] = NotSet
self._deletions: Attribute[int] = NotSet
@property
def week(self) -> datetime:
return self._week.value
@property
def additions(self) -> int:
return self._additions.value
@property
def deletions(self) -> int:
return self._deletions.value
def _useAttributes(self, attributes: tuple[int, int, int]) -> None:
self._week = self._makeTimestampAttribute(attributes[0])
self._additions = self._makeIntAttribute(attributes[1])
self._deletions = self._makeIntAttribute(attributes[2])
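# Illustrative usage sketch (assumes a Repository object `repo`; the stats call may
# return None while GitHub is still computing them):
#   for weekly in repo.get_stats_code_frequency() or []:
#       print(weekly.week, weekly.additions, weekly.deletions)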
| 4,123 | Python | .py | 63 | 62.285714 | 107 | 0.51581 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,338 | BranchProtection.py | PyGithub_PyGithub/github/BranchProtection.py |
############################ Copyrights and license ############################
# #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Benjamin K <53038537+treee111@users.noreply.github.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.GithubObject
import github.NamedUser
import github.RequiredPullRequestReviews
import github.RequiredStatusChecks
import github.Team
from github.GithubObject import Attribute, NotSet, Opt, is_defined
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.NamedUser import NamedUser
from github.RequiredPullRequestReviews import RequiredPullRequestReviews
from github.RequiredStatusChecks import RequiredStatusChecks
from github.Team import Team
class BranchProtection(github.GithubObject.CompletableGithubObject):
"""
This class represents Branch Protection.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-branch-protection
"""
def __repr__(self) -> str:
return self.get__repr__({"url": self._url.value})
def _initAttributes(self) -> None:
self._url: Attribute[str] = NotSet
self._allow_deletions: Attribute[bool] = NotSet
self._allow_force_pushes: Attribute[bool] = NotSet
self._allow_fork_syncing: Attribute[bool] = NotSet
self._lock_branch: Attribute[bool] = NotSet
self._required_conversation_resolution: Attribute[bool] = NotSet
self._required_status_checks: Attribute[RequiredStatusChecks] = NotSet
self._enforce_admins: Attribute[bool] = NotSet
self._required_linear_history: Attribute[bool] = github.GithubObject.NotSet
self._required_pull_request_reviews: Attribute[RequiredPullRequestReviews] = NotSet
self._user_push_restrictions: Opt[str] = NotSet
self._team_push_restrictions: Opt[str] = NotSet
@property
def allow_deletions(self) -> bool:
self._completeIfNotSet(self._allow_deletions)
return self._allow_deletions.value
@property
def allow_force_pushes(self) -> bool:
self._completeIfNotSet(self._allow_force_pushes)
return self._allow_force_pushes.value
@property
def allow_fork_syncing(self) -> bool:
self._completeIfNotSet(self._allow_fork_syncing)
return self._allow_fork_syncing.value
@property
def lock_branch(self) -> bool:
self._completeIfNotSet(self._lock_branch)
return self._lock_branch.value
@property
def required_conversation_resolution(self) -> bool:
self._completeIfNotSet(self._required_conversation_resolution)
return self._required_conversation_resolution.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def required_status_checks(self) -> RequiredStatusChecks:
self._completeIfNotSet(self._required_status_checks)
return self._required_status_checks.value
@property
def enforce_admins(self) -> bool:
self._completeIfNotSet(self._enforce_admins)
return self._enforce_admins.value
@property
def required_linear_history(self) -> bool:
self._completeIfNotSet(self._required_linear_history)
return self._required_linear_history.value
@property
def required_pull_request_reviews(self) -> RequiredPullRequestReviews:
self._completeIfNotSet(self._required_pull_request_reviews)
return self._required_pull_request_reviews.value
def get_user_push_restrictions(self) -> PaginatedList[NamedUser] | None:
if not is_defined(self._user_push_restrictions):
return None
return PaginatedList(
github.NamedUser.NamedUser,
self._requester,
self._user_push_restrictions,
None,
)
def get_team_push_restrictions(self) -> PaginatedList[Team] | None:
if not is_defined(self._team_push_restrictions):
return None
return github.PaginatedList.PaginatedList(github.Team.Team, self._requester, self._team_push_restrictions, None)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "allow_deletions" in attributes: # pragma no branch
self._allow_deletions = self._makeBoolAttribute(attributes["allow_deletions"]["enabled"])
if "allow_force_pushes" in attributes: # pragma no branch
self._allow_force_pushes = self._makeBoolAttribute(attributes["allow_force_pushes"]["enabled"])
if "allow_fork_syncing" in attributes: # pragma no branch
self._allow_fork_syncing = self._makeBoolAttribute(attributes["allow_fork_syncing"]["enabled"])
if "lock_branch" in attributes: # pragma no branch
self._lock_branch = self._makeBoolAttribute(attributes["lock_branch"]["enabled"])
if "required_conversation_resolution" in attributes: # pragma no branch
self._required_conversation_resolution = self._makeBoolAttribute(
attributes["required_conversation_resolution"]["enabled"]
)
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "required_status_checks" in attributes: # pragma no branch
self._required_status_checks = self._makeClassAttribute(
github.RequiredStatusChecks.RequiredStatusChecks,
attributes["required_status_checks"],
)
if "enforce_admins" in attributes: # pragma no branch
self._enforce_admins = self._makeBoolAttribute(attributes["enforce_admins"]["enabled"])
if "required_pull_request_reviews" in attributes: # pragma no branch
self._required_pull_request_reviews = self._makeClassAttribute(
github.RequiredPullRequestReviews.RequiredPullRequestReviews,
attributes["required_pull_request_reviews"],
)
if "required_linear_history" in attributes: # pragma no branch
self._required_linear_history = self._makeBoolAttribute(attributes["required_linear_history"]["enabled"])
if "restrictions" in attributes: # pragma no branch
self._user_push_restrictions = attributes["restrictions"]["users_url"]
self._team_push_restrictions = attributes["restrictions"]["teams_url"]
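# Illustrative usage sketch (assumes a Repository object `repo`; "main" is an example branch):
#   protection = repo.get_branch("main").get_protection()
#   print(protection.enforce_admins, protection.required_linear_history)
#   users = protection.get_user_push_restrictions()
#   if users is not None:
#       for user in users:
#           print(user.login)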
| 8,743 | Python | .py | 151 | 51.05298 | 120 | 0.622243 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,339 | DependabotAlertDependency.py | PyGithub_PyGithub/github/DependabotAlertDependency.py |
############################ Copyrights and license ############################
# #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Thomas Cooper <coopernetes@proton.me> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.AdvisoryVulnerabilityPackage import AdvisoryVulnerabilityPackage
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class DependabotAlertDependency(NonCompletableGithubObject):
"""
This class represents a DependabotAlertDependency.
The reference can be found here
https://docs.github.com/en/rest/dependabot/alerts
"""
def _initAttributes(self) -> None:
self._package: Attribute[AdvisoryVulnerabilityPackage] = NotSet
self._manifest_path: Attribute[str] = NotSet
self._scope: Attribute[str] = NotSet
@property
def package(self) -> AdvisoryVulnerabilityPackage:
return self._package.value
@property
def manifest_path(self) -> str:
return self._manifest_path.value
@property
def scope(self) -> str:
return self._scope.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "package" in attributes:
self._package = self._makeClassAttribute(
AdvisoryVulnerabilityPackage,
attributes["package"],
)
if "manifest_path" in attributes:
self._manifest_path = self._makeStringAttribute(attributes["manifest_path"])
if "scope" in attributes:
self._scope = self._makeStringAttribute(attributes["scope"])
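# Illustrative usage sketch. The accessor below assumes the parent Dependabot alert
# object exposes this dependency as `alert.dependency`; treat that attribute name as
# an assumption, not a confirmed API:
#   dep = alert.dependency
#   print(dep.manifest_path, dep.scope)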
| 3,282 | Python | .py | 56 | 53.75 | 88 | 0.520846 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,340 | GistFile.py | PyGithub_PyGithub/github/GistFile.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class GistFile(NonCompletableGithubObject):
"""
This class represents GistFiles.
"""
def _initAttributes(self) -> None:
self._content: Attribute[str] = NotSet
self._filename: Attribute[str] = NotSet
self._language: Attribute[str] = NotSet
self._raw_url: Attribute[str] = NotSet
self._size: Attribute[int] = NotSet
self._type: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"filename": self._filename.value})
@property
def content(self) -> str:
return self._content.value
@property
def filename(self) -> str:
return self._filename.value
@property
def language(self) -> str:
return self._language.value
@property
def raw_url(self) -> str:
return self._raw_url.value
@property
def size(self) -> int:
return self._size.value
@property
def type(self) -> str:
return self._type.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "content" in attributes: # pragma no branch
self._content = self._makeStringAttribute(attributes["content"])
if "filename" in attributes: # pragma no branch
self._filename = self._makeStringAttribute(attributes["filename"])
if "language" in attributes: # pragma no branch
self._language = self._makeStringAttribute(attributes["language"])
if "raw_url" in attributes: # pragma no branch
self._raw_url = self._makeStringAttribute(attributes["raw_url"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
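A minimal usage sketch (added for illustration, not part of GistFile.py): GistFile objects are not fetched directly; they come from a Gist, whose files property maps each filename to a GistFile instance. The token environment variable and gist id below are placeholders.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
gist = gh.get_gist("2decf6c462d9b4418f2")  # hypothetical gist id
for name, gist_file in gist.files.items():  # filename -> GistFile
    print(name, gist_file.language, gist_file.size, gist_file.raw_url)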
| 4,864
|
Python
|
.py
| 82
| 54.54878
| 80
| 0.519606
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,341
|
StatsContributor.py
|
PyGithub_PyGithub/github/StatsContributor.py
|
############################ Copyrights and license ############################
# #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class StatsContributor(NonCompletableGithubObject):
"""
This class represents StatsContributors.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-all-contributor-commit-activity
"""
class Week(NonCompletableGithubObject):
"""
This class represents weekly statistics of a contributor.
"""
@property
def w(self) -> datetime:
return self._w.value
@property
def a(self) -> int:
return self._a.value
@property
def d(self) -> int:
return self._d.value
@property
def c(self) -> int:
return self._c.value
def _initAttributes(self) -> None:
self._w: Attribute[datetime] = NotSet
self._a: Attribute[int] = NotSet
self._d: Attribute[int] = NotSet
self._c: Attribute[int] = NotSet
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "w" in attributes: # pragma no branch
self._w = self._makeTimestampAttribute(attributes["w"])
if "a" in attributes: # pragma no branch
self._a = self._makeIntAttribute(attributes["a"])
if "d" in attributes: # pragma no branch
self._d = self._makeIntAttribute(attributes["d"])
if "c" in attributes: # pragma no branch
self._c = self._makeIntAttribute(attributes["c"])
@property
def author(self) -> github.NamedUser.NamedUser:
return self._author.value
@property
def total(self) -> int:
return self._total.value
@property
def weeks(self) -> list[Week]:
return self._weeks.value
def _initAttributes(self) -> None:
self._author: Attribute[github.NamedUser.NamedUser] = NotSet
self._total: Attribute[int] = NotSet
self._weeks: Attribute[list[StatsContributor.Week]] = NotSet
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["author"])
if "total" in attributes: # pragma no branch
self._total = self._makeIntAttribute(attributes["total"])
if "weeks" in attributes: # pragma no branch
self._weeks = self._makeListOfClassesAttribute(self.Week, attributes["weeks"])
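A minimal usage sketch (illustrative, not part of StatsContributor.py): contributor statistics come from Repository.get_stats_contributors(), which can return None while GitHub is still computing them. The token below is a placeholder.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
repo = gh.get_repo("PyGithub/PyGithub")
for contributor in repo.get_stats_contributors() or []:
    print(contributor.author.login, "total commits:", contributor.total)
    for week in contributor.weeks:  # w: week start, a/d/c: additions/deletions/commits
        if week.c:
            print(" ", week.w.date(), f"+{week.a}/-{week.d}", f"{week.c} commits")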
| 5,518
|
Python
|
.py
| 97
| 50.783505
| 101
| 0.537394
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,342
|
GistHistoryState.py
|
PyGithub_PyGithub/github/GistHistoryState.py
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.CommitStats
import github.Gist
import github.GithubObject
import github.NamedUser
from github.GistFile import GistFile
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class GistHistoryState(CompletableGithubObject):
"""
This class represents GistHistoryStates.
"""
def _initAttributes(self) -> None:
self._change_status: Attribute[github.CommitStats.CommitStats] = NotSet
self._comments: Attribute[int] = NotSet
self._comments_url: Attribute[str] = NotSet
self._commits_url: Attribute[str] = NotSet
self._committed_at: Attribute[datetime] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._description: Attribute[str] = NotSet
self._files: Attribute[dict[str, GistFile]] = NotSet
self._forks: Attribute[list[github.Gist.Gist]] = NotSet
self._forks_url: Attribute[str] = NotSet
self._git_pull_url: Attribute[str] = NotSet
self._git_push_url: Attribute[str] = NotSet
self._history: Attribute[list[GistHistoryState]] = NotSet
self._html_url: Attribute[str] = NotSet
self._id: Attribute[str] = NotSet
self._owner: Attribute[github.NamedUser.NamedUser] = NotSet
self._public: Attribute[bool] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._user: Attribute[github.NamedUser.NamedUser] = NotSet
self._version: Attribute[str] = NotSet
@property
def change_status(self) -> github.CommitStats.CommitStats:
self._completeIfNotSet(self._change_status)
return self._change_status.value
@property
def comments(self) -> int:
self._completeIfNotSet(self._comments)
return self._comments.value
@property
def comments_url(self) -> str:
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commits_url(self) -> str:
self._completeIfNotSet(self._commits_url)
return self._commits_url.value
@property
def committed_at(self) -> datetime:
self._completeIfNotSet(self._committed_at)
return self._committed_at.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def files(self) -> dict[str, GistFile]:
self._completeIfNotSet(self._files)
return self._files.value
@property
def forks(self) -> list[github.Gist.Gist]:
self._completeIfNotSet(self._forks)
return self._forks.value
@property
def forks_url(self) -> str:
self._completeIfNotSet(self._forks_url)
return self._forks_url.value
@property
def git_pull_url(self) -> str:
self._completeIfNotSet(self._git_pull_url)
return self._git_pull_url.value
@property
def git_push_url(self) -> str:
self._completeIfNotSet(self._git_push_url)
return self._git_push_url.value
@property
def history(self) -> list[GistHistoryState]:
self._completeIfNotSet(self._history)
return self._history.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self) -> str:
self._completeIfNotSet(self._id)
return self._id.value
@property
def owner(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def public(self) -> bool:
self._completeIfNotSet(self._public)
return self._public.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._user)
return self._user.value
@property
def version(self) -> str:
self._completeIfNotSet(self._version)
return self._version.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "change_status" in attributes: # pragma no branch
self._change_status = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["change_status"])
if "comments" in attributes: # pragma no branch
self._comments = self._makeIntAttribute(attributes["comments"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commits_url" in attributes: # pragma no branch
self._commits_url = self._makeStringAttribute(attributes["commits_url"])
if "committed_at" in attributes: # pragma no branch
self._committed_at = self._makeDatetimeAttribute(attributes["committed_at"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "files" in attributes: # pragma no branch
self._files = self._makeDictOfStringsToClassesAttribute(github.GistFile.GistFile, attributes["files"])
if "forks" in attributes: # pragma no branch
self._forks = self._makeListOfClassesAttribute(github.Gist.Gist, attributes["forks"])
if "forks_url" in attributes: # pragma no branch
self._forks_url = self._makeStringAttribute(attributes["forks_url"])
if "git_pull_url" in attributes: # pragma no branch
self._git_pull_url = self._makeStringAttribute(attributes["git_pull_url"])
if "git_push_url" in attributes: # pragma no branch
self._git_push_url = self._makeStringAttribute(attributes["git_push_url"])
if "history" in attributes: # pragma no branch
self._history = self._makeListOfClassesAttribute(GistHistoryState, attributes["history"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
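A minimal usage sketch (illustrative, not part of GistHistoryState.py): history states are reached through Gist.history; each entry carries a version sha, a commit timestamp, and a CommitStats change summary. Token and gist id are placeholders.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
gist = gh.get_gist("2decf6c462d9b4418f2")  # hypothetical gist id
for state in gist.history:  # list of GistHistoryState
    stats = state.change_status  # CommitStats: additions / deletions / total
    print(state.version, state.committed_at, f"+{stats.additions}/-{stats.deletions}")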
| 10,499
|
Python
|
.py
| 198
| 46.267677
| 119
| 0.606833
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,343
|
CheckRun.py
|
PyGithub_PyGithub/github/CheckRun.py
|
############################ Copyrights and license ############################
# #
# Copyright 2020 Dhruv Manilawala <dhruvmanila@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 majorvin <majorvin.tan@outlook.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Any
import github.CheckRunAnnotation
import github.CheckRunOutput
import github.GithubApp
import github.GithubObject
import github.PullRequest
from github.GithubObject import (
Attribute,
CompletableGithubObject,
NotSet,
Opt,
is_defined,
is_optional,
is_optional_list,
)
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.CheckRunAnnotation import CheckRunAnnotation
from github.CheckRunOutput import CheckRunOutput
from github.GithubApp import GithubApp
from github.PullRequest import PullRequest
class CheckRun(CompletableGithubObject):
"""
This class represents check runs.
The reference can be found here
https://docs.github.com/en/rest/reference/checks#check-runs
"""
def _initAttributes(self) -> None:
self._app: Attribute[GithubApp] = NotSet
self._check_suite_id: Attribute[int] = NotSet
self._completed_at: Attribute[datetime | None] = NotSet
self._conclusion: Attribute[str] = NotSet
self._details_url: Attribute[str] = NotSet
self._external_id: Attribute[str] = NotSet
self._head_sha: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._output: Attribute[github.CheckRunOutput.CheckRunOutput] = NotSet
self._pull_requests: Attribute[list[PullRequest]] = NotSet
self._started_at: Attribute[datetime] = NotSet
self._status: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "conclusion": self._conclusion.value})
@property
def app(self) -> GithubApp:
self._completeIfNotSet(self._app)
return self._app.value
@property
def check_suite_id(self) -> int:
self._completeIfNotSet(self._check_suite_id)
return self._check_suite_id.value
@property
def completed_at(self) -> datetime | None:
self._completeIfNotSet(self._completed_at)
return self._completed_at.value
@property
def conclusion(self) -> str:
self._completeIfNotSet(self._conclusion)
return self._conclusion.value
@property
def details_url(self) -> str:
self._completeIfNotSet(self._details_url)
return self._details_url.value
@property
def external_id(self) -> str:
self._completeIfNotSet(self._external_id)
return self._external_id.value
@property
def head_sha(self) -> str:
self._completeIfNotSet(self._head_sha)
return self._head_sha.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def node_id(self) -> str:
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def output(self) -> CheckRunOutput:
self._completeIfNotSet(self._output)
return self._output.value
@property
def pull_requests(self) -> list[PullRequest]:
self._completeIfNotSet(self._pull_requests)
return self._pull_requests.value
@property
def started_at(self) -> datetime:
self._completeIfNotSet(self._started_at)
return self._started_at.value
@property
def status(self) -> str:
self._completeIfNotSet(self._status)
return self._status.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def get_annotations(self) -> PaginatedList[CheckRunAnnotation]:
"""
:calls: `GET /repos/{owner}/{repo}/check-runs/{check_run_id}/annotations <https://docs.github.com/en/rest/reference/checks#list-check-run-annotations>`_
"""
return PaginatedList(
github.CheckRunAnnotation.CheckRunAnnotation,
self._requester,
f"{self.url}/annotations",
None,
headers={"Accept": "application/vnd.github.v3+json"},
)
def edit(
self,
name: Opt[str] = NotSet,
head_sha: Opt[str] = NotSet,
details_url: Opt[str] = NotSet,
external_id: Opt[str] = NotSet,
status: Opt[str] = NotSet,
started_at: Opt[datetime] = NotSet,
conclusion: Opt[str] = NotSet,
completed_at: Opt[datetime] = NotSet,
output: Opt[dict] = NotSet,
actions: Opt[list[dict]] = NotSet,
) -> None:
"""
:calls: `PATCH /repos/{owner}/{repo}/check-runs/{check_run_id} <https://docs.github.com/en/rest/reference/checks#update-a-check-run>`_
"""
assert is_optional(name, str), name
assert is_optional(head_sha, str), head_sha
assert is_optional(details_url, str), details_url
assert is_optional(external_id, str), external_id
assert is_optional(status, str), status
assert is_optional(started_at, datetime), started_at
assert is_optional(conclusion, str), conclusion
assert is_optional(completed_at, datetime), completed_at
assert is_optional(output, dict), output
assert is_optional_list(actions, dict), actions
post_parameters: dict[str, Any] = {}
if is_defined(name):
post_parameters["name"] = name
if is_defined(head_sha):
post_parameters["head_sha"] = head_sha
if is_defined(details_url):
post_parameters["details_url"] = details_url
if is_defined(external_id):
post_parameters["external_id"] = external_id
if is_defined(status):
post_parameters["status"] = status
if is_defined(started_at):
post_parameters["started_at"] = started_at.strftime("%Y-%m-%dT%H:%M:%SZ")
if is_defined(completed_at):
post_parameters["completed_at"] = completed_at.strftime("%Y-%m-%dT%H:%M:%SZ")
if is_defined(conclusion):
post_parameters["conclusion"] = conclusion
if is_defined(output):
post_parameters["output"] = output
if is_defined(actions):
post_parameters["actions"] = actions
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "app" in attributes: # pragma no branch
self._app = self._makeClassAttribute(github.GithubApp.GithubApp, attributes["app"])
# This only gives us a dictionary with `id` attribute of `check_suite`
if "check_suite" in attributes and "id" in attributes["check_suite"]: # pragma no branch
self._check_suite_id = self._makeIntAttribute(attributes["check_suite"]["id"])
if "completed_at" in attributes: # pragma no branch
self._completed_at = self._makeDatetimeAttribute(attributes["completed_at"])
if "conclusion" in attributes: # pragma no branch
self._conclusion = self._makeStringAttribute(attributes["conclusion"])
if "details_url" in attributes: # pragma no branch
self._details_url = self._makeStringAttribute(attributes["details_url"])
if "external_id" in attributes: # pragma no branch
self._external_id = self._makeStringAttribute(attributes["external_id"])
if "head_sha" in attributes: # pragma no branch
self._head_sha = self._makeStringAttribute(attributes["head_sha"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "output" in attributes: # pragma no branch
self._output = self._makeClassAttribute(github.CheckRunOutput.CheckRunOutput, attributes["output"])
if "pull_requests" in attributes: # pragma no branch
self._pull_requests = self._makeListOfClassesAttribute(
github.PullRequest.PullRequest, attributes["pull_requests"]
)
if "started_at" in attributes: # pragma no branch
self._started_at = self._makeDatetimeAttribute(attributes["started_at"])
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
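A minimal usage sketch (illustrative, not part of CheckRun.py): check runs hang off a commit via Commit.get_check_runs(), and each run exposes its annotations through the paginated get_annotations() shown above. Token, repository, and sha are placeholders.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
repo = gh.get_repo("octocat/Hello-World")  # hypothetical repository
commit = repo.get_commit("7fd1a60b01f91b3")  # hypothetical sha
for run in commit.get_check_runs():
    print(run.name, run.status, run.conclusion)
    for annotation in run.get_annotations():  # CheckRunAnnotation objects
        print(" ", annotation.path, annotation.message)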
| 11,581
|
Python
|
.py
| 237
| 41.489451
| 160
| 0.604614
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,344
|
Workflow.py
|
PyGithub_PyGithub/github/Workflow.py
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Mahesh Raju <coder@mahesh.net> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Thomas Burghout <thomas.burghout@nedap.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2023 sd-kialo <138505487+sd-kialo@users.noreply.github.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.Branch
import github.Commit
import github.GithubObject
import github.NamedUser
import github.Tag
import github.WorkflowRun
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt
from github.PaginatedList import PaginatedList
class Workflow(CompletableGithubObject):
"""
This class represents Workflows.
The reference can be found here
https://docs.github.com/en/rest/reference/actions#workflows
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._path: Attribute[str] = NotSet
self._state: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._badge_url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value, "url": self._url.value})
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def path(self) -> str:
self._completeIfNotSet(self._path)
return self._path.value
@property
def state(self) -> str:
self._completeIfNotSet(self._state)
return self._state.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def badge_url(self) -> str:
self._completeIfNotSet(self._badge_url)
return self._badge_url.value
def create_dispatch(
self, ref: github.Branch.Branch | github.Tag.Tag | github.Commit.Commit | str, inputs: Opt[dict] = NotSet
) -> bool:
"""
:calls: `POST /repos/{owner}/{repo}/actions/workflows/{workflow_id}/dispatches <https://docs.github.com/en/rest/reference/actions#create-a-workflow-dispatch-event>`_
"""
assert (
isinstance(ref, github.Branch.Branch)
or isinstance(ref, github.Tag.Tag)
or isinstance(ref, github.Commit.Commit)
or isinstance(ref, str)
), ref
assert inputs is NotSet or isinstance(inputs, dict), inputs
if isinstance(ref, github.Branch.Branch):
ref = ref.name
elif isinstance(ref, github.Commit.Commit):
ref = ref.sha
elif isinstance(ref, github.Tag.Tag):
ref = ref.name
if inputs is NotSet:
inputs = {}
status, _, _ = self._requester.requestJson(
"POST", f"{self.url}/dispatches", input={"ref": ref, "inputs": inputs}
)
return status == 204
def get_runs(
self,
actor: Opt[github.NamedUser.NamedUser | str] = NotSet,
branch: Opt[github.Branch.Branch | str] = NotSet,
event: Opt[str] = NotSet,
status: Opt[str] = NotSet,
created: Opt[str] = NotSet,
exclude_pull_requests: Opt[bool] = NotSet,
check_suite_id: Opt[int] = NotSet,
head_sha: Opt[str] = NotSet,
) -> PaginatedList[github.WorkflowRun.WorkflowRun]:
"""
:calls: `GET /repos/{owner}/{repo}/actions/workflows/{workflow_id}/runs <https://docs.github.com/en/rest/actions/workflow-runs?apiVersion=2022-11-28#list-workflow-runs-for-a-workflow>`_
"""
assert actor is NotSet or isinstance(actor, github.NamedUser.NamedUser) or isinstance(actor, str), actor
assert branch is NotSet or isinstance(branch, github.Branch.Branch) or isinstance(branch, str), branch
assert event is NotSet or isinstance(event, str), event
assert status is NotSet or isinstance(status, str), status
assert created is NotSet or isinstance(created, str), created
assert exclude_pull_requests is NotSet or isinstance(exclude_pull_requests, bool), exclude_pull_requests
assert check_suite_id is NotSet or isinstance(check_suite_id, int), check_suite_id
assert head_sha is NotSet or isinstance(head_sha, str), head_sha
url_parameters: dict[str, Any] = dict()
if actor is not NotSet:
url_parameters["actor"] = actor._identity if isinstance(actor, github.NamedUser.NamedUser) else actor
if branch is not NotSet:
url_parameters["branch"] = branch.name if isinstance(branch, github.Branch.Branch) else branch
if event is not NotSet:
url_parameters["event"] = event
if status is not NotSet:
url_parameters["status"] = status
if created is not NotSet:
url_parameters["created"] = created
if exclude_pull_requests is not NotSet:
url_parameters["exclude_pull_requests"] = exclude_pull_requests
if check_suite_id is not NotSet:
url_parameters["check_suite_id"] = check_suite_id
if head_sha is not NotSet:
url_parameters["head_sha"] = head_sha
return PaginatedList(
github.WorkflowRun.WorkflowRun,
self._requester,
f"{self.url}/runs",
url_parameters,
None,
list_item="workflow_runs",
)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "badge_url" in attributes: # pragma no branch
self._badge_url = self._makeStringAttribute(attributes["badge_url"])
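A minimal usage sketch (illustrative, not part of Workflow.py): dispatching a run requires the workflow to declare a workflow_dispatch trigger, and create_dispatch() returns True on the HTTP 204 the API answers with. Token, repository, workflow file name, and input are placeholders.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
repo = gh.get_repo("octocat/Hello-World")  # hypothetical repository
workflow = repo.get_workflow("ci.yml")  # hypothetical workflow file name
if workflow.create_dispatch("main", inputs={"dry_run": "true"}):
    for run in workflow.get_runs(branch="main"):  # most recent runs first
        print(run.id, run.status, run.conclusion)
        break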
| 10,471
|
Python
|
.py
| 198
| 45.914141
| 193
| 0.58248
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,345
|
Auth.py
|
PyGithub_PyGithub/github/Auth.py
|
############################ Copyrights and license ############################
# #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2023 chantra <chantra@users.noreply.github.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Jonathan Kliem <jonathan.kliem@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import abc
import base64
import time
from abc import ABC
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING, Dict, Optional, Union
import jwt
from requests import utils
from github import Consts
from github.InstallationAuthorization import InstallationAuthorization
from github.Requester import Requester, WithRequester
if TYPE_CHECKING:
from github.GithubIntegration import GithubIntegration
# For App authentication, time remaining before token expiration to request a new one
ACCESS_TOKEN_REFRESH_THRESHOLD_SECONDS = 20
TOKEN_REFRESH_THRESHOLD_TIMEDELTA = timedelta(seconds=ACCESS_TOKEN_REFRESH_THRESHOLD_SECONDS)
# add new implementations of github.Auth.Auth to docs/utilities.rst
class Auth(abc.ABC):
"""
This class is the base class of all authentication methods for Requester.
"""
@property
@abc.abstractmethod
def token_type(self) -> str:
"""
The type of the auth token as used in the HTTP Authorization header, e.g. Bearer or Basic.
:return: token type
"""
@property
@abc.abstractmethod
def token(self) -> str:
"""
The auth token as used in the HTTP Authorization header.
:return: token
"""
def authentication(self, headers: dict) -> None:
"""
Add authorization to the headers.
"""
headers["Authorization"] = f"{self.token_type} {self.token}"
def mask_authentication(self, headers: dict) -> None:
"""
Mask header, e.g. for logging.
"""
headers["Authorization"] = self._masked_token
@property
def _masked_token(self) -> str:
return "(unknown auth removed)"
class HTTPBasicAuth(Auth, abc.ABC):
@property
@abc.abstractmethod
def username(self) -> str:
"""
The username.
"""
@property
@abc.abstractmethod
def password(self) -> str:
"""
The password.
"""
@property
def token_type(self) -> str:
return "Basic"
@property
def token(self) -> str:
return base64.b64encode(f"{self.username}:{self.password}".encode()).decode("utf-8").replace("\n", "")
@property
def _masked_token(self) -> str:
return "Basic (login and password removed)"
class Login(HTTPBasicAuth):
"""
This class is used to authenticate with login and password.
"""
def __init__(self, login: str, password: str):
assert isinstance(login, str)
assert len(login) > 0
assert isinstance(password, str)
assert len(password) > 0
self._login = login
self._password = password
@property
def login(self) -> str:
return self._login
@property
def username(self) -> str:
return self.login
@property
def password(self) -> str:
return self._password
class Token(Auth):
"""
This class is used to authenticate with a single constant token.
"""
def __init__(self, token: str):
assert isinstance(token, str)
assert len(token) > 0
self._token = token
@property
def token_type(self) -> str:
return "token"
@property
def token(self) -> str:
return self._token
@property
def _masked_token(self) -> str:
return "token (oauth token removed)"
class JWT(Auth, ABC):
"""
This class is the base class to authenticate with a JSON Web Token (JWT).
https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/generating-a-json-web-token-jwt-for-a-github-app
"""
@property
def token_type(self) -> str:
return "Bearer"
class AppAuth(JWT):
"""
This class is used to authenticate as a GitHub App.
https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app
"""
def __init__(
self,
app_id: Union[int, str],
private_key: str,
jwt_expiry: int = Consts.DEFAULT_JWT_EXPIRY,
jwt_issued_at: int = Consts.DEFAULT_JWT_ISSUED_AT,
jwt_algorithm: str = Consts.DEFAULT_JWT_ALGORITHM,
):
assert isinstance(app_id, (int, str)), app_id
if isinstance(app_id, str):
assert len(app_id) > 0, "app_id must not be empty"
assert isinstance(private_key, str)
assert len(private_key) > 0, "private_key must not be empty"
assert isinstance(jwt_expiry, int), jwt_expiry
assert Consts.MIN_JWT_EXPIRY <= jwt_expiry <= Consts.MAX_JWT_EXPIRY, jwt_expiry
self._app_id = app_id
self._private_key = private_key
self._jwt_expiry = jwt_expiry
self._jwt_issued_at = jwt_issued_at
self._jwt_algorithm = jwt_algorithm
@property
def app_id(self) -> Union[int, str]:
return self._app_id
@property
def private_key(self) -> str:
return self._private_key
@property
def token(self) -> str:
return self.create_jwt()
def get_installation_auth(
self,
installation_id: int,
token_permissions: Optional[Dict[str, str]] = None,
requester: Optional[Requester] = None,
) -> "AppInstallationAuth":
"""
Creates a github.Auth.AppInstallationAuth instance for an installation.
:param installation_id: installation id
:param token_permissions: optional permissions
:param requester: optional requester with app authentication
:return:
"""
return AppInstallationAuth(self, installation_id, token_permissions, requester)
def create_jwt(self, expiration: Optional[int] = None) -> str:
"""
Create a signed JWT
https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app
:return string: jwt
"""
if expiration is not None:
assert isinstance(expiration, int), expiration
assert Consts.MIN_JWT_EXPIRY <= expiration <= Consts.MAX_JWT_EXPIRY, expiration
now = int(time.time())
payload = {
"iat": now + self._jwt_issued_at,
"exp": now + (expiration if expiration is not None else self._jwt_expiry),
"iss": self._app_id,
}
encrypted = jwt.encode(payload, key=self.private_key, algorithm=self._jwt_algorithm)
if isinstance(encrypted, bytes):
return encrypted.decode("utf-8")
return encrypted
class AppAuthToken(JWT):
"""
This class is used to authenticate as a GitHub App with a single constant JWT.
https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app
"""
def __init__(self, token: str):
assert isinstance(token, str)
assert len(token) > 0
self._token = token
@property
def token(self) -> str:
return self._token
class AppInstallationAuth(Auth, WithRequester["AppInstallationAuth"]):
"""
This class is used to authenticate as a GitHub App Installation.
https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-as-a-github-app-installation
"""
# used to fetch live access token when calling self.token
__integration: Optional["GithubIntegration"] = None
__installation_authorization: Optional[InstallationAuthorization] = None
def __init__(
self,
app_auth: AppAuth,
installation_id: int,
token_permissions: Optional[Dict[str, str]] = None,
requester: Optional[Requester] = None,
):
super().__init__()
assert isinstance(app_auth, AppAuth), app_auth
assert isinstance(installation_id, int), installation_id
assert token_permissions is None or isinstance(token_permissions, dict), token_permissions
assert requester is None or isinstance(requester, Requester), requester
self._app_auth = app_auth
self._installation_id = installation_id
self._token_permissions = token_permissions
if requester is not None:
self.withRequester(requester)
def withRequester(self, requester: Requester) -> "AppInstallationAuth":
assert isinstance(requester, Requester), requester
super().withRequester(requester.withAuth(self._app_auth))
# imported here to avoid circular import
from github.GithubIntegration import GithubIntegration
self.__integration = GithubIntegration(**self.requester.kwargs)
return self
@property
def app_id(self) -> Union[int, str]:
return self._app_auth.app_id
@property
def private_key(self) -> str:
return self._app_auth.private_key
@property
def installation_id(self) -> int:
return self._installation_id
@property
def token_permissions(self) -> Optional[Dict[str, str]]:
return self._token_permissions
@property
def token_type(self) -> str:
return "token"
@property
def token(self) -> str:
if self.__installation_authorization is None or self._is_expired:
self.__installation_authorization = self._get_installation_authorization()
return self.__installation_authorization.token
@property
def _is_expired(self) -> bool:
assert self.__installation_authorization is not None
token_expires_at = self.__installation_authorization.expires_at - TOKEN_REFRESH_THRESHOLD_TIMEDELTA
return token_expires_at < datetime.now(timezone.utc)
def _get_installation_authorization(self) -> InstallationAuthorization:
assert self.__integration is not None, "Method withRequester(Requester) must be called first"
return self.__integration.get_access_token(
self._installation_id,
permissions=self._token_permissions,
)
@property
def _masked_token(self) -> str:
return "token (oauth token removed)"
class AppUserAuth(Auth, WithRequester["AppUserAuth"]):
"""
This class is used to authenticate as a GitHub App on behalf of a user.
https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/authenticating-with-a-github-app-on-behalf-of-a-user
"""
_client_id: str
_client_secret: str
_token: str
_type: str
_scope: Optional[str]
_expires_at: Optional[datetime]
_refresh_token: Optional[str]
_refresh_expires_at: Optional[datetime]
# imported here to avoid circular import
from github.ApplicationOAuth import ApplicationOAuth
__app: ApplicationOAuth
def __init__(
self,
client_id: str,
client_secret: str,
token: str,
token_type: Optional[str] = None,
expires_at: Optional[datetime] = None,
refresh_token: Optional[str] = None,
refresh_expires_at: Optional[datetime] = None,
requester: Optional[Requester] = None,
) -> None:
super().__init__()
assert isinstance(client_id, str) and len(client_id) > 0
assert isinstance(client_secret, str) and len(client_secret) > 0
assert isinstance(token, str) and len(token) > 0
assert token_type is None or isinstance(token_type, str) and len(token_type) > 0, token_type
assert expires_at is None or isinstance(expires_at, datetime), expires_at
assert refresh_token is None or isinstance(refresh_token, str) and len(refresh_token) > 0
assert refresh_expires_at is None or isinstance(refresh_expires_at, datetime), refresh_expires_at
assert requester is None or isinstance(requester, Requester), requester
self._client_id = client_id
self._client_secret = client_secret
self._token = token
self._type = token_type or "bearer"
self._expires_at = expires_at
self._refresh_token = refresh_token
self._refresh_expires_at = refresh_expires_at
if requester is not None:
self.withRequester(requester)
@property
def token_type(self) -> str:
return self._type
@property
def token(self) -> str:
if self._is_expired:
self._refresh()
return self._token
def withRequester(self, requester: Requester) -> "AppUserAuth":
assert isinstance(requester, Requester), requester
super().withRequester(requester.withAuth(None))
# imported here to avoid circular import
from github.ApplicationOAuth import ApplicationOAuth
self.__app = ApplicationOAuth(
# take requester given to super().withRequester, not given to this method
super().requester,
headers={},
attributes={
"client_id": self._client_id,
"client_secret": self._client_secret,
},
completed=False,
)
return self
@property
def _is_expired(self) -> bool:
return self._expires_at is not None and self._expires_at < datetime.now(timezone.utc)
def _refresh(self) -> None:
if self._refresh_token is None:
raise RuntimeError("Cannot refresh expired token because no refresh token has been provided")
if self._refresh_expires_at is not None and self._refresh_expires_at < datetime.now(timezone.utc):
raise RuntimeError("Cannot refresh expired token because refresh token also expired")
# refresh token
token = self.__app.refresh_access_token(self._refresh_token)
# update this auth
self._token = token.token
self._type = token.type
self._scope = token.scope
self._expires_at = token.expires_at
self._refresh_token = token.refresh_token
self._refresh_expires_at = token.refresh_expires_at
@property
def expires_at(self) -> Optional[datetime]:
return self._expires_at
@property
def refresh_token(self) -> Optional[str]:
return self._refresh_token
@property
def refresh_expires_at(self) -> Optional[datetime]:
return self._refresh_expires_at
@property
def _masked_token(self) -> str:
return "Bearer (jwt removed)"
class NetrcAuth(HTTPBasicAuth, WithRequester["NetrcAuth"]):
"""
This class is used to authenticate via .netrc.
"""
def __init__(self) -> None:
super().__init__()
self._login: Optional[str] = None
self._password: Optional[str] = None
@property
def username(self) -> str:
return self.login
@property
def login(self) -> str:
assert self._login is not None, "Method withRequester(Requester) must be called first"
return self._login
@property
def password(self) -> str:
assert self._password is not None, "Method withRequester(Requester) must be called first"
return self._password
def withRequester(self, requester: Requester) -> "NetrcAuth":
assert isinstance(requester, Requester), requester
super().withRequester(requester)
auth = utils.get_netrc_auth(requester.base_url, raise_errors=True)
if auth is None:
raise RuntimeError(f"Could not get credentials from netrc for host {requester.hostname}")
self._login, self._password = auth
return self
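A minimal usage sketch (illustrative, not part of Auth.py): the two most common paths are a plain personal access token (Token) and GitHub App authentication, where AppAuth signs the JWT and get_installation_auth() yields the self-refreshing installation token implemented above. The app id and private key are placeholders.
from github import Auth, Github, GithubIntegration

gh = Github(auth=Auth.Token("ghp_XXXX"))  # placeholder personal access token

app_auth = Auth.AppAuth(123456, "-----BEGIN RSA PRIVATE KEY-----\n...")  # placeholders
integration = GithubIntegration(auth=app_auth)
installation = integration.get_installations()[0]  # first installation of the app
gh_as_installation = Github(auth=app_auth.get_installation_auth(installation.id))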
| 17,581
|
Python
|
.py
| 412
| 35.565534
| 142
| 0.628189
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,346
|
StatsParticipation.py
|
PyGithub_PyGithub/github/StatsParticipation.py
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class StatsParticipation(NonCompletableGithubObject):
"""
This class represents StatsParticipations.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-the-weekly-commit-count
"""
def _initAttributes(self) -> None:
self._all: Attribute[list[int]] = NotSet
self._owner: Attribute[list[int]] = NotSet
@property
def all(self) -> list[int]:
return self._all.value
@property
def owner(self) -> list[int]:
return self._owner.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "all" in attributes: # pragma no branch
self._all = self._makeListOfIntsAttribute(attributes["all"])
if "owner" in attributes: # pragma no branch
self._owner = self._makeListOfIntsAttribute(attributes["owner"])
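A minimal usage sketch (illustrative, not part of StatsParticipation.py): Repository.get_stats_participation() returns one of these objects, or None while GitHub computes the data; all and owner each hold 52 weekly commit counts, oldest week first. The token is a placeholder.
import os

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
repo = gh.get_repo("PyGithub/PyGithub")
participation = repo.get_stats_participation()
if participation is not None:  # None while the stats are still being computed
    print("commits, last 52 weeks:", sum(participation.all))
    print("by the owner:          ", sum(participation.owner))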
| 4,077
|
Python
|
.py
| 61
| 63.754098
| 80
| 0.50412
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,347
|
Migration.py
|
PyGithub_PyGithub/github/Migration.py
|
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Michael Stead <michael.stead@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Nicolas Agustín Torres <nicolastrres@gmail.com> #
# Copyright 2018 Shubham Singh <41840111+singh811@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 per1234 <accounts@perglass.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Mark Amery <markamery@btinternet.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
import urllib.parse
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
import github.PaginatedList
import github.Repository
from github import Consts
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class Migration(CompletableGithubObject):
"""
This class represents Migrations.
The reference can be found here
https://docs.github.com/en/rest/reference/migrations
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._owner: Attribute[github.NamedUser.NamedUser] = NotSet
self._guid: Attribute[str] = NotSet
self._state: Attribute[str] = NotSet
self._lock_repositories: Attribute[bool] = NotSet
self._exclude_attachments: Attribute[bool] = NotSet
self._repositories: Attribute[list[github.Repository.Repository]] = NotSet
self._url: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"state": self._state.value, "url": self._url.value})
@property
def id(self) -> int:
return self._id.value
@property
def owner(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._owner)
return self._owner.value
@property
def guid(self) -> str:
self._completeIfNotSet(self._guid)
return self._guid.value
@property
def state(self) -> str:
self._completeIfNotSet(self._state)
return self._state.value
@property
def lock_repositories(self) -> bool:
self._completeIfNotSet(self._lock_repositories)
return self._lock_repositories.value
@property
def exclude_attachments(self) -> bool:
self._completeIfNotSet(self._exclude_attachments)
return self._exclude_attachments.value
@property
def repositories(self) -> list[github.Repository.Repository]:
self._completeIfNotSet(self._repositories)
return self._repositories.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
def get_status(self) -> str:
"""
:calls: `GET /user/migrations/{migration_id} <https://docs.github.com/en/rest/reference/migrations>`_
"""
headers, data = self._requester.requestJsonAndCheck(
"GET", self.url, headers={"Accept": Consts.mediaTypeMigrationPreview}
)
self._useAttributes(data)
return self.state
def get_archive_url(self) -> str:
"""
:calls: `GET /user/migrations/{migration_id}/archive <https://docs.github.com/en/rest/reference/migrations>`_
"""
headers, data = self._requester.requestJsonAndCheck(
"GET",
f"{self.url}/archive",
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
return data["data"]
def delete(self) -> None:
"""
:calls: `DELETE /user/migrations/{migration_id}/archive <https://docs.github.com/en/rest/reference/migrations>`_
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
f"{self.url}/archive",
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
def unlock_repo(self, repo_name: str) -> None:
"""
:calls: `DELETE /user/migrations/{migration_id}/repos/{repo_name}/lock <https://docs.github.com/en/rest/reference/migrations>`_
"""
assert isinstance(repo_name, str), repo_name
repo_name = urllib.parse.quote(repo_name)
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
f"{self.url}/repos/{repo_name}/lock",
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes:
self._id = self._makeIntAttribute(attributes["id"])
if "owner" in attributes:
self._owner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["owner"])
if "guid" in attributes:
self._guid = self._makeStringAttribute(attributes["guid"])
if "state" in attributes:
self._state = self._makeStringAttribute(attributes["state"])
if "lock_repositories" in attributes:
self._lock_repositories = self._makeBoolAttribute(attributes["lock_repositories"])
if "exclude_attachments" in attributes:
self._exclude_attachments = self._makeBoolAttribute(attributes["exclude_attachments"])
if "repositories" in attributes:
self._repositories = self._makeListOfClassesAttribute(
github.Repository.Repository, attributes["repositories"]
)
if "url" in attributes:
self._url = self._makeStringAttribute(attributes["url"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes:
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
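A minimal usage sketch (illustrative, not part of Migration.py): a migration is started from the authenticated user, then polled with get_status() until the archive is ready (pending, exporting, then exported, or failed). Token and repository name are placeholders.
import os
import time

from github import Auth, Github

gh = Github(auth=Auth.Token(os.environ["GITHUB_TOKEN"]))  # placeholder token
user = gh.get_user()
migration = user.create_migration(["hello-world"])  # hypothetical repository name
while migration.get_status() in ("pending", "exporting"):
    time.sleep(10)  # poll until the export finishes or fails
if migration.get_status() == "exported":
    print("archive url:", migration.get_archive_url())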
| 9,019
|
Python
|
.py
| 172
| 46.063953
| 135
| 0.579168
|
PyGithub/PyGithub
| 6,892
| 1,756
| 334
|
LGPL-3.0
|
9/5/2024, 5:11:50 PM (Europe/Amsterdam)
|
12,348
|
WorkflowRun.py
|
PyGithub_PyGithub/github/WorkflowRun.py
|
############################ Copyrights and license ############################
# #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 Yannick Jadoul <yannick.jadoul@belgacom.net> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Aleksei Fedotov <lexa@cfotr.com> #
# Copyright 2022 Gabriele Oliaro <ict@gabrieleoliaro.it> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jeppe Fihl-Pearson <tenzer@tenzer.dk> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Sasha Chung <50770626+nuang-ee@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Chris Gavin <chris@chrisgavin.me> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Any, NamedTuple
import github.GitCommit
import github.PullRequest
import github.WorkflowJob
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt, is_optional
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.Artifact import Artifact
from github.GitCommit import GitCommit
from github.PullRequest import PullRequest
from github.Repository import Repository
from github.WorkflowJob import WorkflowJob
class TimingData(NamedTuple):
billable: dict[str, dict[str, int]]
run_duration_ms: int
class WorkflowRun(CompletableGithubObject):
"""
This class represents Workflow Runs.
The reference can be found here
https://docs.github.com/en/rest/reference/actions#workflow-runs
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._url: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
self._path: Attribute[str] = NotSet
self._head_branch: Attribute[str] = NotSet
self._head_sha: Attribute[str] = NotSet
self._run_attempt: Attribute[int] = NotSet
        self._run_number: Attribute[int] = NotSet
        self._workflow_id: Attribute[int] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._pull_requests: Attribute[list[PullRequest]] = NotSet
self._status: Attribute[str] = NotSet
self._conclusion: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._jobs_url: Attribute[str] = NotSet
self._logs_url: Attribute[str] = NotSet
self._display_title: Attribute[str] = NotSet
self._event: Attribute[str] = NotSet
self._run_started_at: Attribute[datetime] = NotSet
self._check_suite_url: Attribute[str] = NotSet
self._cancel_url: Attribute[str] = NotSet
self._rerun_url: Attribute[str] = NotSet
self._artifacts_url: Attribute[str] = NotSet
self._workflow_url: Attribute[str] = NotSet
self._head_commit: Attribute[GitCommit] = NotSet
self._repository: Attribute[Repository] = NotSet
self._head_repository: Attribute[Repository] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "url": self._url.value})
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def head_branch(self) -> str:
self._completeIfNotSet(self._head_branch)
return self._head_branch.value
@property
def head_sha(self) -> str:
self._completeIfNotSet(self._head_sha)
return self._head_sha.value
@property
def display_title(self) -> str:
self._completeIfNotSet(self._display_title)
return self._display_title.value
@property
def path(self) -> str:
self._completeIfNotSet(self._path)
return self._path.value
@property
def run_attempt(self) -> int:
self._completeIfNotSet(self._run_attempt)
return self._run_attempt.value
@property
def run_number(self) -> int:
self._completeIfNotSet(self._run_number)
return self._run_number.value
@property
def event(self) -> str:
self._completeIfNotSet(self._event)
return self._event.value
@property
def run_started_at(self) -> datetime:
self._completeIfNotSet(self._run_started_at)
return self._run_started_at.value
@property
def status(self) -> str:
self._completeIfNotSet(self._status)
return self._status.value
@property
def conclusion(self) -> str:
self._completeIfNotSet(self._conclusion)
return self._conclusion.value
@property
def workflow_id(self) -> int:
self._completeIfNotSet(self._workflow_id)
return self._workflow_id.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def pull_requests(self) -> list[PullRequest]:
self._completeIfNotSet(self._pull_requests)
return self._pull_requests.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def jobs_url(self) -> str:
self._completeIfNotSet(self._jobs_url)
return self._jobs_url.value
@property
def logs_url(self) -> str:
self._completeIfNotSet(self._logs_url)
return self._logs_url.value
@property
def check_suite_url(self) -> str:
self._completeIfNotSet(self._check_suite_url)
return self._check_suite_url.value
@property
def artifacts_url(self) -> str:
self._completeIfNotSet(self._artifacts_url)
return self._artifacts_url.value
def get_artifacts(self) -> PaginatedList[Artifact]:
return PaginatedList(
github.Artifact.Artifact,
self._requester,
self._artifacts_url.value,
None,
list_item="artifacts",
)
@property
def cancel_url(self) -> str:
self._completeIfNotSet(self._cancel_url)
return self._cancel_url.value
@property
def rerun_url(self) -> str:
self._completeIfNotSet(self._rerun_url)
return self._rerun_url.value
@property
def workflow_url(self) -> str:
self._completeIfNotSet(self._workflow_url)
return self._workflow_url.value
@property
def head_commit(self) -> GitCommit:
self._completeIfNotSet(self._head_commit)
return self._head_commit.value
@property
def repository(self) -> Repository:
self._completeIfNotSet(self._repository)
return self._repository.value
@property
def head_repository(self) -> Repository:
self._completeIfNotSet(self._head_repository)
return self._head_repository.value
def cancel(self) -> bool:
"""
:calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/cancel <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_
"""
status, _, _ = self._requester.requestJson("POST", self.cancel_url)
return status == 202
def rerun(self) -> bool:
"""
:calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/rerun <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_
"""
status, _, _ = self._requester.requestJson("POST", self.rerun_url)
return status == 201
def rerun_failed_jobs(self) -> bool:
"""
:calls: `POST /repos/{owner}/{repo}/actions/runs/{run_id}/rerun-failed-jobs <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_
"""
status, _, _ = self._requester.requestJson("POST", f"{self.url}/rerun-failed-jobs")
return status == 201
def timing(self) -> TimingData:
"""
:calls: `GET /repos/{owner}/{repo}/actions/runs/{run_id}/timing <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_
"""
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/timing")
return TimingData(billable=data["billable"], run_duration_ms=data["run_duration_ms"]) # type: ignore
def delete(self) -> bool:
"""
:calls: `DELETE /repos/{owner}/{repo}/actions/runs/{run_id} <https://docs.github.com/en/rest/reference/actions#workflow-runs>`_
"""
status, _, _ = self._requester.requestJson("DELETE", self.url)
return status == 204
def jobs(self, _filter: Opt[str] = NotSet) -> PaginatedList[WorkflowJob]:
"""
:calls "`GET /repos/{owner}/{repo}/actions/runs/{run_id}/jobs <https://docs.github.com/en/rest/reference/actions#list-jobs-for-a-workflow-run>`_
:param _filter: string `latest`, or `all`
"""
assert is_optional(_filter, str), _filter
url_parameters = NotSet.remove_unset_items({"filter": _filter})
return PaginatedList(
github.WorkflowJob.WorkflowJob,
self._requester,
self.jobs_url,
url_parameters,
list_item="jobs",
)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "head_branch" in attributes: # pragma no branch
self._head_branch = self._makeStringAttribute(attributes["head_branch"])
if "head_sha" in attributes: # pragma no branch
self._head_sha = self._makeStringAttribute(attributes["head_sha"])
if "display_title" in attributes: # pragma no branch
self._display_title = self._makeStringAttribute(attributes["display_title"])
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "run_attempt" in attributes: # pragma no branch
self._run_attempt = self._makeIntAttribute(attributes["run_attempt"])
if "run_number" in attributes: # pragma no branch
self._run_number = self._makeIntAttribute(attributes["run_number"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "run_started_at" in attributes: # pragma no branch
assert attributes["run_started_at"] is None or isinstance(attributes["run_started_at"], str), attributes[
"run_started_at"
]
self._run_started_at = self._makeDatetimeAttribute(attributes["run_started_at"])
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
if "conclusion" in attributes: # pragma no branch
self._conclusion = self._makeStringAttribute(attributes["conclusion"])
if "workflow_id" in attributes: # pragma no branch
self._workflow_id = self._makeIntAttribute(attributes["workflow_id"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "pull_requests" in attributes: # pragma no branch
self._pull_requests = self._makeListOfClassesAttribute(
github.PullRequest.PullRequest, attributes["pull_requests"]
)
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "jobs_url" in attributes: # pragma no branch
self._jobs_url = self._makeStringAttribute(attributes["jobs_url"])
if "logs_url" in attributes: # pragma no branch
self._logs_url = self._makeStringAttribute(attributes["logs_url"])
if "check_suite_url" in attributes: # pragma no branch
self._check_suite_url = self._makeStringAttribute(attributes["check_suite_url"])
if "artifacts_url" in attributes: # pragma no branch
self._artifacts_url = self._makeStringAttribute(attributes["artifacts_url"])
if "cancel_url" in attributes: # pragma no branch
self._cancel_url = self._makeStringAttribute(attributes["cancel_url"])
if "rerun_url" in attributes: # pragma no branch
self._rerun_url = self._makeStringAttribute(attributes["rerun_url"])
if "workflow_url" in attributes: # pragma no branch
self._workflow_url = self._makeStringAttribute(attributes["workflow_url"])
if "head_commit" in attributes: # pragma no branch
self._head_commit = self._makeClassAttribute(github.GitCommit.GitCommit, attributes["head_commit"])
if "repository" in attributes: # pragma no branch
self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
if "head_repository" in attributes: # pragma no branch
self._head_repository = self._makeClassAttribute(
github.Repository.Repository, attributes["head_repository"]
)
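# Illustrative usage (not part of the PyGithub source): a minimal sketch exercising the
# run-control methods above; "owner/repo" and the token are placeholders.
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    runs = gh.get_repo("owner/repo").get_workflow_runs()
    run = runs.get_page(0)[0]  # most recent run first
    if run.status == "in_progress":
        run.cancel()  # POST .../cancel, returns True on HTTP 202
    elif run.conclusion == "failure":
        run.rerun_failed_jobs()  # POST .../rerun-failed-jobs, returns True on HTTP 201
    timing = run.timing()  # TimingData(billable=..., run_duration_ms=...)
    print(run.display_title, timing.run_duration_ms, "ms")
    for job in run.jobs(_filter="latest"):
        print(job.name, job.conclusion)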
| 15,721 | Python | .py | 314 | 42.410828 | 152 | 0.615029 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,349 | StatsCommitActivity.py | PyGithub_PyGithub/github/StatsCommitActivity.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
import github.GithubObject
from github.GithubObject import Attribute
class StatsCommitActivity(github.GithubObject.NonCompletableGithubObject):
"""
This class represents StatsCommitActivities.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-the-last-year-of-commit-activity
"""
def _initAttributes(self) -> None:
self._week: Attribute[datetime] = github.GithubObject.NotSet
self._total: Attribute[int] = github.GithubObject.NotSet
        self._days: Attribute[list[int]] = github.GithubObject.NotSet
@property
def week(self) -> datetime:
return self._week.value
@property
def total(self) -> int:
return self._total.value
@property
    def days(self) -> "list[int]":
return self._days.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "week" in attributes: # pragma no branch
self._week = self._makeTimestampAttribute(attributes["week"])
if "total" in attributes: # pragma no branch
self._total = self._makeIntAttribute(attributes["total"])
if "days" in attributes: # pragma no branch
self._days = self._makeListOfIntsAttribute(attributes["days"])
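# Illustrative usage (not part of the PyGithub source): a minimal sketch, assuming
# Repository.get_stats_commit_activity(), which returns a list of the objects above
# (or None while GitHub is still computing the statistics).
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    repo = gh.get_repo("PyGithub/PyGithub")
    for week_stats in repo.get_stats_commit_activity() or []:
        # `week` marks the start of the week; `days` holds seven per-day commit counts.
        print(week_stats.week.date(), week_stats.total, week_stats.days)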
| 4,309 | Python | .py | 67 | 60.761194 | 88 | 0.51879 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,350 | ProjectColumn.py | PyGithub_PyGithub/github/ProjectColumn.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Matt Babineau <mbabineau@dataxu.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Martijn Koster <mak-github@greenhills.co.uk> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 Yossarian King <yggy@blackbirdinteractive.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Benoit Latinier <benoit@latinier.fr> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Vincent <github@fleeto.us> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Alice GIRARD <bouhahah@gmail.com> #
# Copyright 2020 Florent Clarret <florent.clarret@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Dhruv Bhanushali <dhruv_b@live.com> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.Project
import github.ProjectCard
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt
from github.PaginatedList import PaginatedList
from . import Consts
class ProjectColumn(CompletableGithubObject):
"""
This class represents Project Columns.
The reference can be found here
https://docs.github.com/en/rest/reference/projects#columns
"""
def _initAttributes(self) -> None:
self._cards_url: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._project_url: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value})
@property
def cards_url(self) -> str:
return self._cards_url.value
@property
def created_at(self) -> datetime:
return self._created_at.value
@property
def id(self) -> int:
return self._id.value
@property
def name(self) -> str:
return self._name.value
@property
def node_id(self) -> str:
return self._node_id.value
@property
def project_url(self) -> str:
return self._project_url.value
@property
def updated_at(self) -> datetime:
return self._updated_at.value
@property
def url(self) -> str:
return self._url.value
def get_cards(self, archived_state: Opt[str] = NotSet) -> PaginatedList[github.ProjectCard.ProjectCard]:
"""
:calls: `GET /projects/columns/{column_id}/cards <https://docs.github.com/en/rest/reference/projects#list-project-cards>`_
"""
assert archived_state is NotSet or isinstance(archived_state, str), archived_state
url_parameters = dict()
if archived_state is not NotSet:
url_parameters["archived_state"] = archived_state
return PaginatedList(
github.ProjectCard.ProjectCard,
self._requester,
f"{self.url}/cards",
url_parameters,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def create_card(
self,
note: Opt[str] = NotSet,
content_id: Opt[int] = NotSet,
content_type: Opt[str] = NotSet,
) -> github.ProjectCard.ProjectCard:
"""
:calls: `POST /projects/columns/{column_id}/cards <https://docs.github.com/en/rest/reference/projects#create-a-project-card>`_
"""
if isinstance(note, str):
assert content_id is NotSet, content_id
assert content_type is NotSet, content_type
post_parameters: dict[str, Any] = {"note": note}
else:
assert note is NotSet, note
assert isinstance(content_id, int), content_id
assert isinstance(content_type, str), content_type
post_parameters = {"content_id": content_id, "content_type": content_type}
import_header = {"Accept": Consts.mediaTypeProjectsPreview}
headers, data = self._requester.requestJsonAndCheck(
"POST", f"{self.url}/cards", headers=import_header, input=post_parameters
)
return github.ProjectCard.ProjectCard(self._requester, headers, data, completed=True)
def move(self, position: str) -> bool:
"""
        :calls: `POST /projects/columns/{column_id}/moves <https://docs.github.com/en/rest/reference/projects#move-a-project-column>`_
"""
assert isinstance(position, str), position
post_parameters = {"position": position}
status, _, _ = self._requester.requestJson(
"POST",
f"{self.url}/moves",
input=post_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
return status == 201
def delete(self) -> bool:
"""
:calls: `DELETE /projects/columns/{column_id} <https://docs.github.com/en/rest/reference/projects#delete-a-project-column>`_
"""
status, _, _ = self._requester.requestJson(
"DELETE",
self.url,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
return status == 204
def edit(self, name: str) -> None:
"""
:calls: `PATCH /projects/columns/{column_id} <https://docs.github.com/en/rest/reference/projects#update-an-existing-project-column>`_
"""
assert isinstance(name, str), name
patch_parameters = {"name": name}
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
input=patch_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
self._useAttributes(data)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "cards_url" in attributes: # pragma no branch
self._cards_url = self._makeStringAttribute(attributes["cards_url"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "project_url" in attributes: # pragma no branch
self._project_url = self._makeStringAttribute(attributes["project_url"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
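# Illustrative usage (not part of the PyGithub source): a minimal sketch of the column
# API above, assuming a classic project already exists; all names are placeholders.
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    project = gh.get_repo("owner/repo").get_projects().get_page(0)[0]
    column = project.get_columns().get_page(0)[0]
    column.edit("In progress")
    column.create_card(note="Triage incoming issues")  # note card; content cards take content_id/content_type
    for card in column.get_cards(archived_state="not_archived"):
        print(card.id)
    column.move("last")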
| 9,971 | Python | .py | 188 | 46.244681 | 141 | 0.569407 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,351 | RequiredStatusChecks.py | PyGithub_PyGithub/github/RequiredStatusChecks.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class RequiredStatusChecks(CompletableGithubObject):
"""
This class represents Required Status Checks.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-status-checks-protection
"""
def _initAttributes(self) -> None:
self._strict: Attribute[bool] = NotSet
self._contexts: Attribute[list[str]] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"strict": self._strict.value, "url": self._url.value})
@property
def strict(self) -> bool:
self._completeIfNotSet(self._strict)
return self._strict.value
@property
def contexts(self) -> list[str]:
self._completeIfNotSet(self._contexts)
return self._contexts.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "strict" in attributes: # pragma no branch
self._strict = self._makeBoolAttribute(attributes["strict"])
if "contexts" in attributes: # pragma no branch
self._contexts = self._makeListOfStringsAttribute(attributes["contexts"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
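# Illustrative usage (not part of the PyGithub source): instances are normally obtained
# through branch protection; a minimal sketch with placeholder names.
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    branch = gh.get_repo("owner/repo").get_branch("main")
    checks = branch.get_required_status_checks()
    print(checks.strict, checks.contexts)  # e.g. True ['ci/build', 'ci/test']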
| 4,671 | Python | .py | 73 | 60.123288 | 87 | 0.518866 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,352 | Artifact.py | PyGithub_PyGithub/github/Artifact.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Matt Babineau <mbabineau@dataxu.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Martijn Koster <mak-github@greenhills.co.uk> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Colby Gallup <colbygallup@gmail.com> #
# Copyright 2020 Mahesh Raju <coder@mahesh.net> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Aleksei Fedotov <lexa@cfotr.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Any
import github.WorkflowRun
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.WorkflowRun import WorkflowRun
class Artifact(NonCompletableGithubObject):
"""
This class represents an Artifact of Github Run.
"""
def _initAttributes(self) -> None:
self._archive_download_url: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._expired: Attribute[bool] = NotSet
self._expires_at: Attribute[datetime] = NotSet
self._head_sha: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._size_in_bytes: Attribute[int] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._workflow_run: Attribute[WorkflowRun] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value, "id": self._id.value})
@property
def archive_download_url(self) -> str:
return self._archive_download_url.value
@property
def created_at(self) -> datetime:
return self._created_at.value
@property
def expired(self) -> bool:
return self._expired.value
@property
def expires_at(self) -> datetime:
return self._expires_at.value
@property
def head_sha(self) -> str:
return self._head_sha.value
@property
def id(self) -> int:
return self._id.value
@property
def name(self) -> str:
return self._name.value
@property
def node_id(self) -> str:
return self._node_id.value
@property
def size_in_bytes(self) -> int:
return self._size_in_bytes.value
@property
def updated_at(self) -> datetime:
return self._updated_at.value
@property
def url(self) -> str:
return self._url.value
@property
def workflow_run(self) -> WorkflowRun:
return self._workflow_run.value
def delete(self) -> bool:
"""
:calls: `DELETE /repos/{owner}/{repo}/actions/artifacts/{artifact_id} <https://docs.github.com/en/rest/actions/artifacts#delete-an-artifact>`_
"""
status, headers, data = self._requester.requestBlob("DELETE", self.url)
return status == 204
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "archive_download_url" in attributes: # pragma no branch
self._archive_download_url = self._makeStringAttribute(attributes["archive_download_url"])
if "created_at" in attributes: # pragma no branch
assert attributes["created_at"] is None or isinstance(attributes["created_at"], (str,)), attributes[
"created_at"
]
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "expired" in attributes: # pragma no branch
self._expired = self._makeBoolAttribute(attributes["expired"])
if "expires_at" in attributes: # pragma no branch
assert attributes["expires_at"] is None or isinstance(attributes["expires_at"], (str,)), attributes[
"expires_at"
]
self._expires_at = self._makeDatetimeAttribute(attributes["expires_at"])
if "head_sha" in attributes: # pragma no branch
self._head_sha = self._makeStringAttribute(attributes["head_sha"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "size_in_bytes" in attributes: # pragma no branch
self._size_in_bytes = self._makeIntAttribute(attributes["size_in_bytes"])
if "updated_at" in attributes: # pragma no branch
assert attributes["updated_at"] is None or isinstance(attributes["updated_at"], (str,)), attributes[
"updated_at"
]
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "workflow_run" in attributes: # pragma no branch
self._workflow_run = self._makeClassAttribute(github.WorkflowRun.WorkflowRun, attributes["workflow_run"])
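# Illustrative usage (not part of the PyGithub source): a minimal sketch listing a
# repository's artifacts and pruning old throwaway ones; names are placeholders.
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    repo = gh.get_repo("owner/repo")
    for artifact in repo.get_artifacts():
        print(artifact.name, artifact.size_in_bytes, artifact.expired)
        if not artifact.expired and artifact.name.startswith("tmp-"):
            artifact.delete()  # DELETE .../artifacts/{artifact_id}, True on HTTP 204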
| 8,207 | Python | .py | 145 | 50.427586 | 150 | 0.564677 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,353 | GistComment.py | PyGithub_PyGithub/github/GistComment.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class GistComment(CompletableGithubObject):
"""
This class represents GistComments.
The reference can be found here
https://docs.github.com/en/rest/reference/gists#comments
"""
def _initAttributes(self) -> None:
self._body: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._id: Attribute[int] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._user: Attribute[github.NamedUser.NamedUser] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "user": self._user.value})
@property
def body(self) -> str:
self._completeIfNotSet(self._body)
return self._body.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def user(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._user)
return self._user.value
def delete(self) -> None:
"""
:calls: `DELETE /gists/{gist_id}/comments/{id} <https://docs.github.com/en/rest/reference/gists#comments>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(self, body: str) -> None:
"""
:calls: `PATCH /gists/{gist_id}/comments/{id} <https://docs.github.com/en/rest/reference/gists#comments>`_
"""
assert isinstance(body, str), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
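# Illustrative usage (not part of the PyGithub source): a minimal sketch of reading and
# editing gist comments; the gist id is a placeholder.
if __name__ == "__main__":  # pragma: no cover - example only
    import github
    gh = github.Github(auth=github.Auth.Token("<personal-access-token>"))
    gist = gh.get_gist("<gist-id>")
    for comment in gist.get_comments():
        print(comment.user.login, comment.created_at, comment.body)
    comment = gist.get_comments().get_page(0)[0]
    comment.edit(comment.body + " (edited)")  # PATCH, then _useAttributes refreshes fields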
| 6,338 | Python | .py | 112 | 51.25 | 115 | 0.55058 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,354 | Organization.py | PyGithub_PyGithub/github/Organization.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Sebastien Besson <seb.besson@gmail.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Matthew Neal <meneal@matthews-mbp.raleigh.ibm.com> #
# Copyright 2016 Michael Pereira <pereira.m@gmail.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Balázs Rostás <rostas.balazs@gmail.com> #
# Copyright 2018 Anton Nguyen <afnguyen85@gmail.com> #
# Copyright 2018 Jacopo Notarstefano <jacopo.notarstefano@gmail.com> #
# Copyright 2018 Jasper van Wanrooy <jasper@vanwanrooy.net> #
# Copyright 2018 Raihaan <31362124+res0nance@users.noreply.github.com> #
# Copyright 2018 Shubham Singh <41840111+singh811@users.noreply.github.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Tim Boring <tboring@hearst.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 Yossarian King <yggy@blackbirdinteractive.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Brian Choy <byceee@gmail.com> #
# Copyright 2019 Geoffroy Jabouley <gjabouley@invensense.com> #
# Copyright 2019 Pascal Bach <pasci.bach@gmail.com> #
# Copyright 2019 Raihaan <31362124+res0nance@users.noreply.github.com> #
# Copyright 2019 Shibasis Patel <smartshibasish@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2019 ebrown <brownierin@users.noreply.github.com> #
# Copyright 2020 Anuj Bansal <bansalanuj1996@gmail.com> #
# Copyright 2020 Glenn McDonald <testworksau@users.noreply.github.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 latacora-daniel <71085674+latacora-daniel@users.noreply.github.com>#
# Copyright 2020 ton-katsu <sakamoto.yoshihisa@gmail.com> #
# Copyright 2021 James Simpson <jsimpso@users.noreply.github.com> #
# Copyright 2021 Marina Peresypkina <mi9onev@gmail.com> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Tanner <51724788+lightningboltemoji@users.noreply.github.com> #
# Copyright 2022 KimSia Sim <245021+simkimsia@users.noreply.github.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Felipe Peter <mr-peipei@web.de> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Jonathan Greg <31892308+jmgreg31@users.noreply.github.com> #
# Copyright 2023 Jonathan Leitschuh <jonathan.leitschuh@gmail.com> #
# Copyright 2023 Mark Amery <markamery@btinternet.com> #
# Copyright 2023 Mauricio Alejandro Martínez Pacheco <mauricio.martinez@premise.com>#
# Copyright 2023 Mauricio Alejandro Martínez Pacheco <n_othing@hotmail.com> #
# Copyright 2023 Oliver Mannion <125105+tekumara@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Andrii Kezikov <cheshirez@gmail.com> #
# Copyright 2024 Bill Napier <napier@pobox.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jacky Lam <jacky.lam@r2studiohk.com> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Mohamed Mostafa <112487260+mohy01@users.noreply.github.com> #
# Copyright 2024 Oskar Jansson <56458534+janssonoskar@users.noreply.github.com>#
# Copyright 2024 Thomas Cooper <coopernetes@proton.me> #
# Copyright 2024 Thomas Crowley <15927917+thomascrowley@users.noreply.github.com>#
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
import urllib.parse
from datetime import datetime
from typing import TYPE_CHECKING, Any
import github.Event
import github.GithubObject
import github.HookDelivery
import github.NamedUser
import github.OrganizationCustomProperty
import github.OrganizationDependabotAlert
import github.OrganizationSecret
import github.OrganizationVariable
import github.Plan
import github.Project
import github.Repository
import github.Team
from github import Consts
from github.GithubObject import (
Attribute,
CompletableGithubObject,
NotSet,
Opt,
is_defined,
is_optional,
is_optional_list,
is_undefined,
)
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.Event import Event
from github.Hook import Hook
from github.Installation import Installation
from github.Issue import Issue
from github.Label import Label
from github.Migration import Migration
from github.NamedUser import NamedUser
from github.OrganizationCustomProperty import (
CustomProperty,
OrganizationCustomProperty,
RepositoryCustomPropertyValues,
)
from github.OrganizationDependabotAlert import OrganizationDependabotAlert
from github.OrganizationSecret import OrganizationSecret
from github.OrganizationVariable import OrganizationVariable
from github.Plan import Plan
from github.Project import Project
from github.PublicKey import PublicKey
from github.Repository import Repository
from github.Team import Team
class Organization(CompletableGithubObject):
"""
This class represents Organizations.
The reference can be found here
https://docs.github.com/en/rest/reference/orgs
"""
def _initAttributes(self) -> None:
self._archived_at: Attribute[datetime] = NotSet
self._default_repository_permission: Attribute[str] = NotSet
self._has_organization_projects: Attribute[bool] = NotSet
self._has_repository_projects: Attribute[bool] = NotSet
self._hooks_url: Attribute[str] = NotSet
self._issues_url: Attribute[str] = NotSet
self._members_can_create_repositories: Attribute[bool] = NotSet
self._two_factor_requirement_enabled: Attribute[bool] = NotSet
self._avatar_url: Attribute[str] = NotSet
self._billing_email: Attribute[str] = NotSet
self._blog: Attribute[str | None] = NotSet
self._collaborators: Attribute[int] = NotSet
self._company: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._description: Attribute[str] = NotSet
self._disk_usage: Attribute[int] = NotSet
self._email: Attribute[str] = NotSet
self._events_url: Attribute[str] = NotSet
self._followers: Attribute[int] = NotSet
self._following: Attribute[int] = NotSet
self._gravatar_id: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._location: Attribute[str] = NotSet
self._login: Attribute[str] = NotSet
self._members_url: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
self._owned_private_repos: Attribute[int] = NotSet
self._plan: Attribute[Plan] = NotSet
self._private_gists: Attribute[int] = NotSet
self._public_gists: Attribute[int] = NotSet
self._public_members_url: Attribute[str] = NotSet
self._public_repos: Attribute[int] = NotSet
self._repos_url: Attribute[str] = NotSet
self._total_private_repos: Attribute[int] = NotSet
self._type: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"login": self._login.value})
@property
def archived_at(self) -> datetime:
self._completeIfNotSet(self._archived_at)
return self._archived_at.value
@property
def avatar_url(self) -> str:
self._completeIfNotSet(self._avatar_url)
return self._avatar_url.value
@property
def billing_email(self) -> str:
self._completeIfNotSet(self._billing_email)
return self._billing_email.value
@property
def blog(self) -> str | None:
self._completeIfNotSet(self._blog)
return self._blog.value
@property
def collaborators(self) -> int:
self._completeIfNotSet(self._collaborators)
return self._collaborators.value
@property
def company(self) -> str | None:
self._completeIfNotSet(self._company)
return self._company.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def default_repository_permission(self) -> str:
self._completeIfNotSet(self._default_repository_permission)
return self._default_repository_permission.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def disk_usage(self) -> int:
self._completeIfNotSet(self._disk_usage)
return self._disk_usage.value
@property
def email(self) -> str | None:
self._completeIfNotSet(self._email)
return self._email.value
@property
def events_url(self) -> str:
self._completeIfNotSet(self._events_url)
return self._events_url.value
@property
def followers(self) -> int:
self._completeIfNotSet(self._followers)
return self._followers.value
@property
def following(self) -> int:
self._completeIfNotSet(self._following)
return self._following.value
@property
def gravatar_id(self) -> str:
self._completeIfNotSet(self._gravatar_id)
return self._gravatar_id.value
@property
def has_organization_projects(self) -> bool:
self._completeIfNotSet(self._has_organization_projects)
return self._has_organization_projects.value
@property
def has_repository_projects(self) -> bool:
self._completeIfNotSet(self._has_repository_projects)
return self._has_repository_projects.value
@property
def hooks_url(self) -> str:
self._completeIfNotSet(self._hooks_url)
return self._hooks_url.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def issues_url(self) -> str:
self._completeIfNotSet(self._issues_url)
return self._issues_url.value
@property
def location(self) -> str:
self._completeIfNotSet(self._location)
return self._location.value
@property
def login(self) -> str:
self._completeIfNotSet(self._login)
return self._login.value
@property
def members_can_create_repositories(self) -> bool:
self._completeIfNotSet(self._members_can_create_repositories)
return self._members_can_create_repositories.value
@property
def members_url(self) -> str:
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self) -> str | None:
self._completeIfNotSet(self._name)
return self._name.value
@property
def owned_private_repos(self) -> int:
self._completeIfNotSet(self._owned_private_repos)
return self._owned_private_repos.value
@property
def plan(self) -> Plan:
self._completeIfNotSet(self._plan)
return self._plan.value
@property
def private_gists(self) -> int:
self._completeIfNotSet(self._private_gists)
return self._private_gists.value
@property
def public_gists(self) -> int:
self._completeIfNotSet(self._public_gists)
return self._public_gists.value
@property
def public_members_url(self) -> str:
self._completeIfNotSet(self._public_members_url)
return self._public_members_url.value
@property
def public_repos(self) -> int:
self._completeIfNotSet(self._public_repos)
return self._public_repos.value
@property
def repos_url(self) -> str:
self._completeIfNotSet(self._repos_url)
return self._repos_url.value
@property
def total_private_repos(self) -> int:
self._completeIfNotSet(self._total_private_repos)
return self._total_private_repos.value
@property
def two_factor_requirement_enabled(self) -> bool:
self._completeIfNotSet(self._two_factor_requirement_enabled)
return self._two_factor_requirement_enabled.value
@property
def type(self) -> str:
self._completeIfNotSet(self._type)
return self._type.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def add_to_members(self, member: NamedUser, role: Opt[str] = NotSet) -> None:
"""
:calls: `PUT /orgs/{org}/memberships/{user} <https://docs.github.com/en/rest/reference/orgs#update-an-organization-membership-for-the-authenticated-user>`_
"""
assert is_optional(role, str), role
assert isinstance(member, github.NamedUser.NamedUser), member
put_parameters = NotSet.remove_unset_items({"role": role})
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/memberships/{member._identity}", input=put_parameters
)
def add_to_public_members(self, public_member: NamedUser) -> None:
"""
:calls: `PUT /orgs/{org}/public_members/{user} <https://docs.github.com/en/rest/reference/orgs#members>`_
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/public_members/{public_member._identity}"
)
def create_fork(
self,
repo: Repository,
name: Opt[str] = NotSet,
default_branch_only: Opt[bool] = NotSet,
) -> Repository:
"""
:calls: `POST /repos/{owner}/{repo}/forks <https://docs.github.com/en/rest/reference/repos#forks>`_
"""
assert isinstance(repo, github.Repository.Repository), repo
return repo.create_fork(
self,
name=name,
default_branch_only=default_branch_only,
)
def create_repo_from_template(
self,
name: str,
repo: Repository,
description: Opt[str] = NotSet,
include_all_branches: Opt[bool] = NotSet,
private: Opt[bool] = NotSet,
) -> Repository:
"""self.name
:calls: `POST /repos/{template_owner}/{template_repo}/generate <https://docs.github.com/en/rest/reference/repos#create-a-repository-using-a-template>`_
"""
assert isinstance(name, str), name
assert isinstance(repo, github.Repository.Repository), repo
assert is_optional(description, str), description
assert is_optional(include_all_branches, bool), include_all_branches
assert is_optional(private, bool), private
post_parameters: dict[str, Any] = NotSet.remove_unset_items(
{
"name": name,
"owner": self.login,
"description": description,
"include_all_branches": include_all_branches,
"private": private,
}
)
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"/repos/{repo.owner.login}/{repo.name}/generate",
input=post_parameters,
headers={"Accept": "application/vnd.github.v3+json"},
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def create_hook(
self,
name: str,
config: dict[str, str],
events: Opt[list[str]] = NotSet,
active: Opt[bool] = NotSet,
) -> Hook:
"""
:calls: `POST /orgs/{owner}/hooks <https://docs.github.com/en/rest/reference/orgs#webhooks>`_
:param name: string
:param config: dict
:param events: list of string
:param active: bool
:rtype: :class:`github.Hook.Hook`
"""
assert isinstance(name, str), name
assert isinstance(config, dict), config
assert is_optional_list(events, str), events
assert is_optional(active, bool), active
post_parameters: dict[str, Any] = NotSet.remove_unset_items(
{
"name": name,
"config": config,
"events": events,
"active": active,
}
)
headers, data = self._requester.requestJsonAndCheck("POST", f"{self.url}/hooks", input=post_parameters)
return github.Hook.Hook(self._requester, headers, data, completed=True)
def create_project(self, name: str, body: Opt[str] = NotSet) -> github.Project.Project:
"""
:calls: `POST /orgs/{org}/projects <https://docs.github.com/en/rest/reference/projects#create-an-organization-project>`_
"""
assert isinstance(name, str), name
assert is_optional(body, str), body
post_parameters: dict[str, Any] = NotSet.remove_unset_items({"name": name, "body": body})
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self.url}/projects",
input=post_parameters,
headers={"Accept": Consts.mediaTypeProjectsPreview},
)
return github.Project.Project(self._requester, headers, data, completed=True)
def create_repo(
self,
name: str,
description: Opt[str] = NotSet,
homepage: Opt[str] = NotSet,
private: Opt[bool] = NotSet,
visibility: Opt[str] = NotSet,
has_issues: Opt[bool] = NotSet,
has_wiki: Opt[bool] = NotSet,
has_downloads: Opt[bool] = NotSet,
has_projects: Opt[bool] = NotSet,
team_id: Opt[int] = NotSet,
auto_init: Opt[bool] = NotSet,
license_template: Opt[str] = NotSet,
gitignore_template: Opt[str] = NotSet,
allow_squash_merge: Opt[bool] = NotSet,
allow_merge_commit: Opt[bool] = NotSet,
allow_rebase_merge: Opt[bool] = NotSet,
delete_branch_on_merge: Opt[bool] = NotSet,
allow_update_branch: Opt[bool] = NotSet,
is_template: Opt[bool] = NotSet,
allow_auto_merge: Opt[bool] = NotSet,
use_squash_pr_title_as_default: Opt[bool] = NotSet,
squash_merge_commit_title: Opt[str] = NotSet,
squash_merge_commit_message: Opt[str] = NotSet,
merge_commit_title: Opt[str] = NotSet,
merge_commit_message: Opt[str] = NotSet,
custom_properties: Opt[dict[str, Any]] = NotSet,
) -> github.Repository.Repository:
"""
:calls: `POST /orgs/{org}/repos <https://docs.github.com/en/rest/reference/repos>`_
"""
assert isinstance(name, str), name
assert is_optional(description, str), description
assert is_optional(homepage, str), homepage
assert is_optional(private, bool), private
assert is_optional(visibility, str), visibility
assert is_optional(has_issues, bool), has_issues
assert is_optional(has_wiki, bool), has_wiki
assert is_optional(has_downloads, bool), has_downloads
assert is_optional(has_projects, bool), has_projects
assert is_optional(team_id, int), team_id
assert is_optional(auto_init, bool), auto_init
assert is_optional(license_template, str), license_template
assert is_optional(gitignore_template, str), gitignore_template
assert is_optional(allow_squash_merge, bool), allow_squash_merge
assert is_optional(allow_merge_commit, bool), allow_merge_commit
assert is_optional(allow_rebase_merge, bool), allow_rebase_merge
assert is_optional(delete_branch_on_merge, bool), delete_branch_on_merge
assert is_optional(allow_update_branch, bool), allow_update_branch
assert is_optional(is_template, bool), is_template
assert is_optional(allow_auto_merge, bool), allow_auto_merge
assert is_optional(use_squash_pr_title_as_default, bool), use_squash_pr_title_as_default
assert squash_merge_commit_title in ["PR_TITLE", "COMMIT_OR_PR_TITLE", NotSet], squash_merge_commit_title
assert squash_merge_commit_message in [
"PR_BODY",
"COMMIT_MESSAGES",
"BLANK",
NotSet,
], squash_merge_commit_message
assert merge_commit_title in ["PR_TITLE", "MERGE_MESSAGE", NotSet], merge_commit_title
assert merge_commit_message in ["PR_TITLE", "PR_BODY", "BLANK", NotSet], merge_commit_message
assert is_optional(custom_properties, dict), custom_properties
post_parameters = NotSet.remove_unset_items(
{
"name": name,
"description": description,
"homepage": homepage,
"private": private,
"visibility": visibility,
"has_issues": has_issues,
"has_wiki": has_wiki,
"has_downloads": has_downloads,
"has_projects": has_projects,
"team_id": team_id,
"auto_init": auto_init,
"license_template": license_template,
"gitignore_template": gitignore_template,
"allow_squash_merge": allow_squash_merge,
"allow_merge_commit": allow_merge_commit,
"allow_rebase_merge": allow_rebase_merge,
"delete_branch_on_merge": delete_branch_on_merge,
"allow_update_branch": allow_update_branch,
"is_template": is_template,
"allow_auto_merge": allow_auto_merge,
"use_squash_pr_title_as_default": use_squash_pr_title_as_default,
"squash_merge_commit_title": squash_merge_commit_title,
"squash_merge_commit_message": squash_merge_commit_message,
"merge_commit_title": merge_commit_title,
"merge_commit_message": merge_commit_message,
"custom_properties": custom_properties,
}
)
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self.url}/repos",
input=post_parameters,
headers={"Accept": Consts.repoVisibilityPreview},
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
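# Usage sketch (all names hypothetical): create a private repository in the
# organization, assuming an authenticated client.
#
#     from github import Github, Auth
#     g = Github(auth=Auth.Token("<token>"))
#     org = g.get_organization("my-org")
#     repo = org.create_repo(
#         name="backup-tools",
#         description="Internal backup utilities",
#         private=True,
#         auto_init=True,
#         delete_branch_on_merge=True,
#     )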
def create_secret(
self,
secret_name: str,
unencrypted_value: str,
visibility: str = "all",
selected_repositories: Opt[list[github.Repository.Repository]] = NotSet,
secret_type: str = "actions",
) -> github.OrganizationSecret.OrganizationSecret:
"""
:param secret_name: string name of the secret
:param unencrypted_value: string plain text value of the secret
:param visibility: string options all or selected
:param selected_repositories: list of repositories that the secret will be available in
:param secret_type: string options actions or dependabot
:calls: `PUT /orgs/{org}/{secret_type}/secrets/{secret_name} <https://docs.github.com/en/rest/actions/secrets#create-or-update-an-organization-secret>`_
"""
assert isinstance(secret_name, str), secret_name
assert isinstance(unencrypted_value, str), unencrypted_value
assert isinstance(visibility, str), visibility
assert is_optional_list(selected_repositories, github.Repository.Repository), selected_repositories
assert secret_type in ["actions", "dependabot"], "secret_type should be actions or dependabot"
if visibility == "selected":
assert isinstance(selected_repositories, list) and all(
isinstance(element, github.Repository.Repository) for element in selected_repositories
), selected_repositories
else:
assert selected_repositories is NotSet
public_key = self.get_public_key(secret_type=secret_type)
payload = public_key.encrypt(unencrypted_value)
put_parameters: dict[str, Any] = {
"key_id": public_key.key_id,
"encrypted_value": payload,
"visibility": visibility,
}
if is_defined(selected_repositories):
# Dependabot and Actions endpoints expect different types
# https://docs.github.com/en/rest/dependabot/secrets?apiVersion=2022-11-28#create-or-update-an-organization-secret
# https://docs.github.com/en/rest/actions/secrets?apiVersion=2022-11-28#create-or-update-an-organization-secret
if secret_type == "actions":
put_parameters["selected_repository_ids"] = [element.id for element in selected_repositories]
if secret_type == "dependabot":
put_parameters["selected_repository_ids"] = [str(element.id) for element in selected_repositories]
self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/{secret_type}/secrets/{urllib.parse.quote(secret_name)}", input=put_parameters
)
return github.OrganizationSecret.OrganizationSecret(
requester=self._requester,
headers={},
attributes={
"name": secret_name,
"visibility": visibility,
"selected_repositories_url": f"{self.url}/{secret_type}/secrets/{urllib.parse.quote(secret_name)}/repositories",
"url": f"{self.url}/{secret_type}/secrets/{urllib.parse.quote(secret_name)}",
},
completed=False,
)
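# Usage sketch (hypothetical names; `org` as in the create_repo sketch above):
# with visibility="selected", selected_repositories is mandatory per the
# assertions above.
#
#     repos = [org.get_repo("api"), org.get_repo("worker")]
#     secret = org.create_secret(
#         secret_name="DEPLOY_KEY",
#         unencrypted_value="s3cr3t-value",
#         visibility="selected",
#         selected_repositories=repos,
#     )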
def get_secrets(self, secret_type: str = "actions") -> PaginatedList[OrganizationSecret]:
"""
Gets all organization secrets.
:param secret_type: string options actions or dependabot
:rtype: :class:`PaginatedList` of :class:`github.OrganizationSecret.OrganizationSecret`
"""
assert secret_type in ["actions", "dependabot"], "secret_type should be actions or dependabot"
return PaginatedList(
github.OrganizationSecret.OrganizationSecret,
self._requester,
f"{self.url}/{secret_type}/secrets",
None,
list_item="secrets",
)
def get_secret(self, secret_name: str, secret_type: str = "actions") -> OrganizationSecret:
"""
:calls: `GET /orgs/{org}/{secret_type}/secrets/{secret_name} <https://docs.github.com/en/rest/actions/secrets#get-an-organization-secret>`_
:param secret_name: string
:param secret_type: string options actions or dependabot
:rtype: github.OrganizationSecret.OrganizationSecret
"""
assert isinstance(secret_name, str), secret_name
assert secret_type in ["actions", "dependabot"], "secret_type should be actions or dependabot"
return github.OrganizationSecret.OrganizationSecret(
requester=self._requester,
headers={},
attributes={"url": f"{self.url}/{secret_type}/secrets/{urllib.parse.quote(secret_name)}"},
completed=False,
)
def create_team(
self,
name: str,
repo_names: Opt[list[Repository]] = NotSet,
permission: Opt[str] = NotSet,
privacy: Opt[str] = NotSet,
description: Opt[str] = NotSet,
parent_team_id: Opt[int] = NotSet,
maintainers: Opt[list[int]] = NotSet,
notification_setting: Opt[str] = NotSet,
) -> Team:
"""
:calls: `POST /orgs/{org}/teams <https://docs.github.com/en/rest/reference/teams#list-teams>`_
:param name: string
:param repo_names: list of :class:`github.Repository.Repository`
:param permission: string
:param privacy: string
:param description: string
:param parent_team_id: integer
:param maintainers: list of integer
:param notification_setting: string
:rtype: :class:`github.Team.Team`
"""
assert isinstance(name, str), name
assert is_optional_list(repo_names, github.Repository.Repository), repo_names
assert is_optional_list(maintainers, int), maintainers
assert is_optional(parent_team_id, int), parent_team_id
assert is_optional(permission, str), permission
assert is_optional(privacy, str), privacy
assert is_optional(description, str), description
assert notification_setting in ["notifications_enabled", "notifications_disabled", NotSet], notification_setting
post_parameters: dict[str, Any] = NotSet.remove_unset_items(
{
"name": name,
"permission": permission,
"privacy": privacy,
"description": description,
"parent_team_id": parent_team_id,
"maintainers": maintainers,
"notification_setting": notification_setting,
}
)
if is_defined(repo_names):
post_parameters["repo_names"] = [element._identity for element in repo_names]
headers, data = self._requester.requestJsonAndCheck("POST", f"{self.url}/teams", input=post_parameters)
return github.Team.Team(self._requester, headers, data, completed=True)
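# Usage sketch (hypothetical names): repo_names takes Repository objects, not
# strings, since their _identity is what gets sent to the API.
#
#     team = org.create_team(
#         name="platform",
#         repo_names=[org.get_repo("infra")],
#         privacy="closed",
#         notification_setting="notifications_enabled",
#     )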
def create_variable(
self,
variable_name: str,
value: str,
visibility: str = "all",
selected_repositories: github.GithubObject.Opt[list[github.Repository.Repository]] = NotSet,
) -> github.OrganizationVariable.OrganizationVariable:
"""
:calls: `POST /orgs/{org}/actions/variables/ <https://docs.github.com/en/rest/actions/variables#create-an-organization-variable>`_
:param variable_name: string
:param value: string
:param visibility: string
:param selected_repositories: list of :class:`github.Repository.Repository`
:rtype: github.OrganizationVariable.OrganizationVariable
"""
assert isinstance(variable_name, str), variable_name
assert isinstance(value, str), value
assert isinstance(visibility, str), visibility
if visibility == "selected":
assert isinstance(selected_repositories, list) and all(
isinstance(element, github.Repository.Repository) for element in selected_repositories
), selected_repositories
else:
assert selected_repositories is NotSet
post_parameters: dict[str, Any] = {
"name": variable_name,
"value": value,
"visibility": visibility,
}
if is_defined(selected_repositories):
post_parameters["selected_repository_ids"] = [element.id for element in selected_repositories]
self._requester.requestJsonAndCheck("POST", f"{self.url}/actions/variables", input=post_parameters)
return github.OrganizationVariable.OrganizationVariable(
requester=self._requester,
headers={},
attributes={
"name": variable_name,
"visibility": visibility,
"value": value,
"selected_repositories_url": f"{self.url}/actions/variables/{urllib.parse.quote(variable_name)}/repositories",
"url": self.url,
},
completed=False,
)
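# Usage sketch (hypothetical values): an organization-wide Actions variable.
#
#     var = org.create_variable(variable_name="REGION", value="eu-west-1", visibility="all")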
def get_variables(self) -> PaginatedList[OrganizationVariable]:
"""
Gets all organization variables.
:rtype: :class:`PaginatedList` of :class:`github.OrganizationVariable.OrganizationVariable`
"""
return PaginatedList(
github.OrganizationVariable.OrganizationVariable,
self._requester,
f"{self.url}/actions/variables",
None,
list_item="variables",
)
def get_variable(self, variable_name: str) -> OrganizationVariable:
"""
:calls: `GET /orgs/{org}/actions/variables/{variable_name} <https://docs.github.com/en/rest/actions/variables#get-an-organization-variable>`_
:param variable_name: string
:rtype: github.OrganizationVariable.OrganizationVariable
"""
assert isinstance(variable_name, str), variable_name
return github.OrganizationVariable.OrganizationVariable(
requester=self._requester,
headers={},
attributes={"url": f"{self.url}/actions/variables/{urllib.parse.quote(variable_name)}"},
completed=False,
)
def delete_hook(self, id: int) -> None:
"""
:calls: `DELETE /orgs/{owner}/hooks/{id} <https://docs.github.com/en/rest/reference/orgs#webhooks>`_
:param id: integer
:rtype: None
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/hooks/{id}")
def edit(
self,
billing_email: Opt[str] = NotSet,
blog: Opt[str] = NotSet,
company: Opt[str] = NotSet,
description: Opt[str] = NotSet,
email: Opt[str] = NotSet,
location: Opt[str] = NotSet,
name: Opt[str] = NotSet,
) -> None:
"""
:calls: `PATCH /orgs/{org} <https://docs.github.com/en/rest/reference/orgs>`_
"""
assert is_optional(billing_email, str), billing_email
assert is_optional(blog, str), blog
assert is_optional(company, str), company
assert is_optional(description, str), description
assert is_optional(email, str), email
assert is_optional(location, str), location
assert is_optional(name, str), name
post_parameters = NotSet.remove_unset_items(
{
"billing_email": billing_email,
"blog": blog,
"company": company,
"description": description,
"email": email,
"location": location,
"name": name,
}
)
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def edit_hook(
self,
id: int,
name: str,
config: dict[str, str],
events: Opt[list[str]] = NotSet,
active: Opt[bool] = NotSet,
) -> Hook:
"""
:calls: `PATCH /orgs/{owner}/hooks/{id} <https://docs.github.com/en/rest/reference/orgs#webhooks>`_
"""
assert isinstance(id, int), id
assert isinstance(name, str), name
assert isinstance(config, dict), config
assert is_optional_list(events, str), events
assert is_optional(active, bool), active
post_parameters: dict[str, Any] = NotSet.remove_unset_items(
{"name": name, "config": config, "events": events, "active": active}
)
headers, data = self._requester.requestJsonAndCheck("PATCH", f"{self.url}/hooks/{id}", input=post_parameters)
return github.Hook.Hook(self._requester, headers, data, completed=True)
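# Usage sketch (hypothetical id and URL): config follows GitHub's webhook
# config shape (url, content_type, secret, insecure_ssl).
#
#     hook = org.edit_hook(
#         id=123456,
#         name="web",
#         config={"url": "https://ci.example.com/hook", "content_type": "json"},
#         events=["push", "pull_request"],
#         active=True,
#     )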
def get_events(self) -> PaginatedList[Event]:
"""
:calls: `GET /orgs/{org}/events <https://docs.github.com/en/rest/reference/activity#events>`_
:rtype: :class:`PaginatedList` of :class:`github.Event.Event`
"""
return PaginatedList(github.Event.Event, self._requester, f"{self.url}/events", None)
def get_hook(self, id: int) -> github.Hook.Hook:
"""
:calls: `GET /orgs/{owner}/hooks/{id} <https://docs.github.com/en/rest/reference/orgs#webhooks>`_
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/hooks/{id}")
return github.Hook.Hook(self._requester, headers, data, completed=True)
def get_hooks(self) -> PaginatedList[Hook]:
"""
:calls: `GET /orgs/{owner}/hooks <https://docs.github.com/en/rest/reference/orgs#webhooks>`_
"""
return PaginatedList(github.Hook.Hook, self._requester, f"{self.url}/hooks", None)
def get_hook_delivery(self, hook_id: int, delivery_id: int) -> github.HookDelivery.HookDelivery:
"""
:calls: `GET /orgs/{owner}/hooks/{hook_id}/deliveries/{delivery_id} <https://docs.github.com/en/rest/reference/orgs#get-a-webhook-delivery-for-an-organization-webhook>`_
:param hook_id: integer
:param delivery_id: integer
:rtype: :class:`github.HookDelivery.HookDelivery`
"""
assert isinstance(hook_id, int), hook_id
assert isinstance(delivery_id, int), delivery_id
headers, data = self._requester.requestJsonAndCheck(
"GET", f"{self.url}/hooks/{hook_id}/deliveries/{delivery_id}"
)
return github.HookDelivery.HookDelivery(self._requester, headers, data, completed=True)
def get_hook_deliveries(self, hook_id: int) -> PaginatedList[github.HookDelivery.HookDeliverySummary]:
"""
:calls: `GET /orgs/{owner}/hooks/{hook_id}/deliveries <https://docs.github.com/en/rest/reference/orgs#list-deliveries-for-an-organization-webhook>`_
:param hook_id: integer
:rtype: :class:`PaginatedList` of :class:`github.HookDelivery.HookDeliverySummary`
"""
assert isinstance(hook_id, int), hook_id
return PaginatedList(
github.HookDelivery.HookDeliverySummary,
self._requester,
f"{self.url}/hooks/{hook_id}/deliveries",
None,
)
def get_issues(
self,
filter: Opt[str] = NotSet,
state: Opt[str] = NotSet,
labels: Opt[list[Label]] = NotSet,
sort: Opt[str] = NotSet,
direction: Opt[str] = NotSet,
since: Opt[datetime] = NotSet,
) -> PaginatedList[Issue]:
"""
:calls: `GET /orgs/{org}/issues <https://docs.github.com/en/rest/reference/issues>`_
:rtype: :class:`PaginatedList` of :class:`github.Issue.Issue`
:param filter: string
:param state: string
:param labels: list of :class:`github.Label.Label`
:param sort: string
:param direction: string
:param since: datetime
:rtype: :class:`PaginatedList` of :class:`github.Issue.Issue`
"""
assert is_optional(filter, str), filter
assert is_optional(state, str), state
assert is_optional_list(labels, github.Label.Label), labels
assert is_optional(sort, str), sort
assert is_optional(direction, str), direction
assert is_optional(since, datetime), since
url_parameters: dict[str, Any] = NotSet.remove_unset_items(
{"filter": filter, "state": state, "sort": sort, "direction": direction}
)
if is_defined(labels):
url_parameters["labels"] = ",".join(label.name for label in labels)
if is_defined(since):
url_parameters["since"] = since.strftime("%Y-%m-%dT%H:%M:%SZ")
return PaginatedList(github.Issue.Issue, self._requester, f"{self.url}/issues", url_parameters)
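# Usage sketch: open issues assigned to the authenticated user, most recently
# updated first (cutoff date hypothetical).
#
#     from datetime import datetime, timezone
#     issues = org.get_issues(
#         filter="assigned",
#         state="open",
#         sort="updated",
#         direction="desc",
#         since=datetime(2024, 1, 1, tzinfo=timezone.utc),
#     )
#     for issue in issues:
#         print(issue.title)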
def get_members(
self,
filter_: Opt[str] = NotSet,
role: Opt[str] = NotSet,
) -> PaginatedList[NamedUser]:
"""
:calls: `GET /orgs/{org}/members <https://docs.github.com/en/rest/reference/orgs#members>`_
"""
assert is_optional(filter_, str), filter_
assert is_optional(role, str), role
url_parameters = NotSet.remove_unset_items({"filter": filter_, "role": role})
return PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/members",
url_parameters,
)
def get_projects(self, state: Opt[str] = NotSet) -> PaginatedList[Project]:
"""
:calls: `GET /orgs/{org}/projects <https://docs.github.com/en/rest/reference/projects#list-organization-projects>`_
"""
url_parameters = NotSet.remove_unset_items({"state": state})
return PaginatedList(
github.Project.Project,
self._requester,
f"{self.url}/projects",
url_parameters,
{"Accept": Consts.mediaTypeProjectsPreview},
)
def get_public_members(self) -> PaginatedList[NamedUser]:
"""
:calls: `GET /orgs/{org}/public_members <https://docs.github.com/en/rest/reference/orgs#members>`_
:rtype: :class:`PaginatedList` of :class:`github.NamedUser.NamedUser`
"""
return PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/public_members",
None,
)
def get_outside_collaborators(self, filter_: Opt[str] = NotSet) -> PaginatedList[NamedUser]:
"""
:calls: `GET /orgs/{org}/outside_collaborators <https://docs.github.com/en/rest/reference/orgs#outside-collaborators>`_
"""
assert is_optional(filter_, str), filter_
url_parameters = NotSet.remove_unset_items({"filter": filter_})
return PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/outside_collaborators",
url_parameters,
)
def remove_outside_collaborator(self, collaborator: NamedUser) -> None:
"""
:calls: `DELETE /orgs/{org}/outside_collaborators/{username} <https://docs.github.com/en/rest/reference/orgs#outside-collaborators>`_
:param collaborator: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(collaborator, github.NamedUser.NamedUser), collaborator
headers, data = self._requester.requestJsonAndCheck(
"DELETE", f"{self.url}/outside_collaborators/{collaborator._identity}"
)
def convert_to_outside_collaborator(self, member: NamedUser) -> None:
"""
:calls: `PUT /orgs/{org}/outside_collaborators/{username} <https://docs.github.com/en/rest/reference/orgs#outside-collaborators>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/outside_collaborators/{member._identity}"
)
def get_public_key(self, secret_type: str = "actions") -> PublicKey:
"""
:calls: `GET /orgs/{org}/{secret_type}/secrets/public-key <https://docs.github.com/en/rest/reference/actions#get-an-organization-public-key>`_
:param secret_type: string options actions or dependabot
:rtype: :class:`github.PublicKey.PublicKey`
"""
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/{secret_type}/secrets/public-key")
return github.PublicKey.PublicKey(self._requester, headers, data, completed=True)
def get_repo(self, name: str) -> Repository:
"""
:calls: `GET /repos/{owner}/{repo} <https://docs.github.com/en/rest/reference/repos>`_
:param name: string
:rtype: :class:`github.Repository.Repository`
"""
assert isinstance(name, str), name
name = urllib.parse.quote(name)
headers, data = self._requester.requestJsonAndCheck(
"GET",
f"/repos/{self.login}/{name}",
headers={"Accept": Consts.repoVisibilityPreview},
)
return github.Repository.Repository(self._requester, headers, data, completed=True)
def get_repos(
self,
type: Opt[str] = NotSet,
sort: Opt[str] = NotSet,
direction: Opt[str] = NotSet,
) -> PaginatedList[Repository]:
"""
:calls: `GET /orgs/{org}/repos <https://docs.github.com/en/rest/reference/repos>`_
:param type: string ('all', 'public', 'private', 'forks', 'sources', 'member')
:param sort: string ('created', 'updated', 'pushed', 'full_name')
:param direction: string ('asc', 'desc')
"""
assert is_optional(type, str), type
assert is_optional(sort, str), sort
assert is_optional(direction, str), direction
url_parameters = NotSet.remove_unset_items({"type": type, "sort": sort, "direction": direction})
return PaginatedList(
github.Repository.Repository,
self._requester,
f"{self.url}/repos",
url_parameters,
headers={"Accept": Consts.repoVisibilityPreview},
)
def get_team(self, id: int) -> Team:
"""
:calls: `GET /teams/{id} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(id, int), id
headers, data = self._requester.requestJsonAndCheck("GET", f"/teams/{id}")
return github.Team.Team(self._requester, headers, data, completed=True)
def get_team_by_slug(self, slug: str) -> Team:
"""
:calls: `GET /orgs/{org}/teams/{team_slug} <https://docs.github.com/en/rest/reference/teams#get-a-team-by-name>`_
"""
assert isinstance(slug, str), slug
slug = urllib.parse.quote(slug)
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/teams/{slug}")
return github.Team.Team(self._requester, headers, data, completed=True)
def get_teams(self) -> PaginatedList[Team]:
"""
:calls: `GET /orgs/{org}/teams <https://docs.github.com/en/rest/reference/teams#list-teams>`_
"""
return PaginatedList(github.Team.Team, self._requester, f"{self.url}/teams", None)
def invitations(self) -> PaginatedList[NamedUser]:
"""
:calls: `GET /orgs/{org}/invitations <https://docs.github.com/en/rest/reference/orgs#members>`_
"""
return PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/invitations",
None,
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
)
def invite_user(
self,
user: Opt[NamedUser] = NotSet,
email: Opt[str] = NotSet,
role: Opt[str] = NotSet,
teams: Opt[list[Team]] = NotSet,
) -> None:
"""
:calls: `POST /orgs/{org}/invitations <https://docs.github.com/en/rest/reference/orgs#members>`_
:param user: :class:`github.NamedUser.NamedUser`
:param email: string
:param role: string
:param teams: array of :class:`github.Team.Team`
:rtype: None
"""
assert is_optional(user, github.NamedUser.NamedUser), user
assert is_optional(email, str), email
assert is_defined(email) != is_defined(user), "specify only one of email or user"
assert is_undefined(role) or role in ["admin", "direct_member", "billing_manager"], role
assert is_optional_list(teams, github.Team.Team), teams
parameters: dict[str, Any] = NotSet.remove_unset_items({"email": email, "role": role})
if is_defined(user):
parameters["invitee_id"] = user.id
if is_defined(teams):
parameters["team_ids"] = [t.id for t in teams]
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self.url}/invitations",
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
input=parameters,
)
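# Usage sketch (hypothetical email and team): exactly one of user or email may
# be given, per the assertion above.
#
#     team = org.get_team_by_slug("platform")
#     org.invite_user(email="new.hire@example.com", role="direct_member", teams=[team])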
def cancel_invitation(self, invitee: NamedUser) -> bool:
"""
:calls: `DELETE /orgs/{org}/invitations/{invitation_id} <https://docs.github.com/en/rest/reference/orgs#cancel-an-organization-invitation>`_
:param invitee: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(invitee, github.NamedUser.NamedUser), invitee
status, headers, data = self._requester.requestJson("DELETE", f"{self.url}/invitations/{invitee.id}")
return status == 204
def has_in_members(self, member: NamedUser) -> bool:
"""
:calls: `GET /orgs/{org}/members/{user} <https://docs.github.com/en/rest/reference/orgs#members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson("GET", f"{self.url}/members/{member._identity}")
if status == 302:
status, headers, data = self._requester.requestJson("GET", headers["location"])
return status == 204
def has_in_public_members(self, public_member: NamedUser) -> bool:
"""
:calls: `GET /orgs/{org}/public_members/{user} <https://docs.github.com/en/rest/reference/orgs#members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: bool
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
status, headers, data = self._requester.requestJson(
"GET", f"{self.url}/public_members/{public_member._identity}"
)
return status == 204
def remove_from_membership(self, member: NamedUser) -> None:
"""
:calls: `DELETE /orgs/{org}/memberships/{user} <https://docs.github.com/en/rest/reference/orgs#remove-an-organization-member>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/memberships/{member._identity}")
def remove_from_members(self, member: NamedUser) -> None:
"""
:calls: `DELETE /orgs/{org}/members/{user} <https://docs.github.com/en/rest/reference/orgs#members>`_
:param member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/members/{member._identity}")
def remove_from_public_members(self, public_member: NamedUser) -> None:
"""
:calls: `DELETE /orgs/{org}/public_members/{user} <https://docs.github.com/en/rest/reference/orgs#members>`_
:param public_member: :class:`github.NamedUser.NamedUser`
:rtype: None
"""
assert isinstance(public_member, github.NamedUser.NamedUser), public_member
headers, data = self._requester.requestJsonAndCheck(
"DELETE", f"{self.url}/public_members/{public_member._identity}"
)
def create_migration(
self,
repos: list[str],
lock_repositories: Opt[bool] = NotSet,
exclude_attachments: Opt[bool] = NotSet,
) -> Migration:
"""
:calls: `POST /orgs/{org}/migrations <https://docs.github.com/en/rest/reference/migrations#list-organization-migrations>`_
:param repos: list or tuple of str
:param lock_repositories: bool
:param exclude_attachments: bool
:rtype: :class:`github.Migration.Migration`
"""
assert isinstance(repos, (list, tuple)), repos
assert all(isinstance(repo, str) for repo in repos), repos
assert is_optional(lock_repositories, bool), lock_repositories
assert is_optional(exclude_attachments, bool), exclude_attachments
post_parameters = NotSet.remove_unset_items(
{
"repositories": repos,
"lock_repositories": lock_repositories,
"exclude_attachments": exclude_attachments,
}
)
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"/orgs/{self.login}/migrations",
input=post_parameters,
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
return github.Migration.Migration(self._requester, headers, data, completed=True)
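# Usage sketch (hypothetical repository names): start an export of two
# repositories; the returned Migration can then be polled for completion.
#
#     migration = org.create_migration(["api", "worker"], lock_repositories=False)
#     print(migration.get_status())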
def get_migrations(self) -> PaginatedList[Migration]:
"""
:calls: `GET /orgs/{org}/migrations <https://docs.github.com/en/rest/reference/migrations#list-organization-migrations>`_
:rtype: :class:`PaginatedList` of :class:`github.Migration.Migration`
"""
return PaginatedList(
github.Migration.Migration,
self._requester,
f"/orgs/{self.login}/migrations",
None,
headers={"Accept": Consts.mediaTypeMigrationPreview},
)
def get_installations(self) -> PaginatedList[Installation]:
"""
:calls: `GET /orgs/{org}/installations <https://docs.github.com/en/rest/reference/orgs#list-app-installations-for-an-organization>`_
:rtype: :class:`PaginatedList` of :class:`github.Installation.Installation`
"""
return PaginatedList(
github.Installation.Installation,
self._requester,
f"{self.url}/installations",
None,
None,
list_item="installations",
)
def get_dependabot_alerts(
self,
state: Opt[str] = NotSet,
severity: Opt[str] = NotSet,
ecosystem: Opt[str] = NotSet,
package: Opt[str] = NotSet,
scope: Opt[str] = NotSet,
sort: Opt[str] = NotSet,
direction: Opt[str] = NotSet,
) -> PaginatedList[OrganizationDependabotAlert]:
"""
:calls: `GET /orgs/{org}/dependabot/alerts <https://docs.github.com/en/rest/dependabot/alerts#list-dependabot-alerts-for-an-organization>`_
:param state: Optional string
:param severity: Optional string
:param ecosystem: Optional string
:param package: Optional string
:param scope: Optional string
:param sort: Optional string
:param direction: Optional string
:rtype: :class:`PaginatedList` of :class:`github.DependabotAlert.DependabotAlert`
"""
allowed_states = ["auto_dismissed", "dismissed", "fixed", "open"]
allowed_severities = ["low", "medium", "high", "critical"]
allowed_ecosystems = ["composer", "go", "maven", "npm", "nuget", "pip", "pub", "rubygems", "rust"]
allowed_scopes = ["development", "runtime"]
allowed_sorts = ["created", "updated"]
allowed_directions = ["asc", "desc"]
assert state in allowed_states + [NotSet], f"State can be one of {', '.join(allowed_states)}"
assert severity in allowed_severities + [NotSet], f"Severity can be one of {', '.join(allowed_severities)}"
assert ecosystem in allowed_ecosystems + [NotSet], f"Ecosystem can be one of {', '.join(allowed_ecosystems)}"
assert scope in allowed_scopes + [NotSet], f"Scope can be one of {', '.join(allowed_scopes)}"
assert sort in allowed_sorts + [NotSet], f"Sort can be one of {', '.join(allowed_sorts)}"
assert direction in allowed_directions + [NotSet], f"Direction can be one of {', '.join(allowed_directions)}"
url_parameters = NotSet.remove_unset_items(
{
"state": state,
"severity": severity,
"ecosystem": ecosystem,
"package": package,
"scope": scope,
"sort": sort,
"direction": direction,
}
)
return PaginatedList(
github.OrganizationDependabotAlert.OrganizationDependabotAlert,
self._requester,
f"{self.url}/dependabot/alerts",
url_parameters,
)
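# Usage sketch: open critical alerts for npm packages, using values from the
# allowed lists asserted above.
#
#     alerts = org.get_dependabot_alerts(state="open", severity="critical", ecosystem="npm")
#     for alert in alerts:
#         print(alert.number, alert.state)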
def get_custom_properties(self) -> PaginatedList[OrganizationCustomProperty]:
"""
:calls: `GET /orgs/{org}/properties/schema <https://docs.github.com/en/rest/orgs/custom-properties#get-all-custom-properties-for-an-organization>`_
:rtype: :class:`PaginatedList` of :class:`github.OrganizationCustomProperty.OrganizationCustomProperty`
"""
return PaginatedList(
contentClass=github.OrganizationCustomProperty.OrganizationCustomProperty,
requester=self._requester,
firstUrl=f"{self.url}/properties/schema",
firstParams=None,
)
def get_custom_property(self, property_name: str) -> OrganizationCustomProperty:
"""
:calls: `GET /orgs/{org}/properties/schema/{property_name} <https://docs.github.com/en/rest/orgs/custom-properties#get-a-custom-property-for-an-organization>`_
:param property_name: string
:rtype: :class:`github.OrganizationCustomProperty.OrganizationCustomProperty`
"""
assert isinstance(property_name, str), property_name
headers, data = self._requester.requestJsonAndCheck(
"GET", f"{self.url}/properties/schema/{urllib.parse.quote(property_name)}"
)
return github.OrganizationCustomProperty.OrganizationCustomProperty(
requester=self._requester,
headers=headers,
attributes=data,
completed=False,
)
def create_custom_properties(self, properties: list[CustomProperty]) -> list[OrganizationCustomProperty]:
"""
Create or update custom properties for an organization
:calls: `PATCH /orgs/{org}/properties/schema <https://docs.github.com/en/rest/orgs/custom-properties#create-or-update-custom-properties-for-an-organization>`_
:param properties: list of :class:`github.OrganizationCustomProperty.CustomProperty`
:rtype: list of :class:`github.OrganizationCustomProperty.OrganizationCustomProperty`
"""
assert isinstance(properties, list), properties
assert all(isinstance(p, github.OrganizationCustomProperty.CustomProperty) for p in properties), properties
patch_parameters = {"properties": [p.to_dict() for p in properties]}
headers, data = self._requester.requestJsonAndCheck(
"PATCH", f"{self.url}/properties/schema", input=patch_parameters
)
return [
github.OrganizationCustomProperty.OrganizationCustomProperty(
requester=self._requester, headers=headers, attributes=property, completed=True
)
for property in data
]
def create_custom_property(self, property: CustomProperty) -> OrganizationCustomProperty:
"""
Create or update a custom property for an organization
:calls: `PUT /orgs/{org}/properties/schema/{property_name} <https://docs.github.com/en/rest/orgs/custom-properties#create-or-update-a-custom-property-for-an-organization>`_
:param property: :class:`github.OrganizationCustomProperty.CustomProperty`
:rtype: :class:`github.OrganizationCustomProperty.OrganizationCustomProperty`
"""
assert isinstance(property, github.OrganizationCustomProperty.CustomProperty), property
assert property.values_editable_by is NotSet
post_parameters = property.to_dict()
property_name = post_parameters.pop("property_name")
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/properties/schema/{property_name}", input=post_parameters
)
return github.OrganizationCustomProperty.OrganizationCustomProperty(
requester=self._requester, headers=headers, attributes=data, completed=True
)
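# Usage sketch (hypothetical property; treat the exact CustomProperty
# constructor arguments as an assumption mirroring GitHub's schema fields):
#
#     from github.OrganizationCustomProperty import CustomProperty
#     prop = CustomProperty(property_name="team", value_type="string", required=False)
#     created = org.create_custom_property(prop)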
def remove_custom_property(self, property_name: str) -> None:
"""
:calls: `DELETE /orgs/{org}/properties/schema/{property_name} <https://docs.github.com/en/rest/orgs/custom-properties#remove-a-custom-property-for-an-organization>`_
:param property_name: string
:rtype: None
"""
assert isinstance(property_name, str), property_name
self._requester.requestJsonAndCheck("DELETE", f"{self.url}/properties/schema/{property_name}")
def list_custom_property_values(
self, repository_query: Opt[str] = NotSet
) -> PaginatedList[RepositoryCustomPropertyValues]:
"""
:calls: `GET /orgs/{org}/properties <https://docs.github.com/en/rest/orgs/custom-properties#list-custom-property-values-for-an-organization>`_
:rtype: :class:`PaginatedList` of :class:`github.OrganizationCustomProperty.RepositoryCustomPropertyValues`
"""
return PaginatedList(
contentClass=github.OrganizationCustomProperty.RepositoryCustomPropertyValues,
requester=self._requester,
firstUrl=f"{self.url}/properties/values",
firstParams=NotSet.remove_unset_items({"repository_query": repository_query}),
)
def create_custom_property_values(
self, repository_names: list[str], properties: dict[str, str | list | None]
) -> None:
"""
Create or update custom property values for organization repositories
:calls: `PATCH /orgs/{org}/properties <https://docs.github.com/en/rest/orgs/custom-properties#create-or-update-custom-property-values-for-organization-repositories>`_
:param repository_names: list of strings
:param properties: dict of string to string, list or None
:rtype: None
"""
assert isinstance(repository_names, list), repository_names
assert all(isinstance(repo, str) for repo in repository_names), repository_names
assert isinstance(properties, dict), properties
assert all(isinstance(value, (str, list, type(None))) for value in properties.values()), properties
patch_parameters = {
"repository_names": repository_names,
"properties": [{"property_name": k, "value": v} for k, v in properties.items()],
}
self._requester.requestJsonAndCheck("PATCH", f"{self.url}/properties/values", input=patch_parameters)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "archived_at" in attributes: # pragma no branch
assert attributes["archived_at"] is None or isinstance(attributes["archived_at"], str), attributes[
"archived_at"
]
self._archived_at = self._makeDatetimeAttribute(attributes["archived_at"])
if "avatar_url" in attributes: # pragma no branch
self._avatar_url = self._makeStringAttribute(attributes["avatar_url"])
if "billing_email" in attributes: # pragma no branch
self._billing_email = self._makeStringAttribute(attributes["billing_email"])
if "blog" in attributes: # pragma no branch
self._blog = self._makeStringAttribute(attributes["blog"])
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "company" in attributes: # pragma no branch
self._company = self._makeStringAttribute(attributes["company"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "default_repository_permission" in attributes: # pragma no branch
self._default_repository_permission = self._makeStringAttribute(attributes["default_repository_permission"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "disk_usage" in attributes: # pragma no branch
self._disk_usage = self._makeIntAttribute(attributes["disk_usage"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "events_url" in attributes: # pragma no branch
self._events_url = self._makeStringAttribute(attributes["events_url"])
if "followers" in attributes: # pragma no branch
self._followers = self._makeIntAttribute(attributes["followers"])
if "following" in attributes: # pragma no branch
self._following = self._makeIntAttribute(attributes["following"])
if "gravatar_id" in attributes: # pragma no branch
self._gravatar_id = self._makeStringAttribute(attributes["gravatar_id"])
if "has_organization_projects" in attributes: # pragma no branch
self._has_organization_projects = self._makeBoolAttribute(attributes["has_organization_projects"])
if "has_repository_projects" in attributes: # pragma no branch
self._has_repository_projects = self._makeBoolAttribute(attributes["has_repository_projects"])
if "hooks_url" in attributes: # pragma no branch
self._hooks_url = self._makeStringAttribute(attributes["hooks_url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issues_url" in attributes: # pragma no branch
self._issues_url = self._makeStringAttribute(attributes["issues_url"])
if "location" in attributes: # pragma no branch
self._location = self._makeStringAttribute(attributes["location"])
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "members_can_create_repositories" in attributes: # pragma no branch
self._members_can_create_repositories = self._makeBoolAttribute(
attributes["members_can_create_repositories"]
)
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "owned_private_repos" in attributes: # pragma no branch
self._owned_private_repos = self._makeIntAttribute(attributes["owned_private_repos"])
if "plan" in attributes: # pragma no branch
self._plan = self._makeClassAttribute(github.Plan.Plan, attributes["plan"])
if "private_gists" in attributes: # pragma no branch
self._private_gists = self._makeIntAttribute(attributes["private_gists"])
if "public_gists" in attributes: # pragma no branch
self._public_gists = self._makeIntAttribute(attributes["public_gists"])
if "public_members_url" in attributes: # pragma no branch
self._public_members_url = self._makeStringAttribute(attributes["public_members_url"])
if "public_repos" in attributes: # pragma no branch
self._public_repos = self._makeIntAttribute(attributes["public_repos"])
if "repos_url" in attributes: # pragma no branch
self._repos_url = self._makeStringAttribute(attributes["repos_url"])
if "total_private_repos" in attributes: # pragma no branch
self._total_private_repos = self._makeIntAttribute(attributes["total_private_repos"])
if "two_factor_requirement_enabled" in attributes: # pragma no branch
self._two_factor_requirement_enabled = self._makeBoolAttribute(attributes["two_factor_requirement_enabled"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| 70,535 | Python | .py | 1,403 | 41.2402 | 180 | 0.631718 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,355 | Requester.py | PyGithub_PyGithub/github/Requester.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Andrew Bettison <andrewb@zip.com.au> #
# Copyright 2012 Dima Kukushkin <dima@kukushkin.me> #
# Copyright 2012 Michael Woodworth <mwoodworth@upverter.com> #
# Copyright 2012 Petteri Muilu <pmuilu@xena.(none)> #
# Copyright 2012 Steve English <steve.english@navetas.com> #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Cameron White <cawhite@pdx.edu> #
# Copyright 2013 Ed Jackson <ed.jackson@gmail.com> #
# Copyright 2013 Jonathan J Hunt <hunt@braincorporation.com> #
# Copyright 2013 Mark Roddy <markroddy@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Jimmy Zelinskie <jimmyzelinskie@gmail.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Brian Eugley <Brian.Eugley@capitalone.com> #
# Copyright 2015 Daniel Pocock <daniel@pocock.pro> #
# Copyright 2016 Denis K <f1nal@cgaming.org> #
# Copyright 2016 Jared K. Smith <jaredsmith@jaredsmith.net> #
# Copyright 2016 Mathieu Mitchell <mmitchell@iweb.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Chris McBride <thehighlander@users.noreply.github.com> #
# Copyright 2017 Hugo <hugovk@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Arda Kuyumcu <kuyumcuarda@gmail.com> #
# Copyright 2018 Dylan <djstein@ncsu.edu> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Mike Miller <github@mikeage.net> #
# Copyright 2018 R1kk3r <R1kk3r@users.noreply.github.com> #
# Copyright 2018 Shubham Singh <41840111+singh811@users.noreply.github.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Tuuu Nya <yuzesheji@qq.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Isac Souza <isouza@daitan.com> #
# Copyright 2019 Rigas Papathanasopoulos <rigaspapas@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Jesse Li <jesse.li2002@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Amador Pahim <apahim@redhat.com> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Liuyang Wan <tsfdye@gmail.com> #
# Copyright 2023 Denis Blanchette <dblanchette@coveo.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Heitor Polidoro <heitor.polidoro@gmail.com> #
# Copyright 2023 Hemslo Wang <hemslo.wang@gmail.com> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Phillip Tran <phillip.qtr@gmail.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2023 adosibalo <94008816+adosibalo@users.noreply.github.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Jonathan Kliem <jonathan.kliem@gmail.com> #
# Copyright 2024 Kobbi Gal <85439776+kgal-pan@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import io
import json
import logging
import mimetypes
import os
import re
import threading
import time
import urllib
import urllib.parse
from collections import deque
from datetime import datetime, timezone
from io import IOBase
from typing import (
TYPE_CHECKING,
Any,
BinaryIO,
Callable,
Deque,
Dict,
Generic,
ItemsView,
List,
Optional,
Tuple,
Type,
TypeVar,
Union,
)
import requests
import requests.adapters
from urllib3 import Retry
import github.Consts as Consts
import github.GithubException as GithubException
if TYPE_CHECKING:
from .AppAuthentication import AppAuthentication
from .Auth import Auth
from .GithubObject import GithubObject
from .InstallationAuthorization import InstallationAuthorization
T = TypeVar("T")
# For App authentication, time remaining before token expiration to request a new one
ACCESS_TOKEN_REFRESH_THRESHOLD_SECONDS = 20
class RequestsResponse:
# mimic the httplib response object
def __init__(self, r: requests.Response):
self.status = r.status_code
self.headers = r.headers
self.text = r.text
def getheaders(self) -> ItemsView[str, str]:
return self.headers.items()
def read(self) -> str:
return self.text
class HTTPSRequestsConnectionClass:
retry: Union[int, Retry]
# mimic the httplib connection object
def __init__(
self,
host: str,
port: Optional[int] = None,
strict: bool = False,
timeout: Optional[int] = None,
retry: Optional[Union[int, Retry]] = None,
pool_size: Optional[int] = None,
**kwargs: Any,
) -> None:
self.port = port if port else 443
self.host = host
self.protocol = "https"
self.timeout = timeout
self.verify = kwargs.get("verify", True)
self.session = requests.Session()
# setting Session.auth to anything other than None disables falling back to the .netrc file
# https://github.com/psf/requests/blob/d63e94f552ebf77ccf45d97e5863ac46500fa2c7/src/requests/sessions.py#L480-L481
# see https://github.com/PyGithub/PyGithub/pull/2703
self.session.auth = Requester.noopAuth
if retry is None:
self.retry = requests.adapters.DEFAULT_RETRIES
else:
self.retry = retry
if pool_size is None:
self.pool_size = requests.adapters.DEFAULT_POOLSIZE
else:
self.pool_size = pool_size
self.adapter = requests.adapters.HTTPAdapter(
max_retries=self.retry,
pool_connections=self.pool_size,
pool_maxsize=self.pool_size,
)
self.session.mount("https://", self.adapter)
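# Retry configuration sketch: retry accepts either an int or a fully
# configured urllib3.Retry (values hypothetical), typically passed through
# Github(..., retry=...):
#
#     from urllib3 import Retry
#     retry = Retry(total=3, backoff_factor=2, status_forcelist=[500, 502, 503, 504])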
def request(
self,
verb: str,
url: str,
input: Optional[Union[str, io.BufferedReader]],
headers: Dict[str, str],
) -> None:
self.verb = verb
self.url = url
self.input = input
self.headers = headers
def getresponse(self) -> RequestsResponse:
verb = getattr(self.session, self.verb.lower())
url = f"{self.protocol}://{self.host}:{self.port}{self.url}"
r = verb(
url,
headers=self.headers,
data=self.input,
timeout=self.timeout,
verify=self.verify,
allow_redirects=False,
)
return RequestsResponse(r)
def close(self) -> None:
self.session.close()
class HTTPRequestsConnectionClass:
# mimic the httplib connection object
def __init__(
self,
host: str,
port: Optional[int] = None,
strict: bool = False,
timeout: Optional[int] = None,
retry: Optional[Union[int, Retry]] = None,
pool_size: Optional[int] = None,
**kwargs: Any,
):
self.port = port if port else 80
self.host = host
self.protocol = "http"
self.timeout = timeout
self.verify = kwargs.get("verify", True)
self.session = requests.Session()
# setting Session.auth to anything other than None disables falling back to the .netrc file
# https://github.com/psf/requests/blob/d63e94f552ebf77ccf45d97e5863ac46500fa2c7/src/requests/sessions.py#L480-L481
# see https://github.com/PyGithub/PyGithub/pull/2703
self.session.auth = Requester.noopAuth
if retry is None:
self.retry = requests.adapters.DEFAULT_RETRIES
else:
self.retry = retry # type: ignore
if pool_size is None:
self.pool_size = requests.adapters.DEFAULT_POOLSIZE
else:
self.pool_size = pool_size
self.adapter = requests.adapters.HTTPAdapter(
max_retries=self.retry,
pool_connections=self.pool_size,
pool_maxsize=self.pool_size,
)
self.session.mount("http://", self.adapter)
def request(self, verb: str, url: str, input: None, headers: Dict[str, str]) -> None:
self.verb = verb
self.url = url
self.input = input
self.headers = headers
def getresponse(self) -> RequestsResponse:
verb = getattr(self.session, self.verb.lower())
url = f"{self.protocol}://{self.host}:{self.port}{self.url}"
r = verb(
url,
headers=self.headers,
data=self.input,
timeout=self.timeout,
verify=self.verify,
allow_redirects=False,
)
return RequestsResponse(r)
def close(self) -> None:
self.session.close()
class Requester:
__installation_authorization: Optional["InstallationAuthorization"]
__app_auth: Optional["AppAuthentication"]
__httpConnectionClass = HTTPRequestsConnectionClass
__httpsConnectionClass = HTTPSRequestsConnectionClass
__persist = True
__logger: Optional[logging.Logger] = None
_frameBuffer: List[Any]
@staticmethod
def noopAuth(request: requests.models.PreparedRequest) -> requests.models.PreparedRequest:
return request
@classmethod
def injectConnectionClasses(
cls,
httpConnectionClass: Type[HTTPRequestsConnectionClass],
httpsConnectionClass: Type[HTTPSRequestsConnectionClass],
) -> None:
cls.__persist = False
cls.__httpConnectionClass = httpConnectionClass
cls.__httpsConnectionClass = httpsConnectionClass
@classmethod
def resetConnectionClasses(cls) -> None:
cls.__persist = True
cls.__httpConnectionClass = HTTPRequestsConnectionClass
cls.__httpsConnectionClass = HTTPSRequestsConnectionClass
@classmethod
def injectLogger(cls, logger: logging.Logger) -> None:
cls.__logger = logger
@classmethod
def resetLogger(cls) -> None:
cls.__logger = None
#############################################################
# For Debug
@classmethod
def setDebugFlag(cls, flag: bool) -> None:
cls.DEBUG_FLAG = flag
@classmethod
def setOnCheckMe(cls, onCheckMe: Callable) -> None:
cls.ON_CHECK_ME = onCheckMe
DEBUG_FLAG = False
DEBUG_FRAME_BUFFER_SIZE = 1024
DEBUG_HEADER_KEY = "DEBUG_FRAME"
ON_CHECK_ME: Optional[Callable] = None
def NEW_DEBUG_FRAME(self, requestHeader: Dict[str, str]) -> None:
"""
Initialize a debug frame with requestHeader.
Frame count is updated and will be attached to the response header.
The structure of a frame: [requestHeader, statusCode, responseHeader, raw_data].
Some of them may be None.
"""
if self.DEBUG_FLAG: # pragma no branch (Flag always set in tests)
new_frame = [requestHeader, None, None, None]
if self._frameCount < self.DEBUG_FRAME_BUFFER_SIZE - 1: # pragma no branch (Should be covered)
self._frameBuffer.append(new_frame)
else:
self._frameBuffer[0] = new_frame # pragma no cover (Should be covered)
self._frameCount = len(self._frameBuffer) - 1
def DEBUG_ON_RESPONSE(self, statusCode: int, responseHeader: Dict[str, Union[str, int]], data: str) -> None:
"""
Update the current frame with the response. The current frame index will be attached to responseHeader.
"""
if self.DEBUG_FLAG: # pragma no branch (Flag always set in tests)
self._frameBuffer[self._frameCount][1:4] = [
statusCode,
responseHeader,
data,
]
responseHeader[self.DEBUG_HEADER_KEY] = self._frameCount
def check_me(self, obj: "GithubObject") -> None:
if self.DEBUG_FLAG and self.ON_CHECK_ME is not None: # pragma no branch (Flag always set in tests)
frame = None
if self.DEBUG_HEADER_KEY in obj._headers:
frame_index = obj._headers[self.DEBUG_HEADER_KEY]
frame = self._frameBuffer[frame_index] # type: ignore
self.ON_CHECK_ME(obj, frame)
def _initializeDebugFeature(self) -> None:
self._frameCount = 0
self._frameBuffer = []
#############################################################
_frameCount: int
__connectionClass: Union[Type[HTTPRequestsConnectionClass], Type[HTTPSRequestsConnectionClass]]
__hostname: str
__authorizationHeader: Optional[str]
__seconds_between_requests: Optional[float]
__seconds_between_writes: Optional[float]
# keep arguments in-sync with github.MainClass and GithubIntegration
def __init__(
self,
auth: Optional["Auth"],
base_url: str,
timeout: int,
user_agent: str,
per_page: int,
verify: Union[bool, str],
retry: Optional[Union[int, Retry]],
pool_size: Optional[int],
seconds_between_requests: Optional[float] = None,
seconds_between_writes: Optional[float] = None,
):
self._initializeDebugFeature()
self.__auth = auth
self.__base_url = base_url
o = urllib.parse.urlparse(base_url)
self.__graphql_prefix = self.get_graphql_prefix(o.path)
self.__graphql_url = urllib.parse.urlunparse(o._replace(path=self.__graphql_prefix))
self.__hostname = o.hostname # type: ignore
self.__port = o.port
self.__prefix = o.path
self.__timeout = timeout
self.__retry = retry # NOTE: retry can be either int or an urllib3 Retry object
self.__pool_size = pool_size
self.__seconds_between_requests = seconds_between_requests
self.__seconds_between_writes = seconds_between_writes
self.__last_requests: Dict[str, float] = dict()
self.__scheme = o.scheme
if o.scheme == "https":
self.__connectionClass = self.__httpsConnectionClass
elif o.scheme == "http":
self.__connectionClass = self.__httpConnectionClass
else:
assert False, "Unknown URL scheme"
self.__connection: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None
self.__connection_lock = threading.Lock()
self.__custom_connections: Deque[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = deque()
self.rate_limiting = (-1, -1)
self.rate_limiting_resettime = 0
self.FIX_REPO_GET_GIT_REF = True
self.per_page = per_page
self.oauth_scopes = None
assert user_agent is not None, (
"github now requires a user-agent. "
"See https://docs.github.com/en/rest/overview/resources-in-the-rest-api#user-agent-required"
)
self.__userAgent = user_agent
self.__verify = verify
self.__installation_authorization = None
# provide auth implementations that require a requester with this requester
if isinstance(self.__auth, WithRequester):
self.__auth.withRequester(self)
def __getstate__(self) -> Dict[str, Any]:
state = self.__dict__.copy()
# __connection_lock is not picklable
del state["_Requester__connection_lock"]
# __connection is not usable on remote, so ignore it
del state["_Requester__connection"]
# __custom_connections is not usable on remote, so ignore it
del state["_Requester__custom_connections"]
return state
def __setstate__(self, state: Dict[str, Any]) -> None:
self.__dict__.update(state)
self.__connection_lock = threading.Lock()
self.__connection = None
self.__custom_connections = deque()
@staticmethod
# replace with str.removesuffix once support for Python 3.8 is dropped
def remove_suffix(string: str, suffix: str) -> str:
if string.endswith(suffix):
return string[: -len(suffix)]
return string
@staticmethod
def get_graphql_prefix(path: Optional[str]) -> str:
if path is None or path in ["", "/"]:
path = ""
if path.endswith(("/v3", "/v3/")):
path = Requester.remove_suffix(path, "/")
path = Requester.remove_suffix(path, "/v3")
return path + "/graphql"
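# Examples of the resulting prefix mapping (following the logic above):
#     get_graphql_prefix(None)       -> "/graphql"         # github.com
#     get_graphql_prefix("/api/v3")  -> "/api/graphql"     # GitHub Enterprise
#     get_graphql_prefix("/api/v3/") -> "/api/graphql"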
def close(self) -> None:
"""
Close the connection to the server.
"""
with self.__connection_lock:
if self.__connection is not None:
self.__connection.close()
self.__connection = None
while self.__custom_connections:
self.__custom_connections.popleft().close()
@property
def kwargs(self) -> Dict[str, Any]:
"""
Returns arguments required to recreate this Requester with Requester.__init__, as well as with
MainClass.__init__ and GithubIntegration.__init__.
"""
return dict(
auth=self.__auth,
base_url=self.__base_url,
timeout=self.__timeout,
user_agent=self.__userAgent,
per_page=self.per_page,
verify=self.__verify,
retry=self.__retry,
pool_size=self.__pool_size,
seconds_between_requests=self.__seconds_between_requests,
seconds_between_writes=self.__seconds_between_writes,
)
@property
def base_url(self) -> str:
return self.__base_url
@property
def graphql_url(self) -> str:
return self.__graphql_url
@property
def scheme(self) -> str:
return self.__scheme
@property
def hostname(self) -> str:
return self.__hostname
@property
def hostname_and_port(self) -> str:
if self.__port is None:
return self.hostname
return f"{self.hostname}:{self.__port}"
@property
def auth(self) -> Optional["Auth"]:
return self.__auth
def withAuth(self, auth: Optional["Auth"]) -> "Requester":
"""
Create a new requester instance with identical configuration but the given authentication method.
:param auth: authentication method
:return: new Requester implementation
"""
kwargs = self.kwargs
kwargs.update(auth=auth)
return Requester(**kwargs)
def requestJsonAndCheck(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, str]] = None,
input: Optional[Any] = None,
) -> Tuple[Dict[str, Any], Any]:
"""
Send a request with JSON body.
:param input: request body, serialized to JSON if specified
:return: ``(headers: dict, JSON Response: Any)``
:raises: :class:`GithubException` for error status codes
"""
return self.__check(*self.requestJson(verb, url, parameters, headers, input, self.__customConnection(url)))
def requestMultipartAndCheck(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
input: Optional[Dict[str, str]] = None,
) -> Tuple[Dict[str, Any], Optional[Dict[str, Any]]]:
"""
Send a request with multi-part-encoded body.
:param input: request body, will be multi-part encoded if specified
:return: ``(headers: dict, JSON Response: Any)``
:raises: :class:`GithubException` for error status codes
"""
return self.__check(*self.requestMultipart(verb, url, parameters, headers, input, self.__customConnection(url)))
def requestBlobAndCheck(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
input: Optional[str] = None,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None,
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Send a request with a file for the body.
:param input: path to a file to use for the request body
:return: ``(headers: dict, JSON Response: Any)``
:raises: :class:`GithubException` for error status codes
"""
return self.__check(*self.requestBlob(verb, url, parameters, headers, input, self.__customConnection(url)))
def graphql_query(self, query: str, variables: Dict[str, Any]) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
:calls: `POST /graphql <https://docs.github.com/en/graphql>`_
"""
input_ = {"query": query, "variables": variables}
response_headers, data = self.requestJsonAndCheck("POST", self.graphql_url, input=input_)
if "errors" in data:
raise self.createException(400, response_headers, data)
return response_headers, data
def graphql_named_mutation(
self, mutation_name: str, variables: Dict[str, Any], output: Optional[str] = None
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
"""
Create a mutation in the format:
mutation MutationName($input: MutationNameInput!) {
mutationName(input: $input) {
<output>
}
}
and call the self.graphql_query method
"""
title = "".join([x.capitalize() for x in mutation_name.split("_")])
mutation_name = title[:1].lower() + title[1:]
output = output or ""
query = f"mutation {title}($input: {title}Input!) {{ {mutation_name}(input: $input) {{ {output} }} }}"
return self.graphql_query(query, variables)
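# Worked sketch of the name transformation above (assumed input):
# mutation_name="minimize_comment" yields title="MinimizeComment" and
# field name "minimizeComment", producing the query
#   mutation MinimizeComment($input: MinimizeCommentInput!)
#     { minimizeComment(input: $input) { <output> } }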
def __check(
self,
status: int,
responseHeaders: Dict[str, Any],
output: str,
) -> Tuple[Dict[str, Any], Any]:
data = self.__structuredFromJson(output)
if status >= 400:
raise self.createException(status, responseHeaders, data)
return responseHeaders, data
def __customConnection(
self, url: str
) -> Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]]:
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None
if not url.startswith("/"):
o = urllib.parse.urlparse(url)
if (
o.hostname != self.__hostname
or (o.port and o.port != self.__port)
or (o.scheme != self.__scheme and not (o.scheme == "https" and self.__scheme == "http"))
): # issue80
if o.scheme == "http":
cnx = self.__httpConnectionClass(
o.hostname, # type: ignore
o.port,
retry=self.__retry,
pool_size=self.__pool_size,
)
self.__custom_connections.append(cnx)
elif o.scheme == "https":
cnx = self.__httpsConnectionClass(
o.hostname, # type: ignore
o.port,
retry=self.__retry,
pool_size=self.__pool_size,
)
self.__custom_connections.append(cnx)
return cnx
@classmethod
def createException(
cls,
status: int,
headers: Dict[str, Any],
output: Dict[str, Any],
) -> GithubException.GithubException:
message = output.get("message", "").lower() if output is not None else ""
exc = GithubException.GithubException
if status == 401 and message == "bad credentials":
exc = GithubException.BadCredentialsException
elif status == 401 and Consts.headerOTP in headers and re.match(r".*required.*", headers[Consts.headerOTP]):
exc = GithubException.TwoFactorException
elif status == 403 and message.startswith("missing or invalid user agent string"):
exc = GithubException.BadUserAgentException
elif status == 403 and cls.isRateLimitError(message):
exc = GithubException.RateLimitExceededException
elif status == 404 and message == "not found":
exc = GithubException.UnknownObjectException
return exc(status, output, headers)
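# Sketch: createException(404, {}, {"message": "Not Found"}) returns an
# UnknownObjectException, while any status/message combination matching
# none of the branches falls back to the base GithubException.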
@classmethod
def isRateLimitError(cls, message: str) -> bool:
return cls.isPrimaryRateLimitError(message) or cls.isSecondaryRateLimitError(message)
@classmethod
def isPrimaryRateLimitError(cls, message: str) -> bool:
if not message:
return False
message = message.lower()
return message.startswith("api rate limit exceeded")
@classmethod
def isSecondaryRateLimitError(cls, message: str) -> bool:
if not message:
return False
message = message.lower()
return (
message.startswith("you have exceeded a secondary rate limit")
or message.endswith("please retry your request again later.")
or message.endswith("please wait a few minutes before you try again.")
)
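# Sketch: the message "You have exceeded a secondary rate limit. Please
# wait a few minutes before you try again." satisfies both the startswith
# check and the final endswith check after lower-casing.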
def __structuredFromJson(self, data: str) -> Any:
if len(data) == 0:
return None
else:
if isinstance(data, bytes):
data = data.decode("utf-8")
try:
return json.loads(data)
except ValueError:
if data.startswith("{") or data.startswith("["):
raise
return {"data": data}
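# Sketch of the fallback above: a plain-text body such as "abc" is wrapped
# as {"data": "abc"}, while a body that looks like JSON but fails to parse
# (e.g. "{broken") re-raises the ValueError.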
def requestJson(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
input: Optional[Any] = None,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None,
) -> Tuple[int, Dict[str, Any], str]:
"""
Send a request with JSON input.
:param input: request body, will be serialized as JSON
:returns: ``(status, headers, body)``
"""
def encode(input: Any) -> Tuple[str, str]:
return "application/json", json.dumps(input)
return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)
def requestMultipart(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, Any]] = None,
headers: Optional[Dict[str, Any]] = None,
input: Optional[Dict[str, str]] = None,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None,
) -> Tuple[int, Dict[str, Any], str]:
"""
Send a request with multi-part encoding.
:param input: request body, will be serialized as multipart form data
:returns: ``(status, headers, body)``
"""
def encode(input: Dict[str, Any]) -> Tuple[str, str]:
boundary = "----------------------------3c3ba8b523b2"
eol = "\r\n"
encoded_input = ""
for name, value in input.items():
encoded_input += f"--{boundary}{eol}"
encoded_input += f'Content-Disposition: form-data; name="{name}"{eol}'
encoded_input += eol
encoded_input += value + eol
encoded_input += f"--{boundary}--{eol}"
return f"multipart/form-data; boundary={boundary}", encoded_input
return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)
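# Sketch of the encoding above for input={"name": "value"} (hypothetical
# field), writing <boundary> for the fixed boundary string and rendering
# each eol as a line break:
#
#   --<boundary>
#   Content-Disposition: form-data; name="name"
#
#   value
#   --<boundary>--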
def requestBlob(
self,
verb: str,
url: str,
parameters: Optional[Dict[str, str]] = None,
headers: Optional[Dict[str, str]] = None,
input: Optional[str] = None,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None,
) -> Tuple[int, Dict[str, Any], str]:
"""
Send a request with a file as request body.
:param input: path to a local file to use for request body
:returns: ``(status, headers, body)``
"""
if headers is None:
headers = {}
def encode(local_path: str) -> Tuple[str, Any]:
if "Content-Type" in headers: # type: ignore
mime_type = headers["Content-Type"] # type: ignore
else:
guessed_type = mimetypes.guess_type(local_path)
mime_type = guessed_type[0] if guessed_type[0] is not None else Consts.defaultMediaType
f = open(local_path, "rb")
return mime_type, f
if input:
headers["Content-Length"] = str(os.path.getsize(input))
return self.__requestEncode(cnx, verb, url, parameters, headers, input, encode)
def requestMemoryBlobAndCheck(
self,
verb: str,
url: str,
parameters: Any,
headers: Dict[str, Any],
file_like: BinaryIO,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]] = None,
) -> Tuple[Dict[str, Any], Any]:
"""
Send a request with a binary file-like for the body.
:param file_like: file-like object to use for the request body
:return: ``(headers: dict, JSON Response: Any)``
:raises: :class:`GithubException` for error status codes
"""
# The expected signature of encode means that the argument is ignored.
def encode(_: Any) -> Tuple[str, Any]:
return headers["Content-Type"], file_like
if not cnx:
cnx = self.__customConnection(url)
return self.__check(*self.__requestEncode(cnx, verb, url, parameters, headers, file_like, encode))
def __requestEncode(
self,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]],
verb: str,
url: str,
parameters: Optional[Dict[str, str]],
requestHeaders: Optional[Dict[str, str]],
input: Optional[T],
encode: Callable[[T], Tuple[str, Any]],
) -> Tuple[int, Dict[str, Any], str]:
assert verb in ["HEAD", "GET", "POST", "PATCH", "PUT", "DELETE"]
if parameters is None:
parameters = {}
if requestHeaders is None:
requestHeaders = {}
if self.__auth is not None:
self.__auth.authentication(requestHeaders)
requestHeaders["User-Agent"] = self.__userAgent
url = self.__makeAbsoluteUrl(url)
url = self.__addParametersToUrl(url, parameters)
encoded_input = None
if input is not None:
requestHeaders["Content-Type"], encoded_input = encode(input)
self.NEW_DEBUG_FRAME(requestHeaders)
status, responseHeaders, output = self.__requestRaw(cnx, verb, url, requestHeaders, encoded_input)
if Consts.headerRateRemaining in responseHeaders and Consts.headerRateLimit in responseHeaders:
self.rate_limiting = (
# ints expected but sometimes floats returned: https://github.com/PyGithub/PyGithub/pull/2697
int(float(responseHeaders[Consts.headerRateRemaining])),
int(float(responseHeaders[Consts.headerRateLimit])),
)
if Consts.headerRateReset in responseHeaders:
# ints expected but sometimes floats returned: https://github.com/PyGithub/PyGithub/pull/2697
self.rate_limiting_resettime = int(float(responseHeaders[Consts.headerRateReset]))
if Consts.headerOAuthScopes in responseHeaders:
self.oauth_scopes = responseHeaders[Consts.headerOAuthScopes].split(", ")
self.DEBUG_ON_RESPONSE(status, responseHeaders, output)
return status, responseHeaders, output
def __requestRaw(
self,
cnx: Optional[Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]],
verb: str,
url: str,
requestHeaders: Dict[str, str],
input: Optional[Any],
) -> Tuple[int, Dict[str, Any], str]:
self.__deferRequest(verb)
try:
original_cnx = cnx
if cnx is None:
cnx = self.__createConnection()
cnx.request(verb, url, input, requestHeaders)
response = cnx.getresponse()
status = response.status
responseHeaders = {k.lower(): v for k, v in response.getheaders()}
output = response.read()
if input:
if isinstance(input, IOBase):
input.close()
self.__log(verb, url, requestHeaders, input, status, responseHeaders, output)
if status == 202 and (
verb == "GET" or verb == "HEAD"
): # only for requests that are considered 'safe' in RFC 2616
time.sleep(Consts.PROCESSING_202_WAIT_TIME)
return self.__requestRaw(original_cnx, verb, url, requestHeaders, input)
if status == 301 and "location" in responseHeaders:
location = responseHeaders["location"]
o = urllib.parse.urlparse(location)
if o.scheme != self.__scheme:
raise RuntimeError(
f"Github server redirected from {self.__scheme} protocol to {o.scheme}, "
f"please correct your Github server URL via base_url: Github(base_url=...)"
)
if o.hostname != self.__hostname:
raise RuntimeError(
f"Github server redirected from host {self.__hostname} to {o.hostname}, "
f"please correct your Github server URL via base_url: Github(base_url=...)"
)
if o.path == url:
port = ":" + str(self.__port) if self.__port is not None else ""
requested_location = f"{self.__scheme}://{self.__hostname}{port}{url}"
raise RuntimeError(
f"Requested {requested_location} but server redirected to {location}, "
f"you may need to correct your Github server URL "
f"via base_url: Github(base_url=...)"
)
if self._logger.isEnabledFor(logging.INFO):
self._logger.info(f"Following Github server redirection from {url} to {o.path}")
return self.__requestRaw(original_cnx, verb, o.path, requestHeaders, input)
return status, responseHeaders, output
finally:
# we record the time of this request after it finished
# to defer next request starting from this request's end, not start
self.__recordRequestTime(verb)
def __deferRequest(self, verb: str) -> None:
# Ensures at least self.__seconds_between_requests seconds have passed since any last request
# and self.__seconds_between_writes seconds have passed since last write request (if verb refers to a write).
# Uses self.__last_requests.
requests = self.__last_requests.values()
writes = [l for v, l in self.__last_requests.items() if v != "GET"]
last_request = max(requests) if requests else 0
last_write = max(writes) if writes else 0
next_request = (last_request + self.__seconds_between_requests) if self.__seconds_between_requests else 0
next_write = (last_write + self.__seconds_between_writes) if self.__seconds_between_writes else 0
next = next_request if verb == "GET" else max(next_request, next_write)
defer = max(next - datetime.now(timezone.utc).timestamp(), 0)
if defer > 0:
if self.__logger is None:
self.__logger = logging.getLogger(__name__)
self.__logger.debug(f"sleeping {defer}s before next GitHub request")
time.sleep(defer)
def __recordRequestTime(self, verb: str) -> None:
# Updates self.__last_requests with current timestamp for given verb
self.__last_requests[verb] = datetime.now(timezone.utc).timestamp()
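# Worked sketch of the deferral (assumed numbers): with
# seconds_between_requests=1.0 and the last request recorded 0.4s ago,
# next_request = last_request + 1.0, so defer = 0.6 and __deferRequest
# sleeps 0.6s before the next GET is sent.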
def __makeAbsoluteUrl(self, url: str) -> str:
# URLs generated locally will be relative to __base_url
# URLs returned from the server will start with __base_url
if url.startswith("/"):
url = f"{self.__prefix}{url}"
else:
o = urllib.parse.urlparse(url)
assert o.hostname in [
self.__hostname,
"uploads.github.com",
"status.github.com",
"github.com",
], o.hostname
assert o.path.startswith((self.__prefix, self.__graphql_prefix, "/api/", "/login/oauth")), o.path
assert o.port == self.__port, o.port
url = o.path
if o.query != "":
url += f"?{o.query}"
return url
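# Sketch: a locally built "/repos/o/r" is prefixed with self.__prefix,
# while a server-returned absolute URL such as
# "https://api.github.com/repos/o/r?page=2" is reduced to its path and
# query, "/repos/o/r?page=2".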
def __addParametersToUrl(
self,
url: str,
parameters: Dict[str, Any],
) -> str:
if len(parameters) == 0:
return url
else:
return f"{url}?{urllib.parse.urlencode(parameters)}"
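# Sketch: __addParametersToUrl("/repos/o/r/issues", {"state": "open"})
# returns "/repos/o/r/issues?state=open".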
def __createConnection(
self,
) -> Union[HTTPRequestsConnectionClass, HTTPSRequestsConnectionClass]:
if self.__persist and self.__connection is not None:
return self.__connection
with self.__connection_lock:
if self.__connection is not None:
if self.__persist:
return self.__connection
self.__connection.close()
self.__connection = self.__connectionClass(
self.__hostname,
self.__port,
retry=self.__retry,
pool_size=self.__pool_size,
timeout=self.__timeout,
verify=self.__verify,
)
return self.__connection
@property
def _logger(self) -> logging.Logger:
if self.__logger is None:
self.__logger = logging.getLogger(__name__)
return self.__logger
def __log(
self,
verb: str,
url: str,
requestHeaders: Dict[str, str],
input: Optional[Any],
status: Optional[int],
responseHeaders: Dict[str, Any],
output: Optional[str],
) -> None:
if self._logger.isEnabledFor(logging.DEBUG):
headersForRequest = requestHeaders.copy()
if self.__auth:
self.__auth.mask_authentication(headersForRequest)
self._logger.debug(
"%s %s://%s%s %s %s ==> %i %s %s",
verb,
self.__scheme,
self.__hostname,
url,
headersForRequest,
input,
status,
responseHeaders,
output,
)
class WithRequester(Generic[T]):
"""
Mixin class that allows a requester to be set.
"""
__requester: Requester
def __init__(self) -> None:
self.__requester: Optional[Requester] = None # type: ignore
@property
def requester(self) -> Requester:
return self.__requester
def withRequester(self, requester: Requester) -> "WithRequester[T]":
assert isinstance(requester, Requester), requester
self.__requester = requester
return self
| 42,317 | Python | .py | 927 | 36.361381 | 122 | 0.585401 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,356 | IssueComment.py | PyGithub_PyGithub/github/IssueComment.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Michael Stead <michael.stead@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Nicolas Agustín Torres <nicolastrres@gmail.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 per1234 <accounts@perglass.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Huan-Cheng Chang <changhc84@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Malik Shahzad Muzaffar <shahzad.malik.muzaffar@cern.ch> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Arash Kadkhodaei <arash77.kad@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Any
import github.GithubObject
import github.NamedUser
from github import Consts
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.Reaction import Reaction
class IssueComment(CompletableGithubObject):
"""
This class represents IssueComments.
The reference can be found here
https://docs.github.com/en/rest/reference/issues#comments
"""
def _initAttributes(self) -> None:
self._body: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._id: Attribute[int] = NotSet
self._issue_url: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._user: Attribute[github.NamedUser.NamedUser] = NotSet
self._reactions: Attribute[dict] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "user": self._user.value})
@property
def body(self) -> str:
self._completeIfNotSet(self._body)
return self._body.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue_url(self) -> str:
self._completeIfNotSet(self._issue_url)
return self._issue_url.value
@property
def node_id(self) -> str:
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def user(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._user)
return self._user.value
@property
def reactions(self) -> dict:
self._completeIfNotSet(self._reactions)
return self._reactions.value
def delete(self) -> None:
"""
:calls: `DELETE /repos/{owner}/{repo}/issues/comments/{id} <https://docs.github.com/en/rest/reference/issues#comments>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(self, body: str) -> None:
"""
:calls: `PATCH /repos/{owner}/{repo}/issues/comments/{id} <https://docs.github.com/en/rest/reference/issues#comments>`_
"""
assert isinstance(body, str), body
post_parameters = {
"body": body,
}
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def get_reactions(self) -> PaginatedList[Reaction]:
"""
:calls: `GET /repos/{owner}/{repo}/issues/comments/{id}/reactions
<https://docs.github.com/en/rest/reference/reactions#list-reactions-for-an-issue-comment>`_
"""
return PaginatedList(
github.Reaction.Reaction,
self._requester,
f"{self.url}/reactions",
None,
headers={"Accept": Consts.mediaTypeReactionsPreview},
)
def create_reaction(self, reaction_type: str) -> Reaction:
"""
:calls: `POST /repos/{owner}/{repo}/issues/comments/{id}/reactions
<https://docs.github.com/en/rest/reference/reactions#create-reaction-for-an-issue-comment>`_
"""
assert isinstance(reaction_type, str), reaction_type
post_parameters = {
"content": reaction_type,
}
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self.url}/reactions",
input=post_parameters,
headers={"Accept": Consts.mediaTypeReactionsPreview},
)
return github.Reaction.Reaction(self._requester, headers, data, completed=True)
def delete_reaction(self, reaction_id: int) -> bool:
"""
:calls: `DELETE /repos/{owner}/{repo}/issues/comments/{comment_id}/reactions/{reaction_id}
<https://docs.github.com/en/rest/reference/reactions#delete-an-issue-comment-reaction>`_
"""
assert isinstance(reaction_id, int), reaction_id
status, _, _ = self._requester.requestJson(
"DELETE",
f"{self.url}/reactions/{reaction_id}",
headers={"Accept": Consts.mediaTypeReactionsPreview},
)
return status == 204
def minimize(self, reason: str = "OUTDATED") -> bool:
"""
:calls: `POST /graphql <https://docs.github.com/en/graphql>`_ with a mutation to minimize comment
<https://docs.github.com/en/graphql/reference/mutations#minimizecomment>
"""
assert isinstance(reason, str), reason
variables = {
"subjectId": self.node_id,
"classifier": reason,
}
_, data = self._requester.graphql_named_mutation(
mutation_name="minimize_comment",
variables={"input": NotSet.remove_unset_items(variables)},
output="minimizedComment { isMinimized }",
)
return data["data"]["minimizeComment"]["minimizedComment"]["isMinimized"] is True
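# Usage sketch (assumes an authenticated client and an existing `repo`;
# names are illustrative):
#
#   comment = repo.get_issue(1).get_comments()[0]
#   assert comment.minimize(reason="OUTDATED")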
def unminimize(self) -> bool:
"""
:calls: `POST /graphql <https://docs.github.com/en/graphql>`_ with a mutation to unminimize comment
<https://docs.github.com/en/graphql/reference/mutations#unminimizecomment>
"""
variables = {
"subjectId": self.node_id,
}
_, data = self._requester.graphql_named_mutation(
mutation_name="unminimize_comment",
variables={"input": NotSet.remove_unset_items(variables)},
output="unminimizedComment { isMinimized }",
)
return data["data"]["unminimizeComment"]["unminimizedComment"]["isMinimized"] is False
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue_url" in attributes: # pragma no branch
self._issue_url = self._makeStringAttribute(attributes["issue_url"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "user" in attributes: # pragma no branch
self._user = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["user"])
if "reactions" in attributes:
self._reactions = self._makeDictAttribute(attributes["reactions"])
| 11,443 | Python | .py | 220 | 44.831818 | 128 | 0.581867 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,357 | Event.py | PyGithub_PyGithub/github/Event.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
import github.Organization
import github.Repository
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class Event(NonCompletableGithubObject):
"""
This class represents Events.
The reference can be found here
https://docs.github.com/en/rest/reference/activity#events
"""
def _initAttributes(self) -> None:
self._actor: Attribute[github.NamedUser.NamedUser] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._id: Attribute[str] = NotSet
self._org: Attribute[github.Organization.Organization] = NotSet
self._payload: Attribute[dict[str, Any]] = NotSet
self._public: Attribute[bool] = NotSet
self._repo: Attribute[github.Repository.Repository] = NotSet
self._type: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "type": self._type.value})
@property
def actor(self) -> github.NamedUser.NamedUser:
return self._actor.value
@property
def created_at(self) -> datetime:
return self._created_at.value
@property
def id(self) -> str:
return self._id.value
@property
def org(self) -> github.Organization.Organization:
return self._org.value
@property
def payload(self) -> dict[str, Any]:
return self._payload.value
@property
def public(self) -> bool:
return self._public.value
@property
def repo(self) -> github.Repository.Repository:
return self._repo.value
@property
def type(self) -> str:
return self._type.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["actor"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "org" in attributes: # pragma no branch
self._org = self._makeClassAttribute(github.Organization.Organization, attributes["org"])
if "payload" in attributes: # pragma no branch
self._payload = self._makeDictAttribute(attributes["payload"])
if "public" in attributes: # pragma no branch
self._public = self._makeBoolAttribute(attributes["public"])
if "repo" in attributes: # pragma no branch
self._repo = self._makeClassAttribute(github.Repository.Repository, attributes["repo"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
| 6,029 | Python | .py | 104 | 53.028846 | 101 | 0.557813 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,358 | Path.py | PyGithub_PyGithub/github/Path.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Justin Kufro <jkufro@andrew.cmu.edu> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class Path(NonCompletableGithubObject):
"""
This class represents a popular Path for a GitHub repository.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#traffic
"""
def _initAttributes(self) -> None:
self._path: Attribute[str] = NotSet
self._title: Attribute[str] = NotSet
self._count: Attribute[int] = NotSet
self._uniques: Attribute[int] = NotSet
def __repr__(self) -> str:
return self.get__repr__(
{
"path": self._path.value,
"title": self._title.value,
"count": self._count.value,
"uniques": self._uniques.value,
}
)
@property
def path(self) -> str:
return self._path.value
@property
def title(self) -> str:
return self._title.value
@property
def count(self) -> int:
return self._count.value
@property
def uniques(self) -> int:
return self._uniques.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "count" in attributes: # pragma no branch
self._count = self._makeIntAttribute(attributes["count"])
if "uniques" in attributes: # pragma no branch
self._uniques = self._makeIntAttribute(attributes["uniques"])
| 4,789 | Python | .py | 81 | 54.111111 | 80 | 0.504579 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,359 | WorkflowJob.py | PyGithub_PyGithub/github/WorkflowJob.py |
############################ Copyrights and license ############################
# #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jeppe Fihl-Pearson <tenzer@tenzer.dk> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Xavi Vega <xabi1309@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.WorkflowStep
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class WorkflowJob(CompletableGithubObject):
"""
This class represents Workflow Jobs.
The reference can be found here
https://docs.github.com/en/rest/reference/actions#workflow-jobs
"""
def _initAttributes(self) -> None:
self._check_run_url: Attribute[str] = NotSet
self._completed_at: Attribute[datetime] = NotSet
self._conclusion: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._head_branch: Attribute[str] = NotSet
self._head_sha: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._labels: Attribute[list[str]] = NotSet
self._name: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._run_attempt: Attribute[int] = NotSet
self._run_id: Attribute[int] = NotSet
self._run_url: Attribute[str] = NotSet
self._runner_group_id: Attribute[int] = NotSet
self._runner_group_name: Attribute[str] = NotSet
self._runner_id: Attribute[int] = NotSet
self._runner_name: Attribute[str] = NotSet
self._started_at: Attribute[datetime] = NotSet
self._status: Attribute[str] = NotSet
self._steps: Attribute[list[github.WorkflowStep.WorkflowStep]] = NotSet
self._url: Attribute[str] = NotSet
self._workflow_name: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "url": self._url.value})
@property
def check_run_url(self) -> str:
self._completeIfNotSet(self._check_run_url)
return self._check_run_url.value
@property
def completed_at(self) -> datetime:
self._completeIfNotSet(self._completed_at)
return self._completed_at.value
@property
def conclusion(self) -> str:
self._completeIfNotSet(self._conclusion)
return self._conclusion.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def head_branch(self) -> str:
self._completeIfNotSet(self._head_branch)
return self._head_branch.value
@property
def head_sha(self) -> str:
self._completeIfNotSet(self._head_sha)
return self._head_sha.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def labels(self) -> list[str]:
self._completeIfNotSet(self._labels)
return self._labels.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def node_id(self) -> str:
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def run_attempt(self) -> int:
self._completeIfNotSet(self._run_attempt)
return self._run_attempt.value
@property
def run_id(self) -> int:
self._completeIfNotSet(self._run_id)
return self._run_id.value
@property
def run_url(self) -> str:
self._completeIfNotSet(self._run_url)
return self._run_url.value
@property
def runner_group_id(self) -> int:
self._completeIfNotSet(self._runner_group_id)
return self._runner_group_id.value
@property
def runner_group_name(self) -> str:
self._completeIfNotSet(self._runner_group_name)
return self._runner_group_name.value
@property
def runner_id(self) -> int:
self._completeIfNotSet(self._runner_id)
return self._runner_id.value
@property
def runner_name(self) -> str:
self._completeIfNotSet(self._runner_name)
return self._runner_name.value
@property
def started_at(self) -> datetime:
self._completeIfNotSet(self._started_at)
return self._started_at.value
@property
def status(self) -> str:
self._completeIfNotSet(self._status)
return self._status.value
@property
def steps(self) -> list[github.WorkflowStep.WorkflowStep]:
self._completeIfNotSet(self._steps)
return self._steps.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def workflow_name(self) -> str:
self._completeIfNotSet(self._workflow_name)
return self._workflow_name.value
def logs_url(self) -> str:
headers, _ = self._requester.requestBlobAndCheck("GET", f"{self.url}/logs")
return headers["location"]
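# Sketch (illustrative): the GET on f"{self.url}/logs" is answered with a
# redirect, so the method returns the download URL taken from the
# "location" response header:
#
#   download_url = job.logs_url()  # assuming `job` is a WorkflowJob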
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "check_run_url" in attributes: # pragma no branch
self._check_run_url = self._makeStringAttribute(attributes["check_run_url"])
if "completed_at" in attributes: # pragma no branch
self._completed_at = self._makeDatetimeAttribute(attributes["completed_at"])
if "conclusion" in attributes: # pragma no branch
self._conclusion = self._makeStringAttribute(attributes["conclusion"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "head_branch" in attributes: # pragma no branch
self._head_branch = self._makeStringAttribute(attributes["head_branch"])
if "head_sha" in attributes: # pragma no branch
self._head_sha = self._makeStringAttribute(attributes["head_sha"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "labels" in attributes: # pragma no branch
self._labels = self._makeListOfStringsAttribute(attributes["labels"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "run_attempt" in attributes: # pragma no branch
self._run_attempt = self._makeIntAttribute(attributes["run_attempt"])
if "run_id" in attributes: # pragma no branch
self._run_id = self._makeIntAttribute(attributes["run_id"])
if "run_url" in attributes: # pragma no branch
self._run_url = self._makeStringAttribute(attributes["run_url"])
if "runner_group_id" in attributes: # pragma no branch
self._runner_group_id = self._makeIntAttribute(attributes["runner_group_id"])
if "runner_group_name" in attributes: # pragma no branch
self._runner_group_name = self._makeStringAttribute(attributes["runner_group_name"])
if "runner_id" in attributes: # pragma no branch
self._runner_id = self._makeIntAttribute(attributes["runner_id"])
if "runner_name" in attributes: # pragma no branch
self._runner_name = self._makeStringAttribute(attributes["runner_name"])
if "started_at" in attributes: # pragma no branch
self._started_at = self._makeDatetimeAttribute(attributes["started_at"])
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
if "steps" in attributes: # pragma no branch
self._steps = self._makeListOfClassesAttribute(github.WorkflowStep.WorkflowStep, attributes["steps"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "workflow_name" in attributes: # pragma no branch
self._workflow_name = self._makeStringAttribute(attributes["workflow_name"])
| 10,476 | Python | .py | 207 | 43.376812 | 113 | 0.604104 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,360 | SecurityAndAnalysis.py | PyGithub_PyGithub/github/SecurityAndAnalysis.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Nikolay Yurin <yurinnick93@gmail.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Caleb McCombs <caleb@mccombalot.net> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
import github.SecurityAndAnalysisFeature
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class SecurityAndAnalysis(NonCompletableGithubObject):
"""
This class represents Security and Analysis Settings.
"""
def _initAttributes(self) -> None:
self._advanced_security: Attribute[github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature] = NotSet
self._dependabot_security_updates: Attribute[
github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature
] = NotSet
self._secret_scanning: Attribute[github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature] = NotSet
self._secret_scanning_non_provider_patterns: Attribute[
github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature
] = NotSet
self._secret_scanning_push_protection: Attribute[
github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature
] = NotSet
self._secret_scanning_validity_checks: Attribute[
github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature
] = NotSet
def __repr__(self) -> str:
repr_attributes = {
"advanced_security": repr(self._advanced_security.value),
"dependabot_security_updates": repr(self._dependabot_security_updates.value),
"secret_scanning": repr(self._secret_scanning.value),
"secret_scanning_non_provider_patterns": repr(self._secret_scanning_non_provider_patterns.value),
"secret_scanning_push_protection": repr(self._secret_scanning_push_protection.value),
"secret_scanning_validity_checks": repr(self._secret_scanning_validity_checks.value),
}
return self.get__repr__(repr_attributes)
@property
def advanced_security(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._advanced_security.value
@property
def dependabot_security_updates(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._dependabot_security_updates.value
@property
def secret_scanning(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._secret_scanning.value
@property
def secret_scanning_non_provider_patterns(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._secret_scanning_non_provider_patterns.value
@property
def secret_scanning_push_protection(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._secret_scanning_push_protection.value
@property
def secret_scanning_validity_checks(self) -> github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature:
return self._secret_scanning_validity_checks.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
def make_attribute(attribute_name: str) -> None:
if attribute_name in attributes:
setattr(
self,
f"_{attribute_name}",
self._makeClassAttribute(
github.SecurityAndAnalysisFeature.SecurityAndAnalysisFeature, attributes[attribute_name]
),
)
make_attribute("advanced_security")
make_attribute("dependabot_security_updates")
make_attribute("secret_scanning")
make_attribute("secret_scanning_non_provider_patterns")
make_attribute("secret_scanning_push_protection")
make_attribute("secret_scanning_validity_checks")
| 6,878 | Python | .py | 106 | 58.575472 | 116 | 0.589907 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,361 | AdvisoryBase.py | PyGithub_PyGithub/github/AdvisoryBase.py |
############################ Copyrights and license ############################
# #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Joseph Henrich <crimsonknave@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Thomas Cooper <coopernetes@proton.me> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
from github.CVSS import CVSS
from github.CWE import CWE
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class AdvisoryBase(NonCompletableGithubObject):
"""
This class represents the attributes shared between GlobalAdvisory, RepositoryAdvisory and DependabotAdvisory
https://docs.github.com/en/rest/security-advisories/global-advisories
https://docs.github.com/en/rest/security-advisories/repository-advisories
https://docs.github.com/en/rest/dependabot/alerts
"""
def _initAttributes(self) -> None:
self._cve_id: Attribute[str] = NotSet
self._cvss: Attribute[CVSS] = NotSet
self._cwes: Attribute[list[CWE]] = NotSet
self._description: Attribute[str] = NotSet
self._ghsa_id: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._identifiers: Attribute[list[dict]] = NotSet
self._published_at: Attribute[datetime] = NotSet
self._severity: Attribute[str] = NotSet
self._summary: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
self._withdrawn_at: Attribute[datetime] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"ghsa_id": self.ghsa_id, "summary": self.summary})
@property
def cve_id(self) -> str:
return self._cve_id.value
@property
def cvss(self) -> CVSS:
return self._cvss.value
@property
def cwes(self) -> list[CWE]:
return self._cwes.value
@property
def description(self) -> str:
return self._description.value
@property
def ghsa_id(self) -> str:
return self._ghsa_id.value
@property
def html_url(self) -> str:
return self._html_url.value
@property
def identifiers(self) -> list[dict]:
return self._identifiers.value
@property
def published_at(self) -> datetime:
return self._published_at.value
@property
def severity(self) -> str:
return self._severity.value
@property
def summary(self) -> str:
return self._summary.value
@property
def updated_at(self) -> datetime:
return self._updated_at.value
@property
def url(self) -> str:
return self._url.value
@property
def withdrawn_at(self) -> datetime:
return self._withdrawn_at.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "cve_id" in attributes: # pragma no branch
self._cve_id = self._makeStringAttribute(attributes["cve_id"])
if "cvss" in attributes: # pragma no branch
self._cvss = self._makeClassAttribute(CVSS, attributes["cvss"])
if "cwes" in attributes: # pragma no branch
self._cwes = self._makeListOfClassesAttribute(CWE, attributes["cwes"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "ghsa_id" in attributes: # pragma no branch
self._ghsa_id = self._makeStringAttribute(attributes["ghsa_id"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "identifiers" in attributes: # pragma no branch
self._identifiers = self._makeListOfDictsAttribute(attributes["identifiers"])
if "published_at" in attributes: # pragma no branch
assert attributes["published_at"] is None or isinstance(attributes["published_at"], str), attributes[
"published_at"
]
self._published_at = self._makeDatetimeAttribute(attributes["published_at"])
if "severity" in attributes: # pragma no branch
self._severity = self._makeStringAttribute(attributes["severity"])
if "summary" in attributes: # pragma no branch
self._summary = self._makeStringAttribute(attributes["summary"])
if "updated_at" in attributes: # pragma no branch
assert attributes["updated_at"] is None or isinstance(attributes["updated_at"], str), attributes[
"updated_at"
]
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "withdrawn_at" in attributes: # pragma no branch
assert attributes["withdrawn_at"] is None or isinstance(attributes["withdrawn_at"], str), attributes[
"withdrawn_at"
]
self._withdrawn_at = self._makeDatetimeAttribute(attributes["withdrawn_at"])
| 6,789 | Python | .py | 128 | 46.125 | 115 | 0.579669 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,362 | GitObject.py | PyGithub_PyGithub/github/GitObject.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class GitObject(NonCompletableGithubObject):
"""
This class represents GitObjects.
"""
def _initAttributes(self) -> None:
self._sha: Attribute[str] = NotSet
self._type: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value})
@property
def sha(self) -> str:
return self._sha.value
@property
def type(self) -> str:
return self._type.value
@property
def url(self) -> str:
return self._url.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
| 4,037 | Python | .py | 64 | 59.359375 | 80 | 0.492304 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,363 | RepositoryPreferences.py | PyGithub_PyGithub/github/RepositoryPreferences.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Nick Campbell <nicholas.j.campbell@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Dhruv Manilawala <dhruvmanila@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.Repository
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.Repository import Repository
class RepositoryPreferences(NonCompletableGithubObject):
"""
This class represents repository preferences.
The reference can be found here
https://docs.github.com/en/free-pro-team@latest/rest/reference/checks#update-repository-preferences-for-check-suites
"""
def _initAttributes(self) -> None:
self._preferences: Attribute[dict[str, list[dict[str, bool | int]]]] = NotSet
self._repository: Attribute[Repository] = NotSet
@property
def preferences(self) -> dict[str, list[dict[str, bool | int]]]:
return self._preferences.value
@property
def repository(self) -> Repository:
return self._repository.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "preferences" in attributes: # pragma no branch
self._preferences = self._makeDictAttribute(attributes["preferences"])
if "repository" in attributes: # pragma no branch
self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
| 4,385 | Python | .py | 64 | 65.453125 | 120 | 0.536551 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,364 | GitCommit.py | PyGithub_PyGithub/github/GitCommit.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
import github.GitAuthor
import github.GithubObject
import github.GitTree
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class GitCommit(CompletableGithubObject):
"""
This class represents GitCommits.
The reference can be found here
https://docs.github.com/en/rest/reference/git#commits
"""
def _initAttributes(self) -> None:
self._author: Attribute[github.GitAuthor.GitAuthor] = NotSet
self._committer: Attribute[github.GitAuthor.GitAuthor] = NotSet
self._html_url: Attribute[str] = NotSet
self._message: Attribute[str] = NotSet
self._parents: Attribute[list[GitCommit]] = NotSet
self._sha: Attribute[str] = NotSet
self._tree: Attribute[github.GitTree.GitTree] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value})
@property
def author(self) -> github.GitAuthor.GitAuthor:
self._completeIfNotSet(self._author)
return self._author.value
@property
def committer(self) -> github.GitAuthor.GitAuthor:
self._completeIfNotSet(self._committer)
return self._committer.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def message(self) -> str:
self._completeIfNotSet(self._message)
return self._message.value
@property
def parents(self) -> list[GitCommit]:
self._completeIfNotSet(self._parents)
return self._parents.value
@property
def sha(self) -> str:
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def tree(self) -> github.GitTree.GitTree:
self._completeIfNotSet(self._tree)
return self._tree.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def _identity(self) -> str:
return self.sha
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["author"])
if "committer" in attributes: # pragma no branch
self._committer = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["committer"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "message" in attributes: # pragma no branch
self._message = self._makeStringAttribute(attributes["message"])
if "parents" in attributes: # pragma no branch
self._parents = self._makeListOfClassesAttribute(GitCommit, attributes["parents"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "tree" in attributes: # pragma no branch
self._tree = self._makeClassAttribute(github.GitTree.GitTree, attributes["tree"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
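# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch of reading a GitCommit via the Git Data API, assuming a
# valid token in TOKEN and a known commit SHA (both placeholders). Property
# access lazily completes the object through _completeIfNotSet().
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
git_commit = repo.get_git_commit("7fd1a60b01f91b314f59955a4e4d4e80d8edf11d")
print(git_commit.message)                             # full commit message
print(git_commit.tree.sha)                            # tree the commit points at
print([parent.sha for parent in git_commit.parents])  # parent commits, if any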
| 6,383 | Python | .py | 113 | 51.141593 | 107 | 0.56247 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,365 | GitTag.py | PyGithub_PyGithub/github/GitTag.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.GitAuthor
import github.GithubObject
import github.GitObject
import github.GitTreeElement
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.GitAuthor import GitAuthor
from github.GitObject import GitObject
class GitTag(CompletableGithubObject):
"""
This class represents GitTags.
The reference can be found here
https://docs.github.com/en/rest/reference/git#tags
"""
def _initAttributes(self) -> None:
self._message: Attribute[str] = NotSet
self._object: Attribute[GitObject] = NotSet
self._sha: Attribute[str] = NotSet
self._tag: Attribute[str] = NotSet
self._tagger: Attribute[GitAuthor] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value, "tag": self._tag.value})
@property
def message(self) -> str:
self._completeIfNotSet(self._message)
return self._message.value
@property
def object(self) -> GitObject:
self._completeIfNotSet(self._object)
return self._object.value
@property
def sha(self) -> str:
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def tag(self) -> str:
self._completeIfNotSet(self._tag)
return self._tag.value
@property
def tagger(self) -> GitAuthor:
self._completeIfNotSet(self._tagger)
return self._tagger.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "message" in attributes: # pragma no branch
self._message = self._makeStringAttribute(attributes["message"])
if "object" in attributes: # pragma no branch
self._object = self._makeClassAttribute(github.GitObject.GitObject, attributes["object"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "tag" in attributes: # pragma no branch
self._tag = self._makeStringAttribute(attributes["tag"])
if "tagger" in attributes: # pragma no branch
self._tagger = self._makeClassAttribute(github.GitAuthor.GitAuthor, attributes["tagger"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
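# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch, assuming TOKEN and the SHA of an *annotated* tag object
# (lightweight tags have no tag object and cannot be fetched this way).
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
git_tag = repo.get_git_tag("tag-object-sha")     # placeholder SHA
print(git_tag.tag, git_tag.message)              # tag name and annotation
print(git_tag.object.sha)                        # target commit/tree/blob
print(git_tag.tagger.name, git_tag.tagger.date)  # GitAuthor of the tag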
| 5,660 | Python | .py | 100 | 51.83 | 101 | 0.5488 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,366 | CWE.py | PyGithub_PyGithub/github/CWE.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Eric Nieuwland <eric.nieuwland@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jonathan Leitschuh <jonathan.leitschuh@gmail.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class CWE(CompletableGithubObject):
"""
This class represents a CWE.
The reference can be found here
https://docs.github.com/en/rest/security-advisories/repository-advisories
"""
def _initAttributes(self) -> None:
self._cwe_id: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
@property
def cwe_id(self) -> str:
return self._cwe_id.value
@property
def name(self) -> str:
return self._name.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "cwe_id" in attributes: # pragma no branch
self._cwe_id = self._makeStringAttribute(attributes["cwe_id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
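# --- Usage sketch (appended; not part of the upstream file) -----------------
# CWE objects are not fetched directly; they arrive nested inside security
# advisory payloads. This sketch assumes a token with access to the repo's
# advisories and that RepositoryAdvisory exposes a `cwes` list.
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
for advisory in repo.get_repository_advisories():
    for cwe in advisory.cwes:
        print(cwe.cwe_id, cwe.name)  # e.g. "CWE-79" and its human-readable name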
| 4,072 | Python | .py | 61 | 63.672131 | 80 | 0.499 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,367 | AppAuthentication.py | PyGithub_PyGithub/github/AppAuthentication.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 TechnicalPirate <35609336+TechnicalPirate@users.noreply.github.com>#
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Eric Nieuwland <eric.nieuwland@gmail.com> #
# Copyright 2023 Denis Blanchette <dblanchette@coveo.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Dict, Optional, Union
import deprecated
from github.Auth import AppAuth, AppInstallationAuth
@deprecated.deprecated("Use github.Auth.AppInstallationAuth instead")
class AppAuthentication(AppInstallationAuth):
def __init__(
self,
app_id: Union[int, str],
private_key: str,
installation_id: int,
token_permissions: Optional[Dict[str, str]] = None,
):
super().__init__(
app_auth=AppAuth(app_id, private_key),
installation_id=installation_id,
token_permissions=token_permissions,
)
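# --- Usage sketch (appended; not part of the upstream file) -----------------
# The deprecated spelling next to the replacement the decorator points at.
# APP_ID, PRIVATE_KEY and INSTALLATION_ID are placeholders for real GitHub
# App credentials.
from github import Github
from github.Auth import AppAuth, AppInstallationAuth

APP_ID, INSTALLATION_ID = 123456, 654321              # placeholders
PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\n..."  # placeholder PEM

# Deprecated: Github(auth=AppAuthentication(APP_ID, PRIVATE_KEY, INSTALLATION_ID))
# Preferred equivalent:
auth = AppInstallationAuth(AppAuth(APP_ID, PRIVATE_KEY), INSTALLATION_ID)
gh = Github(auth=auth)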
| 3,362 | Python | .py | 51 | 62.843137 | 85 | 0.483812 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,368 | GitReleaseAsset.py | PyGithub_PyGithub/github/GitReleaseAsset.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Chris McBride <thehighlander@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.NamedUser
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class GitReleaseAsset(CompletableGithubObject):
"""
This class represents GitReleaseAssets.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#releases
"""
def _initAttributes(self) -> None:
self._url: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._label: Attribute[str] = NotSet
self._content_type: Attribute[str] = NotSet
self._state: Attribute[str] = NotSet
self._size: Attribute[int] = NotSet
self._download_count: Attribute[int] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._browser_download_url: Attribute[str] = NotSet
self._uploader: Attribute[github.NamedUser.NamedUser] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"url": self.url})
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def label(self) -> str:
self._completeIfNotSet(self._label)
return self._label.value
@property
def content_type(self) -> str:
self._completeIfNotSet(self._content_type)
return self._content_type.value
@property
def state(self) -> str:
self._completeIfNotSet(self._state)
return self._state.value
@property
def size(self) -> int:
self._completeIfNotSet(self._size)
return self._size.value
@property
def download_count(self) -> int:
self._completeIfNotSet(self._download_count)
return self._download_count.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def browser_download_url(self) -> str:
self._completeIfNotSet(self._browser_download_url)
return self._browser_download_url.value
@property
def uploader(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._uploader)
return self._uploader.value
def delete_asset(self) -> bool:
"""
Delete asset from the release.
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
return True
def update_asset(self, name: str, label: str = "") -> GitReleaseAsset:
"""
Update asset metadata.
"""
assert isinstance(name, str), name
assert isinstance(label, str), label
post_parameters = {"name": name, "label": label}
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
return GitReleaseAsset(self._requester, headers, data, completed=True)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "label" in attributes: # pragma no branch
self._label = self._makeStringAttribute(attributes["label"])
if "uploader" in attributes: # pragma no branch
self._uploader = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["uploader"])
if "content_type" in attributes: # pragma no branch
self._content_type = self._makeStringAttribute(attributes["content_type"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "size" in attributes: # pragma no branch
self._size = self._makeIntAttribute(attributes["size"])
if "download_count" in attributes: # pragma no branch
self._download_count = self._makeIntAttribute(attributes["download_count"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "browser_download_url" in attributes: # pragma no branch
self._browser_download_url = self._makeStringAttribute(attributes["browser_download_url"])
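# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch, assuming TOKEN and a repository that has published at
# least one release with attached assets.
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
release = repo.get_latest_release()
for asset in release.get_assets():
    print(asset.name, asset.size, asset.download_count)
    print(asset.browser_download_url)
# Rename an asset, then remove it (the two methods defined above):
# asset = asset.update_asset("new-name.zip", label="nightly build")
# asset.delete_asset()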
| 8,337 | Python | .py | 154 | 47.941558 | 105 | 0.577277 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,369 | Notification.py | PyGithub_PyGithub/github/Notification.py |
############################ Copyrights and license ############################
# #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Peter Golm <golm.peter@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Alice GIRARD <bouhahah@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Olof-Joachim Frahm (欧雅福) <olof@macrolet.net> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Alice GIRARD <bouhahah@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Matthias Bilger <matthias@bilger.info> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.Issue
import github.NotificationSubject
import github.PullRequest
import github.Repository
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class Notification(CompletableGithubObject):
"""
This class represents Notifications.
The reference can be found here
https://docs.github.com/en/rest/reference/activity#notifications
"""
def _initAttributes(self) -> None:
self._id: Attribute[str] = NotSet
self._last_read_at: Attribute[datetime] = NotSet
self._repository: Attribute[github.Repository.Repository] = NotSet
self._subject: Attribute[github.NotificationSubject.NotificationSubject] = NotSet
self._reason: Attribute[str] = NotSet
self._subscription_url: Attribute[str] = NotSet
self._unread: Attribute[bool] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "subject": self._subject.value})
@property
def id(self) -> str:
self._completeIfNotSet(self._id)
return self._id.value
@property
def last_read_at(self) -> datetime:
self._completeIfNotSet(self._last_read_at)
return self._last_read_at.value
@property
def repository(self) -> github.Repository.Repository:
self._completeIfNotSet(self._repository)
return self._repository.value
@property
def subject(self) -> github.NotificationSubject.NotificationSubject:
self._completeIfNotSet(self._subject)
return self._subject.value
@property
def reason(self) -> str:
self._completeIfNotSet(self._reason)
return self._reason.value
@property
def subscription_url(self) -> str:
self._completeIfNotSet(self._subscription_url)
return self._subscription_url.value
@property
def unread(self) -> bool:
self._completeIfNotSet(self._unread)
return self._unread.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def mark_as_read(self) -> None:
"""
:calls: `PATCH /notifications/threads/{id} <https://docs.github.com/en/rest/reference/activity#notifications>`_
"""
headers, data = self._requester.requestJsonAndCheck(
"PATCH",
self.url,
)
def mark_as_done(self) -> None:
"""
:calls: `DELETE /notifications/threads/{id} <https://docs.github.com/en/rest/reference/activity#notifications>`_
"""
headers, data = self._requester.requestJsonAndCheck(
"DELETE",
self.url,
)
def get_pull_request(self) -> github.PullRequest.PullRequest:
headers, data = self._requester.requestJsonAndCheck("GET", self.subject.url)
return github.PullRequest.PullRequest(self._requester, headers, data, completed=True)
def get_issue(self) -> github.Issue.Issue:
headers, data = self._requester.requestJsonAndCheck("GET", self.subject.url)
return github.Issue.Issue(self._requester, headers, data, completed=True)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "last_read_at" in attributes: # pragma no branch
self._last_read_at = self._makeDatetimeAttribute(attributes["last_read_at"])
if "repository" in attributes: # pragma no branch
self._repository = self._makeClassAttribute(github.Repository.Repository, attributes["repository"])
if "subject" in attributes: # pragma no branch
self._subject = self._makeClassAttribute(
github.NotificationSubject.NotificationSubject, attributes["subject"]
)
if "reason" in attributes: # pragma no branch
self._reason = self._makeStringAttribute(attributes["reason"])
if "subscription_url" in attributes: # pragma no branch
self._subscription_url = self._makeStringAttribute(attributes["subscription_url"])
if "unread" in attributes: # pragma no branch
self._unread = self._makeBoolAttribute(attributes["unread"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
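# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch, assuming TOKEN carries the `notifications` scope. Pull
# request notifications are resolved to PullRequest objects via subject.url.
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
for notification in gh.get_user().get_notifications(all=True):
    print(notification.reason, notification.subject.title)
    if notification.subject.type == "PullRequest":
        pr = notification.get_pull_request()
        print(" ->", pr.number, pr.state)
    notification.mark_as_read()  # PATCH /notifications/threads/{id}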
| 8,280 | Python | .py | 148 | 49.939189 | 120 | 0.582377 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,370 | Commit.py | PyGithub_PyGithub/github/Commit.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Andy Casey <acasey@mso.anu.edu.au> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 John Eskew <jeskew@edx.org> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Danilo Martins <mawkee@gmail.com> #
# Copyright 2020 Dhruv Manilawala <dhruvmanila@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 iarspider <iarspider@gmail.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.CheckRun
import github.CheckSuite
import github.CommitCombinedStatus
import github.CommitComment
import github.CommitStats
import github.CommitStatus
import github.File
import github.GitCommit
import github.NamedUser
import github.PaginatedList
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt, is_optional
from github.PaginatedList import PaginatedList
if TYPE_CHECKING:
from github.CheckRun import CheckRun
from github.CheckSuite import CheckSuite
from github.CommitCombinedStatus import CommitCombinedStatus
from github.CommitComment import CommitComment
from github.CommitStats import CommitStats
from github.CommitStatus import CommitStatus
from github.File import File
from github.GitCommit import GitCommit
from github.NamedUser import NamedUser
from github.PullRequest import PullRequest
class Commit(CompletableGithubObject):
"""
This class represents Commits.
The reference can be found here
https://docs.github.com/en/rest/reference/git#commits
"""
def _initAttributes(self) -> None:
self._author: Attribute[NamedUser] = NotSet
self._comments_url: Attribute[str] = NotSet
self._commit: Attribute[GitCommit] = NotSet
self._committer: Attribute[NamedUser] = NotSet
self._html_url: Attribute[str] = NotSet
self._parents: Attribute[list[Commit]] = NotSet
self._sha: Attribute[str] = NotSet
self._stats: Attribute[CommitStats] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value})
@property
def author(self) -> NamedUser:
self._completeIfNotSet(self._author)
return self._author.value
@property
def comments_url(self) -> str:
self._completeIfNotSet(self._comments_url)
return self._comments_url.value
@property
def commit(self) -> GitCommit:
self._completeIfNotSet(self._commit)
return self._commit.value
@property
def committer(self) -> NamedUser:
self._completeIfNotSet(self._committer)
return self._committer.value
# This should be a method, but this used to be a property and cannot be changed without breaking user code
# TODO: remove @property on version 3
@property
def files(self) -> PaginatedList[File]:
return PaginatedList(
github.File.File,
self._requester,
self.url,
{},
None,
"files",
"total_files",
self.raw_data,
self.raw_headers,
)
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def parents(self) -> list[Commit]:
self._completeIfNotSet(self._parents)
return self._parents.value
@property
def sha(self) -> str:
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def stats(self) -> CommitStats:
self._completeIfNotSet(self._stats)
return self._stats.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def create_comment(
self,
body: str,
line: Opt[int] = NotSet,
path: Opt[str] = NotSet,
position: Opt[int] = NotSet,
) -> CommitComment:
"""
:calls: `POST /repos/{owner}/{repo}/commits/{sha}/comments <https://docs.github.com/en/rest/reference/repos#comments>`_
"""
assert isinstance(body, str), body
assert is_optional(line, int), line
assert is_optional(path, str), path
assert is_optional(position, int), position
post_parameters = NotSet.remove_unset_items({"body": body, "line": line, "path": path, "position": position})
headers, data = self._requester.requestJsonAndCheck("POST", f"{self.url}/comments", input=post_parameters)
return github.CommitComment.CommitComment(self._requester, headers, data, completed=True)
def create_status(
self,
state: str,
target_url: Opt[str] = NotSet,
description: Opt[str] = NotSet,
context: Opt[str] = NotSet,
) -> CommitStatus:
"""
:calls: `POST /repos/{owner}/{repo}/statuses/{sha} <https://docs.github.com/en/rest/reference/repos#statuses>`_
"""
assert isinstance(state, str), state
assert is_optional(target_url, str), target_url
assert is_optional(description, str), description
assert is_optional(context, str), context
post_parameters = NotSet.remove_unset_items(
{
"state": state,
"target_url": target_url,
"description": description,
"context": context,
}
)
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self._parentUrl(self._parentUrl(self.url))}/statuses/{self.sha}",
input=post_parameters,
)
return github.CommitStatus.CommitStatus(self._requester, headers, data, completed=True)
def get_comments(self) -> PaginatedList[CommitComment]:
"""
:calls: `GET /repos/{owner}/{repo}/commits/{sha}/comments <https://docs.github.com/en/rest/reference/repos#comments>`_
"""
return PaginatedList(
github.CommitComment.CommitComment,
self._requester,
f"{self.url}/comments",
None,
)
def get_statuses(self) -> PaginatedList[CommitStatus]:
"""
:calls: `GET /repos/{owner}/{repo}/statuses/{ref} <https://docs.github.com/en/rest/reference/repos#statuses>`_
"""
return PaginatedList(
github.CommitStatus.CommitStatus,
self._requester,
f"{self._parentUrl(self._parentUrl(self.url))}/statuses/{self.sha}",
None,
)
def get_combined_status(self) -> CommitCombinedStatus:
"""
:calls: `GET /repos/{owner}/{repo}/commits/{ref}/status <https://docs.github.com/en/rest/reference/repos#statuses>`_
"""
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/status")
return github.CommitCombinedStatus.CommitCombinedStatus(self._requester, headers, data, completed=True)
def get_pulls(self) -> PaginatedList[PullRequest]:
"""
:calls: `GET /repos/{owner}/{repo}/commits/{sha}/pulls <https://docs.github.com/en/rest/reference/repos#list-pull-requests-associated-with-a-commit>`_
"""
return PaginatedList(
github.PullRequest.PullRequest,
self._requester,
f"{self.url}/pulls",
None,
headers={"Accept": "application/vnd.github.groot-preview+json"},
)
def get_check_runs(
self,
check_name: Opt[str] = NotSet,
status: Opt[str] = NotSet,
filter: Opt[str] = NotSet,
) -> PaginatedList[CheckRun]:
"""
:calls: `GET /repos/{owner}/{repo}/commits/{sha}/check-runs <https://docs.github.com/en/rest/reference/checks#list-check-runs-for-a-git-reference>`_
"""
assert is_optional(check_name, str), check_name
assert is_optional(status, str), status
assert is_optional(filter, str), filter
url_parameters = NotSet.remove_unset_items({"check_name": check_name, "status": status, "filter": filter})
return PaginatedList(
github.CheckRun.CheckRun,
self._requester,
f"{self.url}/check-runs",
url_parameters,
headers={"Accept": "application/vnd.github.v3+json"},
list_item="check_runs",
)
def get_check_suites(self, app_id: Opt[int] = NotSet, check_name: Opt[str] = NotSet) -> PaginatedList[CheckSuite]:
"""
:calls: `GET /repos/{owner}/{repo}/commits/{ref}/check-suites <https://docs.github.com/en/rest/reference/checks#list-check-suites-for-a-git-reference>`_
"""
assert is_optional(app_id, int), app_id
assert is_optional(check_name, str), check_name
parameters = NotSet.remove_unset_items({"app_id": app_id, "check_name": check_name})
request_headers = {"Accept": "application/vnd.github.v3+json"}
return PaginatedList(
github.CheckSuite.CheckSuite,
self._requester,
f"{self.url}/check-suites",
parameters,
headers=request_headers,
list_item="check_suites",
)
@property
def _identity(self) -> str:
return self.sha
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "author" in attributes: # pragma no branch
self._author = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["author"])
if "comments_url" in attributes: # pragma no branch
self._comments_url = self._makeStringAttribute(attributes["comments_url"])
if "commit" in attributes: # pragma no branch
self._commit = self._makeClassAttribute(github.GitCommit.GitCommit, attributes["commit"])
if "committer" in attributes: # pragma no branch
self._committer = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["committer"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "parents" in attributes: # pragma no branch
self._parents = self._makeListOfClassesAttribute(Commit, attributes["parents"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "stats" in attributes: # pragma no branch
self._stats = self._makeClassAttribute(github.CommitStats.CommitStats, attributes["stats"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
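# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch, assuming TOKEN and a known commit SHA. Commit wraps a
# GitCommit (the `commit` property) and adds repository-level extras such as
# statuses, check runs and changed files.
from github import Github, Auth

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
commit = repo.get_commit("7fd1a60b01f91b314f59955a4e4d4e80d8edf11d")
print(commit.sha, commit.commit.message)
print(commit.stats.additions, commit.stats.deletions)
for changed_file in commit.files:        # paginated for very large commits
    print(changed_file.filename, changed_file.status)
commit.create_status(state="success", context="ci/sketch", description="all green")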
| 13,743 | Python | .py | 279 | 41.81362 | 160 | 0.592107 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,371 | Installation.py | PyGithub_PyGithub/github/Installation.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Rigas Papathanasopoulos <rigaspapas@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.Authorization
import github.Event
import github.Gist
import github.GithubObject
import github.Issue
import github.Notification
import github.Organization
import github.PaginatedList
import github.Plan
import github.Repository
import github.UserKey
from github import Consts
from github.Auth import AppAuth
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
from github.PaginatedList import PaginatedList
from github.Requester import Requester
if TYPE_CHECKING:
from github.MainClass import Github
INTEGRATION_PREVIEW_HEADERS = {"Accept": Consts.mediaTypeIntegrationPreview}
class Installation(NonCompletableGithubObject):
"""
This class represents Installations.
The reference can be found here
https://docs.github.com/en/rest/reference/apps#installations
"""
def __init__(
self,
requester: Requester,
headers: dict[str, str | int],
attributes: Any,
completed: bool,
) -> None:
super().__init__(requester, headers, attributes, completed)
auth = self._requester.auth if self._requester is not None else None
# Usually, an Installation is created from a Requester with App authentication
if isinstance(auth, AppAuth):
# But the installation has to authenticate as an installation (e.g. for get_repos())
auth = auth.get_installation_auth(self.id, requester=self._requester)
self._requester = self._requester.withAuth(auth)
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._app_id: Attribute[int] = NotSet
self._target_id: Attribute[int] = NotSet
self._target_type: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value})
def get_github_for_installation(self) -> Github:
return github.Github(**self._requester.kwargs)
@property
def requester(self) -> Requester:
"""
Return my Requester object.
For example, to make requests to API endpoints not yet supported by PyGithub.
"""
return self._requester
@property
def id(self) -> int:
return self._id.value
@property
def app_id(self) -> int:
return self._app_id.value
@property
def target_id(self) -> int:
return self._target_id.value
@property
def target_type(self) -> str:
return self._target_type.value
def get_repos(self) -> PaginatedList[github.Repository.Repository]:
"""
:calls: `GET /installation/repositories <https://docs.github.com/en/rest/reference/integrations/installations#list-repositories>`_
"""
url_parameters: dict[str, Any] = {}
return PaginatedList(
contentClass=github.Repository.Repository,
requester=self._requester,
firstUrl="/installation/repositories",
firstParams=url_parameters,
headers=INTEGRATION_PREVIEW_HEADERS,
list_item="repositories",
)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "app_id" in attributes: # pragma no branch
self._app_id = self._makeIntAttribute(attributes["app_id"])
if "target_id" in attributes: # pragma no branch
self._target_id = self._makeIntAttribute(attributes["target_id"])
if "target_type" in attributes: # pragma no branch
self._target_type = self._makeStringAttribute(attributes["target_type"])
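# --- Usage sketch (appended; not part of the upstream file) -----------------
# Installations are usually reached through GithubIntegration, assuming an
# App id and PEM key (placeholders below). The constructor above swaps the
# requester's AppAuth for installation-scoped auth, so get_repos() just works.
from github import GithubIntegration
from github.Auth import AppAuth

APP_ID = 123456                                       # placeholder
PRIVATE_KEY = "-----BEGIN RSA PRIVATE KEY-----\n..."  # placeholder PEM

integration = GithubIntegration(auth=AppAuth(APP_ID, PRIVATE_KEY))
for installation in integration.get_installations():
    print(installation.id, installation.target_type)
    for repo in installation.get_repos():
        print(" ", repo.full_name)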
| 7,066 | Python | .py | 132 | 48.409091 | 138 | 0.569754 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,372 | InputGitAuthor.py | PyGithub_PyGithub/github/InputGitAuthor.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Nic Dahlquist <nic@snapchat.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import NotSet, Opt, is_defined, is_optional
class InputGitAuthor:
"""
This class represents InputGitAuthors.
"""
def __init__(self, name: str, email: str, date: Opt[str] = NotSet):
assert isinstance(name, str), name
assert isinstance(email, str), email
assert is_optional(date, str), date # @todo Datetime?
self.__name: str = name
self.__email: str = email
self.__date: Opt[str] = date
def __repr__(self) -> str:
return f'InputGitAuthor(name="{self.__name}")'
@property
def _identity(self) -> dict[str, str]:
identity: dict[str, Any] = {
"name": self.__name,
"email": self.__email,
}
if is_defined(self.__date):
identity["date"] = self.__date
return identity
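# --- Usage sketch (appended; not part of the upstream file) -----------------
# A minimal sketch, assuming TOKEN and write access to the repository. The
# date is the optional ISO 8601 string the constructor accepts.
from github import Github, Auth
from github.InputGitAuthor import InputGitAuthor

gh = Github(auth=Auth.Token("TOKEN"))
repo = gh.get_repo("octocat/Hello-World")
author = InputGitAuthor("Jane Doe", "jane@example.com", date="2024-01-01T12:00:00Z")
repo.create_file("docs/note.txt", "add note", "hello", author=author)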
| 3,820 | Python | .py | 61 | 58.983607 | 80 | 0.474667 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam)
| 12,373 | Team.py | PyGithub_PyGithub/github/Team.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Jan Orel <jan.orel@gooddata.com> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Aron Culotta <aronwc@gmail.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2016 mattjmorrison <mattjmorrison@mattjmorrison.com> #
# Copyright 2018 Isuru Fernando <isuruf@gmail.com> #
# Copyright 2018 Jacopo Notarstefano <jacopo.notarstefano@gmail.com> #
# Copyright 2018 James D'Amato <james.j.damato@gmail.com> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Manu Hortet <manuhortet@gmail.com> #
# Copyright 2018 Michał Górny <mgorny@gentoo.org> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Tim Boring <tboring@hearst.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Shibasis Patel <smartshibasish@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Adrian Bridgett <58699309+tl-adrian-bridgett@users.noreply.github.com>#
# Copyright 2020 Andy Grunwald <andygrunwald@gmail.com> #
# Copyright 2020 Gilad Shefer <giladshefer@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 Tal Machani <12785464+talmachani@users.noreply.github.com> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 秋葉 <ambiguous404@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Kevin Grandjean <Muscaw@users.noreply.github.com> #
# Copyright 2023 Mark Amery <markamery@btinternet.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Andrii Kezikov <cheshirez@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
import urllib.parse
from typing import TYPE_CHECKING, Any
from deprecated import deprecated
import github.NamedUser
import github.Organization
import github.PaginatedList
import github.Repository
import github.TeamDiscussion
from github import Consts
from github.GithubException import UnknownObjectException
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt
if TYPE_CHECKING:
from github.Membership import Membership
from github.NamedUser import NamedUser
from github.Organization import Organization
from github.PaginatedList import PaginatedList
from github.Permissions import Permissions
from github.Repository import Repository
from github.TeamDiscussion import TeamDiscussion
class Team(CompletableGithubObject):
"""
This class represents Teams.
The reference can be found here
https://docs.github.com/en/rest/reference/teams
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._members_count: Attribute[int] = NotSet
self._members_url: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
self._description: Attribute[str] = NotSet
self._notification_setting: Attribute[str] = NotSet
self._permission: Attribute[str] = NotSet
self._repos_count: Attribute[int] = NotSet
self._repositories_url: Attribute[str] = NotSet
self._slug: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
self._organization: Attribute[github.Organization.Organization] = NotSet
self._privacy: Attribute[str] = NotSet
self._parent: Attribute[github.Team.Team] = NotSet
self._html_url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "name": self._name.value})
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def members_count(self) -> int:
self._completeIfNotSet(self._members_count)
return self._members_count.value
@property
def members_url(self) -> str:
self._completeIfNotSet(self._members_url)
return self._members_url.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def notification_setting(self) -> str:
self._completeIfNotSet(self._notification_setting)
return self._notification_setting.value
@property
def permission(self) -> str:
self._completeIfNotSet(self._permission)
return self._permission.value
@property
def repos_count(self) -> int:
self._completeIfNotSet(self._repos_count)
return self._repos_count.value
@property
def repositories_url(self) -> str:
self._completeIfNotSet(self._repositories_url)
return self._repositories_url.value
@property
def slug(self) -> str:
self._completeIfNotSet(self._slug)
return self._slug.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def organization(self) -> Organization:
self._completeIfNotSet(self._organization)
return self._organization.value
@property
def privacy(self) -> str:
self._completeIfNotSet(self._privacy)
return self._privacy.value
@property
def parent(self) -> Team:
self._completeIfNotSet(self._parent)
return self._parent.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
def add_to_members(self, member: NamedUser) -> None:
"""
This API call is deprecated. Use `add_membership` instead.
https://docs.github.com/en/rest/reference/teams#add-or-update-team-membership-for-a-user-legacy
:calls: `PUT /teams/{id}/members/{user} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck("PUT", f"{self.url}/members/{member._identity}")
def add_membership(self, member: NamedUser, role: Opt[str] = NotSet) -> None:
"""
:calls: `PUT /teams/{id}/memberships/{user} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(member, github.NamedUser.NamedUser), member
assert role is NotSet or isinstance(role, str), role
if role is not NotSet:
assert role in ["member", "maintainer"]
put_parameters = {
"role": role,
}
else:
put_parameters = {
"role": "member",
}
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/memberships/{member._identity}", input=put_parameters
)
def get_team_membership(self, member: str | NamedUser) -> Membership:
"""
:calls: `GET /orgs/{org}/memberships/team/{team_id}/{username} <https://docs.github.com/en/rest/reference/teams#get-team-membership-for-a-user>`_
"""
assert isinstance(member, str) or isinstance(member, github.NamedUser.NamedUser), member
if isinstance(member, github.NamedUser.NamedUser):
member = member._identity
else:
member = urllib.parse.quote(member)
headers, data = self._requester.requestJsonAndCheck("GET", f"{self.url}/memberships/{member}")
return github.Membership.Membership(self._requester, headers, data, completed=True)
def add_to_repos(self, repo: Repository) -> None:
"""
:calls: `PUT /teams/{id}/repos/{org}/{repo} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck("PUT", f"{self.url}/repos/{repo._identity}")
def get_repo_permission(self, repo: Repository) -> Permissions | None:
"""
:calls: `GET /teams/{id}/repos/{org}/{repo} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(repo, github.Repository.Repository) or isinstance(repo, str), repo
if isinstance(repo, github.Repository.Repository):
repo = repo._identity # type: ignore
else:
repo = urllib.parse.quote(repo)
try:
headers, data = self._requester.requestJsonAndCheck(
"GET",
f"{self.url}/repos/{repo}",
headers={"Accept": Consts.teamRepositoryPermissions},
)
return github.Permissions.Permissions(self._requester, headers, data["permissions"], completed=True)
except UnknownObjectException:
return None
@deprecated(
reason="""
Team.set_repo_permission() is deprecated, use Team.update_team_repository() instead.
"""
)
def set_repo_permission(self, repo: Repository, permission: str) -> None:
"""
:calls: `PUT /teams/{id}/repos/{org}/{repo} <https://docs.github.com/en/rest/reference/teams>`_
:param repo: :class:`github.Repository.Repository`
:param permission: string
:rtype: None
"""
assert isinstance(repo, github.Repository.Repository), repo
put_parameters = {
"permission": permission,
}
headers, data = self._requester.requestJsonAndCheck(
"PUT", f"{self.url}/repos/{repo._identity}", input=put_parameters
)
def update_team_repository(self, repo: Repository, permission: str) -> bool:
"""
:calls: `PUT /orgs/{org}/teams/{team_slug}/repos/{owner}/{repo} <https://docs.github.com/en/rest/reference/teams#check-team-permissions-for-a-repository>`_
"""
assert isinstance(repo, github.Repository.Repository) or isinstance(repo, str), repo
assert isinstance(permission, str), permission
if isinstance(repo, github.Repository.Repository):
repo_url_param = repo._identity
else:
repo_url_param = urllib.parse.quote(repo)
put_parameters = {
"permission": permission,
}
status, _, _ = self._requester.requestJson(
"PUT",
f"{self.organization.url}/teams/{self.slug}/repos/{repo_url_param}",
input=put_parameters,
)
return status == 204
def delete(self) -> None:
"""
:calls: `DELETE /teams/{id} <https://docs.github.com/en/rest/reference/teams#delete-a-team>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self,
name: str,
description: Opt[str] = NotSet,
permission: Opt[str] = NotSet,
privacy: Opt[str] = NotSet,
parent_team_id: Opt[int] = NotSet,
notification_setting: Opt[str] = NotSet,
) -> None:
"""
:calls: `PATCH /teams/{id} <https://docs.github.com/en/rest/reference/teams#update-a-team>`_
"""
assert isinstance(name, str), name
assert description is NotSet or isinstance(description, str), description
assert permission is NotSet or isinstance(permission, str), permission
assert privacy is NotSet or isinstance(privacy, str), privacy
assert parent_team_id is NotSet or isinstance(parent_team_id, (int, type(None))), parent_team_id
assert notification_setting in ["notifications_enabled", "notifications_disabled", NotSet], notification_setting
post_parameters = NotSet.remove_unset_items(
{
"name": name,
"description": description,
"permission": permission,
"privacy": privacy,
"parent_team_id": parent_team_id,
"notification_setting": notification_setting,
}
)
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def get_teams(self) -> PaginatedList[Team]:
"""
:calls: `GET /teams/{id}/teams <https://docs.github.com/en/rest/reference/teams#list-teams>`_
"""
return github.PaginatedList.PaginatedList(
github.Team.Team,
self._requester,
f"{self.url}/teams",
None,
)
def get_discussions(self) -> PaginatedList[TeamDiscussion]:
"""
:calls: `GET /teams/{id}/discussions <https://docs.github.com/en/rest/reference/teams#list-discussions>`_
"""
return github.PaginatedList.PaginatedList(
github.TeamDiscussion.TeamDiscussion,
self._requester,
f"{self.url}/discussions",
None,
headers={"Accept": Consts.mediaTypeTeamDiscussionsPreview},
)
def get_members(self, role: Opt[str] = NotSet) -> PaginatedList[NamedUser]:
"""
:calls: `GET /teams/{id}/members <https://docs.github.com/en/rest/reference/teams#list-team-members>`_
"""
assert role is NotSet or isinstance(role, str), role
url_parameters: dict[str, Any] = {}
if role is not NotSet:
assert role in ["member", "maintainer", "all"]
url_parameters["role"] = role
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/members",
url_parameters,
)
def get_repos(self) -> PaginatedList[Repository]:
"""
:calls: `GET /teams/{id}/repos <https://docs.github.com/en/rest/reference/teams>`_
"""
return github.PaginatedList.PaginatedList(
github.Repository.Repository, self._requester, f"{self.url}/repos", None
)
def invitations(self) -> PaginatedList[NamedUser]:
"""
:calls: `GET /teams/{id}/invitations <https://docs.github.com/en/rest/reference/teams#members>`_
"""
return github.PaginatedList.PaginatedList(
github.NamedUser.NamedUser,
self._requester,
f"{self.url}/invitations",
None,
headers={"Accept": Consts.mediaTypeOrganizationInvitationPreview},
)
def has_in_members(self, member: NamedUser) -> bool:
"""
:calls: `GET /teams/{id}/members/{user} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(member, github.NamedUser.NamedUser), member
status, headers, data = self._requester.requestJson("GET", f"{self.url}/members/{member._identity}")
return status == 204
def has_in_repos(self, repo: Repository) -> bool:
"""
:calls: `GET /teams/{id}/repos/{owner}/{repo} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(repo, github.Repository.Repository), repo
status, headers, data = self._requester.requestJson("GET", f"{self.url}/repos/{repo._identity}")
return status == 204
def remove_membership(self, member: NamedUser) -> None:
"""
:calls: `DELETE /teams/{team_id}/memberships/{username} <https://docs.github.com/en/rest/reference/teams#remove-team-membership-for-a-user>`_
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/memberships/{member._identity}")
def remove_from_members(self, member: NamedUser) -> None:
"""
This API call is deprecated. Use `remove_membership` instead:
https://docs.github.com/en/rest/reference/teams#add-or-update-team-membership-for-a-user-legacy
:calls: `DELETE /teams/{id}/members/{user} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(member, github.NamedUser.NamedUser), member
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/members/{member._identity}")
def remove_from_repos(self, repo: Repository) -> None:
"""
:calls: `DELETE /teams/{id}/repos/{owner}/{repo} <https://docs.github.com/en/rest/reference/teams>`_
"""
assert isinstance(repo, github.Repository.Repository), repo
headers, data = self._requester.requestJsonAndCheck("DELETE", f"{self.url}/repos/{repo._identity}")
@property
def _identity(self) -> int:
return self.id
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "members_count" in attributes: # pragma no branch
self._members_count = self._makeIntAttribute(attributes["members_count"])
if "members_url" in attributes: # pragma no branch
self._members_url = self._makeStringAttribute(attributes["members_url"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "notification_setting" in attributes: # pragma no branch
self._notification_setting = self._makeStringAttribute(attributes["notification_setting"])
if "permission" in attributes: # pragma no branch
self._permission = self._makeStringAttribute(attributes["permission"])
if "repos_count" in attributes: # pragma no branch
self._repos_count = self._makeIntAttribute(attributes["repos_count"])
if "repositories_url" in attributes: # pragma no branch
self._repositories_url = self._makeStringAttribute(attributes["repositories_url"])
if "slug" in attributes: # pragma no branch
self._slug = self._makeStringAttribute(attributes["slug"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "organization" in attributes: # pragma no branch
self._organization = self._makeClassAttribute(github.Organization.Organization, attributes["organization"])
if "privacy" in attributes: # pragma no branch
self._privacy = self._makeStringAttribute(attributes["privacy"])
if "parent" in attributes: # pragma no branch
self._parent = self._makeClassAttribute(github.Team.Team, attributes["parent"])
if "html_url" in attributes:
self._html_url = self._makeStringAttribute(attributes["html_url"])
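A minimal usage sketch for the team membership and repository methods above, assuming a valid token and hypothetical org, team slug, user, and repository names:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))          # placeholder token
org = g.get_organization("my-org")              # hypothetical organization
team = org.get_team_by_slug("platform")         # hypothetical team slug
user = g.get_user("octocat")
team.add_membership(user, role="maintainer")    # PUT /teams/{id}/memberships/{user}
repo = g.get_repo("my-org/service")             # hypothetical repository
ok = team.update_team_repository(repo, "push")  # True on HTTP 204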
| 21,929 | Python | .py | 421 | 44.254157 | 163 | 0.608736 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,374 | SelfHostedActionsRunner.py | PyGithub_PyGithub/github/SelfHostedActionsRunner.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 Victor Zeng <zacker150@users.noreply.github.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class SelfHostedActionsRunner(NonCompletableGithubObject):
"""
This class represents Self-hosted GitHub Actions Runners.
The reference can be found at
https://docs.github.com/en/free-pro-team@latest/rest/reference/actions#self-hosted-runners
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._os: Attribute[str] = NotSet
self._status: Attribute[str] = NotSet
self._busy: Attribute[bool] = NotSet
self._labels: Attribute[list[dict[str, int | str]]] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value})
@property
def id(self) -> int:
return self._id.value
@property
def name(self) -> str:
return self._name.value
@property
def os(self) -> str:
return self._os.value
@property
def status(self) -> str:
return self._status.value
@property
def busy(self) -> bool:
return self._busy.value
def labels(self) -> list[dict[str, int | str]]:
return self._labels.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "os" in attributes: # pragma no branch
self._os = self._makeStringAttribute(attributes["os"])
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
if "busy" in attributes:
self._busy = self._makeBoolAttribute(attributes["busy"])
if "labels" in attributes:
self._labels = self._makeListOfDictsAttribute(attributes["labels"])
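A short sketch of iterating a repository's self-hosted runners, assuming a valid token and a hypothetical repository name; note that labels() is a plain method here, not a property:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
repo = g.get_repo("my-org/service")     # hypothetical repository
for runner in repo.get_self_hosted_runners():
    print(runner.name, runner.os, runner.status, runner.busy)
    print([label["name"] for label in runner.labels()])  # labels() is a method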
| 5,051 | Python | .py | 85 | 54.705882 | 94 | 0.525051 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,375 | Authorization.py | PyGithub_PyGithub/github/Authorization.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import TYPE_CHECKING, Any
import github.AuthorizationApplication
import github.GithubObject
from github.GithubObject import Attribute, NotSet, Opt, _NotSetType
if TYPE_CHECKING:
from github.AuthorizationApplication import AuthorizationApplication
class Authorization(github.GithubObject.CompletableGithubObject):
"""
This class represents Authorizations.
The reference can be found here
https://docs.github.com/en/enterprise-server@3.0/rest/reference/oauth-authorizations
"""
def _initAttributes(self) -> None:
self._app: Attribute[AuthorizationApplication] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._id: Attribute[int] = NotSet
self._note: Attribute[str | None] = NotSet
self._note_url: Attribute[str | None] = NotSet
self._scopes: Attribute[str] = NotSet
self._token: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"scopes": self._scopes.value})
@property
def app(self) -> AuthorizationApplication:
self._completeIfNotSet(self._app)
return self._app.value
@property
def created_at(self) -> datetime:
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def note(self) -> str | None:
self._completeIfNotSet(self._note)
return self._note.value
@property
def note_url(self) -> str | None:
self._completeIfNotSet(self._note_url)
return self._note_url.value
@property
def scopes(self) -> str:
self._completeIfNotSet(self._scopes)
return self._scopes.value
@property
def token(self) -> str:
self._completeIfNotSet(self._token)
return self._token.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def delete(self) -> None:
"""
:calls: `DELETE /authorizations/{id} <https://docs.github.com/en/developers/apps/authorizing-oauth-apps>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self,
scopes: Opt[list[str]] = NotSet,
add_scopes: Opt[list[str]] = NotSet,
remove_scopes: Opt[list[str]] = NotSet,
note: Opt[str] = NotSet,
note_url: Opt[str] = NotSet,
) -> None:
"""
:calls: `PATCH /authorizations/{id} <https://docs.github.com/en/developers/apps/authorizing-oauth-apps>`_
:param scopes: list of string
:param add_scopes: list of string
:param remove_scopes: list of string
:param note: string
:param note_url: string
:rtype: None
"""
assert isinstance(scopes, _NotSetType) or all(isinstance(element, str) for element in scopes), scopes
assert isinstance(add_scopes, _NotSetType) or all(
isinstance(element, str) for element in add_scopes
), add_scopes
assert isinstance(remove_scopes, _NotSetType) or all(
isinstance(element, str) for element in remove_scopes
), remove_scopes
assert isinstance(note, (_NotSetType, str)), note
assert isinstance(note_url, (_NotSetType, str)), note_url
post_parameters = NotSet.remove_unset_items(
{
"scopes": scopes,
"add_scopes": add_scopes,
"remove_scopes": remove_scopes,
"note": note,
"note_url": note_url,
}
)
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "app" in attributes: # pragma no branch
self._app = self._makeClassAttribute(
github.AuthorizationApplication.AuthorizationApplication,
attributes["app"],
)
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "note" in attributes: # pragma no branch
self._note = self._makeStringAttribute(attributes["note"])
if "note_url" in attributes: # pragma no branch
self._note_url = self._makeStringAttribute(attributes["note_url"])
if "scopes" in attributes: # pragma no branch
self._scopes = self._makeListOfStringsAttribute(attributes["scopes"])
if "token" in attributes: # pragma no branch
self._token = self._makeStringAttribute(attributes["token"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
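A hedged sketch of editing and deleting an Authorization. The OAuth authorizations API behind this class is only available on GitHub Enterprise Server, so the `authorization` object below is assumed to have been obtained from such an instance; only methods defined in this class are used:

# `authorization` stands in for an Authorization retrieved from a GHES instance
authorization.edit(add_scopes=["repo"], note="CI token")  # PATCH /authorizations/{id}
print(authorization.scopes, authorization.note)
authorization.delete()                                    # DELETE /authorizations/{id}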
| 8,589 | Python | .py | 167 | 44.538922 | 114 | 0.568231 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,376 | CheckRunOutput.py | PyGithub_PyGithub/github/CheckRunOutput.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Dhruv Manilawala <dhruvmanila@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class CheckRunOutput(NonCompletableGithubObject):
"""
    This class represents the output of a check run.
"""
def _initAttributes(self) -> None:
self._annotations_count: Attribute[int] = NotSet
self._annotations_url: Attribute[str] = NotSet
self._summary: Attribute[str] = NotSet
self._text: Attribute[str] = NotSet
self._title: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"title": self._title.value})
@property
def annotations_count(self) -> int:
return self._annotations_count.value
@property
def annotations_url(self) -> str:
return self._annotations_url.value
@property
def summary(self) -> str:
return self._summary.value
@property
def text(self) -> str:
return self._text.value
@property
def title(self) -> str:
return self._title.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "annotations_count" in attributes: # pragma no branch
self._annotations_count = self._makeIntAttribute(attributes["annotations_count"])
if "annotations_url" in attributes: # pragma no branch
self._annotations_url = self._makeStringAttribute(attributes["annotations_url"])
if "summary" in attributes: # pragma no branch
self._summary = self._makeStringAttribute(attributes["summary"])
if "text" in attributes: # pragma no branch
self._text = self._makeStringAttribute(attributes["text"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
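A sketch of reading check-run output for a commit, assuming a valid token and hypothetical repository/SHA placeholders:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))                        # placeholder token
commit = g.get_repo("my-org/service").get_commit("<sha>")     # hypothetical repo and sha
for check_run in commit.get_check_runs():
    output = check_run.output                                 # a CheckRunOutput instance
    print(output.title, output.summary, output.annotations_count)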
| 4,882 | Python | .py | 78 | 58.205128 | 93 | 0.527337 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,377 | Enterprise.py | PyGithub_PyGithub/github/Enterprise.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Mark Amery <markamery@btinternet.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2023 YugoHino <henom06@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
import urllib.parse
from typing import Any, Dict
from github.EnterpriseConsumedLicenses import EnterpriseConsumedLicenses
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
from github.Requester import Requester
class Enterprise(NonCompletableGithubObject):
"""
This class represents Enterprises.
    Such objects do not exist in the GitHub API, so this class merely collects all endpoints that start with
    /enterprises/{enterprise}/. See methods below for specific endpoints and docs.
https://docs.github.com/en/enterprise-cloud@latest/rest/enterprise-admin?apiVersion=2022-11-28
"""
def __init__(
self,
requester: Requester,
enterprise: str,
):
enterprise = urllib.parse.quote(enterprise)
super().__init__(requester, {}, {"enterprise": enterprise, "url": f"/enterprises/{enterprise}"}, True)
def _initAttributes(self) -> None:
self._enterprise: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"enterprise": self._enterprise.value})
@property
def enterprise(self) -> str:
return self._enterprise.value
@property
def url(self) -> str:
return self._url.value
def get_consumed_licenses(self) -> EnterpriseConsumedLicenses:
"""
:calls: `GET /enterprises/{enterprise}/consumed-licenses <https://docs.github.com/en/enterprise-cloud@latest/rest/enterprise-admin/license#list-enterprise-consumed-licenses>`_
"""
headers, data = self._requester.requestJsonAndCheck("GET", self.url + "/consumed-licenses")
if "url" not in data:
data["url"] = self.url + "/consumed-licenses"
return EnterpriseConsumedLicenses(self._requester, headers, data, completed=True)
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "enterprise" in attributes: # pragma no branch
self._enterprise = self._makeStringAttribute(attributes["enterprise"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
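A sketch of the single endpoint this class wraps, assuming a token with enterprise admin access and a hypothetical enterprise slug; the seat field names printed below are assumptions based on the consumed-licenses REST payload:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))           # placeholder token
enterprise = g.get_enterprise("my-enterprise")   # hypothetical enterprise slug
consumed = enterprise.get_consumed_licenses()
print(consumed.total_seats_consumed, consumed.total_seats_purchased)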
| 5,252 | Python | .py | 81 | 60.654321 | 183 | 0.54489 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,378 | RateLimit.py | PyGithub_PyGithub/github/RateLimit.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.Rate
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.Rate import Rate
class RateLimit(NonCompletableGithubObject):
"""
This class represents RateLimits.
The reference can be found here
https://docs.github.com/en/rest/reference/rate-limit
"""
def _initAttributes(self) -> None:
self._core: Attribute[Rate] = NotSet
self._search: Attribute[Rate] = NotSet
self._graphql: Attribute[Rate] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"core": self._core.value})
@property
def core(self) -> Rate:
"""
Rate limit for the non-search-related API.
"""
return self._core.value
@property
def search(self) -> Rate:
"""
Rate limit for the Search API.
"""
return self._search.value
@property
def graphql(self) -> Rate:
"""
        (Experimental) Rate limit for the GraphQL API; use with caution.
"""
return self._graphql.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "core" in attributes: # pragma no branch
self._core = self._makeClassAttribute(github.Rate.Rate, attributes["core"])
if "search" in attributes: # pragma no branch
self._search = self._makeClassAttribute(github.Rate.Rate, attributes["search"])
if "graphql" in attributes: # pragma no branch
self._graphql = self._makeClassAttribute(github.Rate.Rate, attributes["graphql"])
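A sketch of inspecting the three rates exposed above, assuming a valid token:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
limits = g.get_rate_limit()             # returns a RateLimit
print(limits.core.remaining, "of", limits.core.limit, "until", limits.core.reset)
print(limits.search.remaining, limits.graphql.remaining)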
| 4,742 | Python | .py | 81 | 54.308642 | 93 | 0.520121 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,379 | DependabotAlertAdvisory.py | PyGithub_PyGithub/github/DependabotAlertAdvisory.py |
############################ Copyrights and license ############################
# #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Thomas Cooper <coopernetes@proton.me> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.AdvisoryBase
import github.DependabotAlertVulnerability
from github.GithubObject import Attribute, NotSet
if TYPE_CHECKING:
from github.DependabotAlertVulnerability import DependabotAlertVulnerability
class DependabotAlertAdvisory(github.AdvisoryBase.AdvisoryBase):
"""
    This class represents the security advisory that a Dependabot alert flags a package as vulnerable to.
The reference can be found here
https://docs.github.com/en/rest/dependabot/alerts
"""
def _initAttributes(self) -> None:
super()._initAttributes()
self._references: Attribute[list[dict]] = NotSet
self._vulnerabilities: Attribute[list[DependabotAlertVulnerability]] = NotSet
@property
def references(self) -> list[dict]:
return self._references.value
@property
def vulnerabilities(self) -> list[DependabotAlertVulnerability]:
return self._vulnerabilities.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "references" in attributes:
self._references = self._makeListOfDictsAttribute(
attributes["references"],
)
if "vulnerabilities" in attributes:
self._vulnerabilities = self._makeListOfClassesAttribute(
github.DependabotAlertVulnerability.DependabotAlertVulnerability,
attributes["vulnerabilities"],
)
super()._useAttributes(attributes)
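A sketch of reaching this advisory object from a Dependabot alert, assuming a token with security access to a hypothetical repository; ghsa_id and severity come from the AdvisoryBase parent class:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
repo = g.get_repo("my-org/service")     # hypothetical repository
for alert in repo.get_dependabot_alerts():
    advisory = alert.security_advisory  # a DependabotAlertAdvisory
    print(advisory.ghsa_id, advisory.severity, len(advisory.vulnerabilities))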
| 3,411 | Python | .py | 57 | 54.982456 | 114 | 0.5386 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,380 | License.py | PyGithub_PyGithub/github/License.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Aaron Levine <allevin@sandia.gov> #
# Copyright 2017 Mike Miller <github@mikeage.net> #
# Copyright 2018 Darragh Bailey <daragh.bailey@gmail.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class License(CompletableGithubObject):
"""
This class represents Licenses.
The reference can be found here
https://docs.github.com/en/rest/reference/licenses
"""
def _initAttributes(self) -> None:
self._key: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
self._spdx_id: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
self._html_url: Attribute[str] = NotSet
self._description: Attribute[str] = NotSet
self._implementation: Attribute[str] = NotSet
self._body: Attribute[str] = NotSet
self._permissions: Attribute[list[str]] = NotSet
self._conditions: Attribute[list[str]] = NotSet
self._limitations: Attribute[list[str]] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value})
@property
def key(self) -> str:
self._completeIfNotSet(self._key)
return self._key.value
@property
def name(self) -> str:
self._completeIfNotSet(self._name)
return self._name.value
@property
def spdx_id(self) -> str:
self._completeIfNotSet(self._spdx_id)
return self._spdx_id.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def html_url(self) -> str:
self._completeIfNotSet(self._html_url)
return self._html_url.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def implementation(self) -> str:
self._completeIfNotSet(self._implementation)
return self._implementation.value
@property
def body(self) -> str:
self._completeIfNotSet(self._body)
return self._body.value
@property
def permissions(self) -> list[str]:
self._completeIfNotSet(self._permissions)
return self._permissions.value
@property
def conditions(self) -> list[str]:
self._completeIfNotSet(self._conditions)
return self._conditions.value
@property
def limitations(self) -> list[str]:
self._completeIfNotSet(self._limitations)
return self._limitations.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "key" in attributes: # pragma no branch
self._key = self._makeStringAttribute(attributes["key"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "spdx_id" in attributes: # pragma no branch
self._spdx_id = self._makeStringAttribute(attributes["spdx_id"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "html_url" in attributes: # pragma no branch
self._html_url = self._makeStringAttribute(attributes["html_url"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "implementation" in attributes: # pragma no branch
self._implementation = self._makeStringAttribute(attributes["implementation"])
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "permissions" in attributes: # pragma no branch
self._permissions = self._makeListOfStringsAttribute(attributes["permissions"])
if "conditions" in attributes: # pragma no branch
self._conditions = self._makeListOfStringsAttribute(attributes["conditions"])
if "limitations" in attributes: # pragma no branch
self._limitations = self._makeListOfStringsAttribute(attributes["limitations"])
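A sketch of fetching one license definition by key, assuming a valid token:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
mit = g.get_license("mit")              # GET /licenses/{license}
print(mit.name, mit.spdx_id)
print(mit.permissions, mit.conditions, mit.limitations)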
| 7,410 | Python | .py | 132 | 50.340909 | 91 | 0.564834 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,381 | Plan.py | PyGithub_PyGithub/github/Plan.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Geoff Low <glow@mdsol.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class Plan(NonCompletableGithubObject):
"""
This class represents Plans.
"""
def _initAttributes(self) -> None:
self._collaborators: Attribute[int] = NotSet
self._name: Attribute[str] = NotSet
self._private_repos: Attribute[int] = NotSet
self._space: Attribute[int] = NotSet
self._filled_seats: Attribute[int] = NotSet
self._seats: Attribute[int] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value})
@property
def collaborators(self) -> int:
return self._collaborators.value
@property
def name(self) -> str:
return self._name.value
@property
def private_repos(self) -> int:
return self._private_repos.value
@property
def space(self) -> int:
return self._space.value
@property
def filled_seats(self) -> int:
return self._filled_seats.value
@property
def seats(self) -> int:
return self._seats.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "collaborators" in attributes: # pragma no branch
self._collaborators = self._makeIntAttribute(attributes["collaborators"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "private_repos" in attributes: # pragma no branch
self._private_repos = self._makeIntAttribute(attributes["private_repos"])
if "space" in attributes: # pragma no branch
self._space = self._makeIntAttribute(attributes["space"])
if "seats" in attributes: # pragma no branch
self._seats = self._makeIntAttribute(attributes["seats"])
if "filled_seats" in attributes: # pragma no branch
self._filled_seats = self._makeIntAttribute(attributes["filled_seats"])
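A sketch of reading a plan; Plan objects are reached through an account rather than fetched directly (the org name is hypothetical):

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))      # placeholder token
plan = g.get_organization("my-org").plan    # hypothetical organization
print(plan.name, plan.private_repos, plan.filled_seats, plan.seats)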
| 5,001 | Python | .py | 83 | 55.53012 | 85 | 0.52212 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,382 | Variable.py | PyGithub_PyGithub/github/Variable.py |
############################ Copyrights and license ############################
# #
# Copyright 2023 Andrew Dawes <53574062+AndrewJDawes@users.noreply.github.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Mauricio Alejandro Martínez Pacheco <mauricio.martinez@premise.com>#
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class Variable(CompletableGithubObject):
"""
This class represents a GitHub variable.
The reference can be found here
https://docs.github.com/en/rest/actions/variables
"""
def _initAttributes(self) -> None:
self._name: Attribute[str] = NotSet
self._value: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._variables_url: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self.name})
@property
def name(self) -> str:
"""
:type: string
"""
self._completeIfNotSet(self._name)
return self._name.value
@property
def value(self) -> str:
"""
:type: string
"""
self._completeIfNotSet(self._value)
return self._value.value
@property
def created_at(self) -> datetime:
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime:
"""
:type: datetime.datetime
"""
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def variables_url(self) -> str:
"""
:type: string
"""
return self._variables_url.value
@property
def url(self) -> str:
"""
:type: string
"""
        # Construct url from variables_url and name if self._url is not set
if self._url is NotSet:
self._url = self._makeStringAttribute(self.variables_url + "/" + self.name)
return self._url.value
def edit(self, value: str) -> bool:
"""
:calls: `PATCH /repos/{owner}/{repo}/actions/variables/{variable_name} <https://docs.github.com/en/rest/reference/actions/variables#update-a-repository-variable>`_
:param value: string
:rtype: bool
"""
assert isinstance(value, str), value
patch_parameters = {
"name": self.name,
"value": value,
}
status, _, _ = self._requester.requestJson(
"PATCH",
self.url,
input=patch_parameters,
)
return status == 204
def delete(self) -> None:
"""
:calls: `DELETE {variable_url} <https://docs.github.com/en/rest/actions/variables>`_
:rtype: None
"""
self._requester.requestJsonAndCheck("DELETE", self.url)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "name" in attributes:
self._name = self._makeStringAttribute(attributes["name"])
if "value" in attributes:
self._value = self._makeStringAttribute(attributes["value"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes:
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "variables_url" in attributes:
self._variables_url = self._makeStringAttribute(attributes["variables_url"])
if "url" in attributes:
self._url = self._makeStringAttribute(attributes["url"])
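A sketch of the variable lifecycle against a hypothetical repository, assuming the token can administer it:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))                    # placeholder token
repo = g.get_repo("my-org/service")                       # hypothetical repository
variable = repo.create_variable("DEPLOY_ENV", "staging")  # returns a Variable
variable.edit("production")                               # PATCH; True on HTTP 204
variable.delete()                                         # DELETE the variable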
| 5,618 | Python | .py | 124 | 38.419355 | 171 | 0.537984 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,383 | StatsPunchCard.py | PyGithub_PyGithub/github/StatsPunchCard.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Dominic Davis-Foster <dominic@davis-foster.co.uk> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict, Tuple
import github.GithubObject
import github.NamedUser # TODO remove unused
class StatsPunchCard(github.GithubObject.NonCompletableGithubObject):
"""
This class represents StatsPunchCards.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-the-hourly-commit-count-for-each-day
"""
_dict: Dict[Tuple[int, int], int]
def get(self, day: int, hour: int) -> int:
"""
Get a specific element.
"""
return self._dict[(day, hour)]
def _initAttributes(self) -> None:
self._dict = {}
def _useAttributes(self, attributes: Any) -> None:
for day, hour, commits in attributes:
self._dict[(day, hour)] = commits
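A sketch of querying the punch card, where day 0 is Sunday and hours run 0-23, assuming a valid token:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
punch_card = g.get_repo("PyGithub/PyGithub").get_stats_punch_card()
print(punch_card.get(1, 9))             # commit count on Mondays at 09:00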
| 3,845 | Python | .py | 58 | 63.465517 | 92 | 0.492719 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,384 | CodeScanAlertInstanceLocation.py | PyGithub_PyGithub/github/CodeScanAlertInstanceLocation.py |
############################ Copyrights and license ############################
# #
# Copyright 2020 Dhruv Manilawala <dhruvmanila@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Eric Nieuwland <eric.nieuwland@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class CodeScanAlertInstanceLocation(NonCompletableGithubObject):
"""
This class represents code scanning alert instance locations.
The reference can be found here
https://docs.github.com/en/rest/reference/code-scanning.
"""
def _initAttributes(self) -> None:
self._path: Attribute[str] = NotSet
self._start_line: Attribute[int] = NotSet
self._start_column: Attribute[int] = NotSet
self._end_line: Attribute[int] = NotSet
self._end_column: Attribute[int] = NotSet
def __str__(self) -> str:
return f"{self.path} @ l{self.start_line}:c{self.start_column}-l{self.end_line}:c{self.end_column}"
def __repr__(self) -> str:
return self.get__repr__(
{
"path": self.path,
"start_line": self.start_line,
"start_column": self.start_column,
"end_line": self.end_line,
"end_column": self.end_column,
}
)
@property
def path(self) -> str:
return self._path.value
@property
def start_line(self) -> int:
return self._start_line.value
@property
def start_column(self) -> int:
return self._start_column.value
@property
def end_line(self) -> int:
return self._end_line.value
@property
def end_column(self) -> int:
return self._end_column.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "path" in attributes: # pragma no branch
self._path = self._makeStringAttribute(attributes["path"])
if "start_line" in attributes: # pragma no branch
self._start_line = self._makeIntAttribute(attributes["start_line"])
if "start_column" in attributes: # pragma no branch
self._start_column = self._makeIntAttribute(attributes["start_column"])
if "end_line" in attributes: # pragma no branch
self._end_line = self._makeIntAttribute(attributes["end_line"])
if "end_column" in attributes: # pragma no branch
self._end_column = self._makeIntAttribute(attributes["end_column"])
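A sketch of reaching a location from a code scanning alert, assuming code scanning is enabled on a hypothetical repository and the token can read its alerts:

from github import Auth, Github

g = Github(auth=Auth.Token("<token>"))  # placeholder token
repo = g.get_repo("my-org/service")     # hypothetical repository
for alert in repo.get_codescan_alerts():
    location = alert.most_recent_instance.location  # a CodeScanAlertInstanceLocation
    print(f"{alert.rule.id}: {location}")  # __str__ renders path @ l<line>:c<col>-l<line>:c<col>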
| 4,458 | Python | .py | 79 | 50.379747 | 107 | 0.52154 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,385 | OrganizationSecret.py | PyGithub_PyGithub/github/OrganizationSecret.py |
############################ Copyrights and license ############################
# #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Mauricio Alejandro Martínez Pacheco <mauricio.martinez@premise.com>#
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Thomas Crowley <15927917+thomascrowley@users.noreply.github.com>#
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
from github.GithubObject import Attribute, NotSet
from github.PaginatedList import PaginatedList
from github.Repository import Repository
from github.Secret import Secret
class OrganizationSecret(Secret):
"""
    This class represents an organization-level GitHub secret.
The reference can be found here
https://docs.github.com/en/rest/actions/secrets
"""
def _initAttributes(self) -> None:
self._name: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._visibility: Attribute[str] = NotSet
self._selected_repositories: Attribute[PaginatedList[Repository]] = NotSet
self._selected_repositories_url: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
@property
def visibility(self) -> str:
"""
:type: string
"""
self._completeIfNotSet(self._visibility)
return self._visibility.value
@property
def selected_repositories(self) -> PaginatedList[Repository]:
return PaginatedList(
Repository,
self._requester,
self._selected_repositories_url.value,
None,
list_item="repositories",
)
def edit(
self,
value: str,
visibility: str = "all",
secret_type: str = "actions",
) -> bool:
"""
:calls: `PATCH /orgs/{org}/{secret_type}/secrets/{variable_name} <https://docs.github.com/en/rest/reference/actions/secrets#update-an-organization-variable>`_
        :param value: string
        :param visibility: string
        :param secret_type: string, either "actions" or "dependabot"
:rtype: bool
"""
assert isinstance(value, str), value
assert isinstance(visibility, str), visibility
assert secret_type in ["actions", "dependabot"], "secret_type should be actions or dependabot"
patch_parameters: Dict[str, Any] = {
"name": self.name,
"value": value,
"visibility": visibility,
}
status, _, _ = self._requester.requestJson(
"PATCH",
f"{self.url}/{secret_type}/secrets/{self.name}",
input=patch_parameters,
)
return status == 204
def add_repo(self, repo: Repository) -> bool:
"""
        :calls: `PUT {org_url}/actions/secrets/{secret_name} <https://docs.github.com/en/rest/actions/secrets#add-selected-repository-to-an-organization-secret>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("PUT", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def remove_repo(self, repo: Repository) -> bool:
"""
        :calls: `DELETE {org_url}/actions/secrets/{secret_name} <https://docs.github.com/en/rest/actions/secrets#remove-selected-repository-from-an-organization-secret>`_
:param repo: github.Repository.Repository
:rtype: bool
"""
if self.visibility != "selected":
return False
self._requester.requestJsonAndCheck("DELETE", f"{self._selected_repositories_url.value}/{repo.id}")
return True
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "name" in attributes:
self._name = self._makeStringAttribute(attributes["name"])
if "created_at" in attributes:
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes:
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "visibility" in attributes:
self._visibility = self._makeStringAttribute(attributes["visibility"])
if "selected_repositories_url" in attributes:
self._selected_repositories_url = self._makeStringAttribute(attributes["selected_repositories_url"])
if "url" in attributes:
self._url = self._makeStringAttribute(attributes["url"])
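# Usage sketch: rotate an organization secret and scope it to a single
# repository. The org, repo and secret names are placeholders, and this
# assumes Organization.get_secret() is available in this PyGithub version.
if __name__ == "__main__":
    from github import Auth, Github

    gh = Github(auth=Auth.Token("<access-token>"))
    org = gh.get_organization("my-org")
    secret = org.get_secret("MY_SECRET")  # returns an OrganizationSecret
    secret.edit(value="new-value", visibility="selected")
    # add_repo() only succeeds when visibility is "selected"
    secret.add_repo(org.get_repo("my-repo"))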
| 6,255 | Python | .py | 122 | 44.114754 | 166 | 0.571102 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,386 | TimelineEvent.py | PyGithub_PyGithub/github/TimelineEvent.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Aaron Levine <allevin@sandia.gov> #
# Copyright 2017 Mike Miller <github@mikeage.net> #
# Copyright 2018 Darragh Bailey <daragh.bailey@gmail.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Nick Campbell <nicholas.j.campbell@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.NamedUser
import github.TimelineEventSource
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class TimelineEvent(NonCompletableGithubObject):
"""
This class represents IssueTimelineEvents.
The reference can be found here
https://docs.github.com/en/rest/reference/issues#timeline
"""
def _initAttributes(self) -> None:
self._actor: Attribute[github.NamedUser.NamedUser] = NotSet
self._commit_id: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._event: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._node_id: Attribute[str] = NotSet
self._commit_url: Attribute[str] = NotSet
self._source: Attribute[github.TimelineEventSource.TimelineEventSource] = NotSet
        self._body: Attribute[str] = NotSet
        self._author_association: Attribute[str] = NotSet
        self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value})
@property
def actor(self) -> github.NamedUser.NamedUser:
return self._actor.value
@property
def commit_id(self) -> str:
return self._commit_id.value
@property
def created_at(self) -> datetime:
return self._created_at.value
@property
def event(self) -> str:
return self._event.value
@property
def id(self) -> int:
return self._id.value
@property
def node_id(self) -> str:
return self._node_id.value
@property
def commit_url(self) -> str:
return self._commit_url.value
@property
def source(self) -> github.TimelineEventSource.TimelineEventSource | None:
# only available on `cross-referenced` events.
if self.event == "cross-referenced" and self._source is not NotSet:
return self._source.value
return None
@property
def body(self) -> str | None:
if self.event == "commented" and self._body is not NotSet:
return self._body.value
return None
@property
def author_association(self) -> str | None:
if self.event == "commented" and self._author_association is not NotSet:
return self._author_association.value
return None
@property
def url(self) -> str:
return self._url.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["actor"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "commit_url" in attributes: # pragma no branch
self._commit_url = self._makeStringAttribute(attributes["commit_url"])
if "source" in attributes: # pragma no branch
self._source = self._makeClassAttribute(
github.TimelineEventSource.TimelineEventSource, attributes["source"]
)
if "body" in attributes: # pragma no branch
self._body = self._makeStringAttribute(attributes["body"])
if "author_association" in attributes: # pragma no branch
self._author_association = self._makeStringAttribute(attributes["author_association"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
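# Usage sketch: walk an issue's timeline; as the properties above encode,
# `body` is only populated for "commented" events and `source` only for
# "cross-referenced" events. "owner/repo" and the issue number are placeholders.
if __name__ == "__main__":
    from github import Auth, Github

    issue = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo").get_issue(1)
    for event in issue.get_timeline():
        if event.event == "cross-referenced" and event.source is not None:
            print(event.created_at, "referenced by", event.source.issue)
        elif event.event == "commented":
            print(event.created_at, event.actor.login, event.body)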
| 7,452 | Python | .py | 132 | 50.719697 | 99 | 0.567338 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,387 | Tag.py | PyGithub_PyGithub/github/Tag.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import github.Commit
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
if TYPE_CHECKING:
from github.Commit import Commit
class Tag(NonCompletableGithubObject):
"""
This class represents Tags.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#list-repository-tags
"""
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value, "commit": self._commit.value})
def _initAttributes(self) -> None:
self._commit: Attribute[Commit] = NotSet
self._name: Attribute[str] = NotSet
self._tarball_url: Attribute[str] = NotSet
self._zipball_url: Attribute[str] = NotSet
@property
def commit(self) -> Commit:
return self._commit.value
@property
def name(self) -> str:
return self._name.value
@property
def tarball_url(self) -> str:
return self._tarball_url.value
@property
def zipball_url(self) -> str:
return self._zipball_url.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "commit" in attributes: # pragma no branch
self._commit = self._makeClassAttribute(github.Commit.Commit, attributes["commit"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "tarball_url" in attributes: # pragma no branch
self._tarball_url = self._makeStringAttribute(attributes["tarball_url"])
if "zipball_url" in attributes: # pragma no branch
self._zipball_url = self._makeStringAttribute(attributes["zipball_url"])
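# Usage sketch: list a repository's tags with their commit SHAs and archive
# URLs. "owner/repo" and the token are placeholders.
if __name__ == "__main__":
    from github import Auth, Github

    repo = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo")
    for tag in repo.get_tags():
        print(tag.name, tag.commit.sha, tag.tarball_url)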
| 4,991 | Python | .py | 80 | 58.45 | 95 | 0.527778 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,388 | IssueEvent.py | PyGithub_PyGithub/github/IssueEvent.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Aaron L. Levine <allevin@sandia.gov> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.GithubObject
import github.Issue
import github.Label
import github.Milestone
import github.NamedUser
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
class IssueEvent(CompletableGithubObject):
"""
This class represents IssueEvents.
The reference can be found here
https://docs.github.com/en/rest/reference/issues#events
"""
def _initAttributes(self) -> None:
self._actor: Attribute[github.NamedUser.NamedUser] = NotSet
self._commit_id: Attribute[str] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._event: Attribute[str] = NotSet
self._id: Attribute[int] = NotSet
self._issue: Attribute[github.Issue.Issue] = NotSet
self._url: Attribute[str] = NotSet
self._node_id: Attribute[str] = NotSet
self._commit_url: Attribute[str] = NotSet
self._label: Attribute[github.Label.Label] = NotSet
self._assignee: Attribute[github.NamedUser.NamedUser] = NotSet
self._assigner: Attribute[github.NamedUser.NamedUser] = NotSet
self._review_requester: Attribute[github.NamedUser.NamedUser] = NotSet
self._requested_reviewer: Attribute[github.NamedUser.NamedUser] = NotSet
self._milestone: Attribute[github.Milestone.Milestone] = NotSet
self._rename: Attribute[dict] = NotSet
self._dismissed_review: Attribute[dict] = NotSet
self._lock_reason: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value})
@property
def actor(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._actor)
return self._actor.value
@property
def commit_id(self) -> str:
self._completeIfNotSet(self._commit_id)
return self._commit_id.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def event(self) -> str:
self._completeIfNotSet(self._event)
return self._event.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def issue(self) -> github.Issue.Issue:
self._completeIfNotSet(self._issue)
return self._issue.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def node_id(self) -> str:
self._completeIfNotSet(self._node_id)
return self._node_id.value
@property
def commit_url(self) -> str:
self._completeIfNotSet(self._commit_url)
return self._commit_url.value
@property
def label(self) -> github.Label.Label:
self._completeIfNotSet(self._label)
return self._label.value
@property
def assignee(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._assignee)
return self._assignee.value
@property
def assigner(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._assigner)
return self._assigner.value
@property
def review_requester(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._review_requester)
return self._review_requester.value
@property
def requested_reviewer(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._requested_reviewer)
return self._requested_reviewer.value
@property
def milestone(self) -> github.Milestone.Milestone:
self._completeIfNotSet(self._milestone)
return self._milestone.value
@property
def rename(self) -> dict:
self._completeIfNotSet(self._rename)
return self._rename.value
@property
def dismissed_review(self) -> dict:
self._completeIfNotSet(self._dismissed_review)
return self._dismissed_review.value
@property
def lock_reason(self) -> str:
self._completeIfNotSet(self._lock_reason)
return self._lock_reason.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "actor" in attributes: # pragma no branch
self._actor = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["actor"])
if "commit_id" in attributes: # pragma no branch
self._commit_id = self._makeStringAttribute(attributes["commit_id"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "event" in attributes: # pragma no branch
self._event = self._makeStringAttribute(attributes["event"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "issue" in attributes: # pragma no branch
self._issue = self._makeClassAttribute(github.Issue.Issue, attributes["issue"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "node_id" in attributes: # pragma no branch
self._node_id = self._makeStringAttribute(attributes["node_id"])
if "commit_url" in attributes: # pragma no branch
self._commit_url = self._makeStringAttribute(attributes["commit_url"])
if "label" in attributes: # pragma no branch
self._label = self._makeClassAttribute(github.Label.Label, attributes["label"])
if "assignee" in attributes: # pragma no branch
self._assignee = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assignee"])
if "assigner" in attributes: # pragma no branch
self._assigner = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["assigner"])
if "review_requester" in attributes: # pragma no branch
self._review_requester = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["review_requester"]
)
if "requested_reviewer" in attributes: # pragma no branch
self._requested_reviewer = self._makeClassAttribute(
github.NamedUser.NamedUser, attributes["requested_reviewer"]
)
if "milestone" in attributes: # pragma no branch
self._milestone = self._makeClassAttribute(github.Milestone.Milestone, attributes["milestone"])
if "rename" in attributes: # pragma no branch
self._rename = self._makeDictAttribute(attributes["rename"])
if "dismissed_review" in attributes: # pragma no branch
self._dismissed_review = self._makeDictAttribute(attributes["dismissed_review"])
if "lock_reason" in attributes: # pragma no branch
self._lock_reason = self._makeStringAttribute(attributes["lock_reason"])
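# Usage sketch: audit label and assignment events on an issue. Only the
# fields relevant to each event type are set (e.g. `label` for "labeled"
# events); "owner/repo" and the issue number are placeholders.
if __name__ == "__main__":
    from github import Auth, Github

    issue = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo").get_issue(1)
    for ev in issue.get_events():
        if ev.event == "labeled":
            print(ev.created_at, ev.actor.login, "added label", ev.label.name)
        elif ev.event == "assigned":
            print(ev.created_at, ev.assignee.login, "assigned by", ev.assigner.login)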
| 10,216 | Python | .py | 189 | 47.444444 | 107 | 0.60066 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,389 | CodeScanRule.py | PyGithub_PyGithub/github/CodeScanRule.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2020 Victor Zeng <zacker150@users.noreply.github.com> #
# Copyright 2022 Eric Nieuwland <eric.nieuwland@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class CodeScanRule(NonCompletableGithubObject):
"""
    This class represents rules used in code scanning alerts.
The reference can be found here
https://docs.github.com/en/rest/reference/code-scanning.
"""
def _initAttributes(self) -> None:
self._id: Attribute[str] = NotSet
self._name: Attribute[str] = NotSet
self._severity: Attribute[str] = NotSet
self._security_severity_level: Attribute[str] = NotSet
self._description: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self.id, "name": self.name})
@property
def id(self) -> str:
return self._id.value
@property
def name(self) -> str:
return self._name.value
@property
def severity(self) -> str:
return self._severity.value
@property
def security_severity_level(self) -> str:
return self._security_severity_level.value
@property
def description(self) -> str:
return self._description.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeStringAttribute(attributes["id"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "severity" in attributes: # pragma no branch
self._severity = self._makeStringAttribute(attributes["severity"])
if "security_severity_level" in attributes: # pragma no branch
self._security_severity_level = self._makeStringAttribute(attributes["security_severity_level"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
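# Usage sketch: rules are usually reached through code scanning alerts rather
# than constructed directly. Assumes Repository.get_codescan_alerts() is
# available; "owner/repo" is a placeholder.
if __name__ == "__main__":
    from github import Auth, Github

    repo = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo")
    for alert in repo.get_codescan_alerts():
        rule = alert.rule  # a CodeScanRule
        print(rule.id, rule.severity, rule.security_severity_level, rule.description)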
| 5,101 | Python | .py | 82 | 57.853659 | 108 | 0.535372 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,390 | Consts.py | PyGithub_PyGithub/github/Consts.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jakub Wilk <jwilk@jwilk.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Aaron L. Levine <allevin@sandia.gov> #
# Copyright 2018 Alice GIRARD <bouhahah@gmail.com> #
# Copyright 2018 Maarten Fonville <mfonville@users.noreply.github.com> #
# Copyright 2018 Shubham Singh <41840111+singh811@users.noreply.github.com> #
# Copyright 2018 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 Yossarian King <yggy@blackbirdinteractive.com> #
# Copyright 2018 h.shi <10385628+AnYeMoWang@users.noreply.github.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Nick Campbell <nicholas.j.campbell@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Tim Gates <tim.gates@iress.com> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2019 Will Li <cuichen.li94@gmail.com> #
# Copyright 2020 Adrian Bridgett <58699309+tl-adrian-bridgett@users.noreply.github.com>#
# Copyright 2020 Anuj Bansal <bansalanuj1996@gmail.com> #
# Copyright 2020 Colby Gallup <colbygallup@gmail.com> #
# Copyright 2020 Pascal Hofmann <mail@pascalhofmann.de> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Tanner <51724788+lightningboltemoji@users.noreply.github.com> #
# Copyright 2022 KimSia Sim <245021+simkimsia@users.noreply.github.com> #
# Copyright 2023 Denis Blanchette <dblanchette@coveo.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
REQ_IF_NONE_MATCH = "If-None-Match"
REQ_IF_MODIFIED_SINCE = "If-Modified-Since"
PROCESSING_202_WAIT_TIME = 2
# ##############################################################################
# Response Header #
# (Lower Case) #
# ##############################################################################
RES_ETAG = "etag"
RES_LAST_MODIFIED = "last-modified"
# Inspired by https://github.com/google/go-github
# Headers
headerRateLimit = "x-ratelimit-limit"
headerRateRemaining = "x-ratelimit-remaining"
headerRateReset = "x-ratelimit-reset"
headerOAuthScopes = "x-oauth-scopes"
headerOTP = "x-github-otp"
defaultMediaType = "application/octet-stream"
# Custom media type for preview API
# https://developer.github.com/changes/2014-12-09-new-attributes-for-stars-api/
mediaTypeStarringPreview = "application/vnd.github.v3.star+json"
# https://developer.github.com/changes/2016-02-19-source-import-preview-api/
mediaTypeImportPreview = "application/vnd.github.barred-rock-preview"
# https://developer.github.com/changes/2016-05-12-reactions-api-preview/
mediaTypeReactionsPreview = "application/vnd.github.squirrel-girl-preview"
# https://developer.github.com/changes/2016-09-14-Integrations-Early-Access/
mediaTypeIntegrationPreview = "application/vnd.github.machine-man-preview+json"
# https://developer.github.com/changes/2016-09-14-projects-api/
mediaTypeProjectsPreview = "application/vnd.github.inertia-preview+json"
# https://developer.github.com/changes/2017-01-05-commit-search-api/
mediaTypeCommitSearchPreview = "application/vnd.github.cloak-preview"
# https://developer.github.com/changes/2017-02-28-user-blocking-apis-and-webhook/
mediaTypeBlockUsersPreview = "application/vnd.github.giant-sentry-fist-preview+json"
# https://developer.github.com/changes/2017-07-17-update-topics-on-repositories/
mediaTypeTopicsPreview = "application/vnd.github.mercy-preview+json"
# https://developer.github.com/changes/2018-02-22-label-description-search-preview/
mediaTypeLabelDescriptionSearchPreview = "application/vnd.github.symmetra-preview+json"
# https://developer.github.com/changes/2018-01-10-lock-reason-api-preview/
mediaTypeLockReasonPreview = "application/vnd.github.sailor-v-preview+json"
# https://developer.github.com/changes/2018-01-25-organization-invitation-api-preview/
mediaTypeOrganizationInvitationPreview = "application/vnd.github.dazzler-preview+json"
# https://developer.github.com/changes/2018-02-07-team-discussions-api
mediaTypeTeamDiscussionsPreview = "application/vnd.github.echo-preview+json"
# https://developer.github.com/changes/2018-03-16-protected-branches-required-approving-reviews/
mediaTypeRequireMultipleApprovingReviews = "application/vnd.github.luke-cage-preview+json"
# https://developer.github.com/changes/2018-05-24-user-migration-api/
mediaTypeMigrationPreview = "application/vnd.github.wyandotte-preview+json"
# https://developer.github.com/changes/2019-07-16-repository-templates-api/
mediaTypeTemplatesPreview = "application/vnd.github.baptiste-preview+json"
# https://docs.github.com/en/rest/reference/search#highlighting-code-search-results-1
highLightSearchPreview = "application/vnd.github.v3.text-match+json"
# https://developer.github.com/changes/2018-02-22-protected-branches-required-signatures/
signaturesProtectedBranchesPreview = "application/vnd.github.zzzax-preview+json"
# https://developer.github.com/changes/2019-04-24-vulnerability-alerts/
vulnerabilityAlertsPreview = "application/vnd.github.dorian-preview+json"
# https://developer.github.com/changes/2019-06-04-automated-security-fixes/
automatedSecurityFixes = "application/vnd.github.london-preview+json"
# https://developer.github.com/changes/2019-05-29-update-branch-api/
updateBranchPreview = "application/vnd.github.lydian-preview+json"
# https://developer.github.com/changes/2016-05-23-timeline-preview-api/
issueTimelineEventsPreview = "application/vnd.github.mockingbird-preview"
# https://docs.github.com/en/rest/reference/teams#check-if-a-team-manages-a-repository
teamRepositoryPermissions = "application/vnd.github.v3.repository+json"
# https://developer.github.com/changes/2016-04-06-deployment-and-deployment-status-enhancements/
deploymentEnhancementsPreview = "application/vnd.github.ant-man-preview+json"
# https://developer.github.com/changes/2018-10-16-deployments-environments-states-and-auto-inactive-updates/
deploymentStatusEnhancementsPreview = "application/vnd.github.flash-preview+json"
# https://developer.github.com/changes/2019-12-03-internal-visibility-changes/
repoVisibilityPreview = "application/vnd.github.nebula-preview+json"
DEFAULT_BASE_URL = "https://api.github.com"
DEFAULT_OAUTH_URL = "https://github.com/login/oauth"
DEFAULT_STATUS_URL = "https://status.github.com"
DEFAULT_USER_AGENT = "PyGithub/Python"
# As of 2018-05-17, Github imposes a 10s limit for completion of API requests.
# Thus, the timeout should be slightly > 10s to account for network/front-end
# latency.
DEFAULT_TIMEOUT = 15
DEFAULT_PER_PAGE = 30
# JWT expiry in seconds. Could be set for max 600 seconds (10 minutes).
# https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#authenticating-as-a-github-app
DEFAULT_JWT_EXPIRY = 300
MIN_JWT_EXPIRY = 15
MAX_JWT_EXPIRY = 600
# https://docs.github.com/en/developers/apps/building-github-apps/authenticating-with-github-apps#generating-a-json-web-token-jwt
# "The time the JWT was created. To protect against clock drift, we recommend you set this 60 seconds in the past."
DEFAULT_JWT_ISSUED_AT = -60
# https://docs.github.com/en/apps/creating-github-apps/authenticating-with-a-github-app/generating-a-json-web-token-jwt-for-a-github-app
# "Your JWT must be signed using the RS256 algorithm"
DEFAULT_JWT_ALGORITHM = "RS256"
# https://docs.github.com/en/rest/guides/best-practices-for-integrators?apiVersion=2022-11-28#dealing-with-secondary-rate-limits
DEFAULT_SECONDS_BETWEEN_REQUESTS = 0.25
DEFAULT_SECONDS_BETWEEN_WRITES = 1.0
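# Usage sketch: wiring the defaults above into a client. The keyword
# arguments shown are assumed to exist on github.Github in recent releases;
# the token is a placeholder.
if __name__ == "__main__":
    from github import Auth, Github

    gh = Github(
        auth=Auth.Token("<access-token>"),
        timeout=DEFAULT_TIMEOUT,
        per_page=DEFAULT_PER_PAGE,
        seconds_between_requests=DEFAULT_SECONDS_BETWEEN_REQUESTS,
        seconds_between_writes=DEFAULT_SECONDS_BETWEEN_WRITES,
    )
    print(gh.get_rate_limit())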
| 10,492 | Python | .py | 145 | 71.110345 | 136 | 0.660363 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,391 | File.py | PyGithub_PyGithub/github/File.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Jeffrey Melvin <jeffrey.melvin@workiva.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2017 Simon <spam@esemi.ru> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class File(NonCompletableGithubObject):
"""
This class represents Files.
"""
def _initAttributes(self) -> None:
self._additions: Attribute[int] = NotSet
self._blob_url: Attribute[str] = NotSet
self._changes: Attribute[int] = NotSet
self._contents_url: Attribute[str] = NotSet
self._deletions: Attribute[int] = NotSet
self._filename: Attribute[str] = NotSet
self._patch: Attribute[str] = NotSet
self._previous_filename: Attribute[str] = NotSet
self._raw_url: Attribute[str] = NotSet
self._sha: Attribute[str] = NotSet
self._status: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"sha": self._sha.value, "filename": self._filename.value})
@property
def additions(self) -> int:
return self._additions.value
@property
def blob_url(self) -> str:
return self._blob_url.value
@property
def changes(self) -> int:
return self._changes.value
@property
def contents_url(self) -> str:
return self._contents_url.value
@property
def deletions(self) -> int:
return self._deletions.value
@property
def filename(self) -> str:
return self._filename.value
@property
def patch(self) -> str:
return self._patch.value
@property
def previous_filename(self) -> str:
return self._previous_filename.value
@property
def raw_url(self) -> str:
return self._raw_url.value
@property
def sha(self) -> str:
return self._sha.value
@property
def status(self) -> str:
return self._status.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "additions" in attributes: # pragma no branch
self._additions = self._makeIntAttribute(attributes["additions"])
if "blob_url" in attributes: # pragma no branch
self._blob_url = self._makeStringAttribute(attributes["blob_url"])
if "changes" in attributes: # pragma no branch
self._changes = self._makeIntAttribute(attributes["changes"])
if "contents_url" in attributes: # pragma no branch
self._contents_url = self._makeStringAttribute(attributes["contents_url"])
if "deletions" in attributes: # pragma no branch
self._deletions = self._makeIntAttribute(attributes["deletions"])
if "filename" in attributes: # pragma no branch
self._filename = self._makeStringAttribute(attributes["filename"])
if "patch" in attributes: # pragma no branch
self._patch = self._makeStringAttribute(attributes["patch"])
if "previous_filename" in attributes: # pragma no branch
self._previous_filename = self._makeStringAttribute(attributes["previous_filename"])
if "raw_url" in attributes: # pragma no branch
self._raw_url = self._makeStringAttribute(attributes["raw_url"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "status" in attributes: # pragma no branch
self._status = self._makeStringAttribute(attributes["status"])
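# Usage sketch: review the files changed in a pull request; `patch` may be
# empty for binary files. "owner/repo" and the PR number are placeholders.
if __name__ == "__main__":
    from github import Auth, Github

    pr = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo").get_pull(1)
    for f in pr.get_files():
        print(f.status, f.filename, f"+{f.additions}/-{f.deletions}")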
| 6,525 | Python | .py | 115 | 51.095652 | 96 | 0.549906 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,392 | CodeScanTool.py | PyGithub_PyGithub/github/CodeScanTool.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Eric Nieuwland <eric.nieuwland@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class CodeScanTool(NonCompletableGithubObject):
"""
This class represents code scanning tools.
The reference can be found here
https://docs.github.com/en/rest/reference/code-scanning.
"""
def _initAttributes(self) -> None:
self._name: Attribute[str] = NotSet
self._version: Attribute[str] = NotSet
self._guid: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__(
{
"guid": self.guid,
"name": self.name,
"version": self.version,
}
)
@property
def name(self) -> str:
return self._name.value
@property
def version(self) -> str:
return self._version.value
@property
def guid(self) -> str:
return self._guid.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
if "version" in attributes: # pragma no branch
self._version = self._makeStringAttribute(attributes["version"])
if "guid" in attributes: # pragma no branch
self._guid = self._makeStringAttribute(attributes["guid"])
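# Usage sketch: the tool, like the rule, hangs off each code scanning alert.
# Assumes Repository.get_codescan_alerts() is available; "owner/repo" is a
# placeholder.
if __name__ == "__main__":
    from github import Auth, Github

    repo = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo")
    for alert in repo.get_codescan_alerts():
        tool = alert.tool  # a CodeScanTool
        print(tool.name, tool.version, tool.guid)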
| 4,464 | Python | .py | 74 | 55.756757 | 80 | 0.498173 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,393 | Milestone.py | PyGithub_PyGithub/github/Milestone.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2013 martinqt <m.ki2@laposte.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Michell Stuttgart <michellstut@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Mark Walker <mark.walker@realbuzz.com> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import date, datetime
from typing import Any
import github.GithubObject
import github.Label
import github.NamedUser
import github.PaginatedList
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt, is_defined
from github.PaginatedList import PaginatedList
class Milestone(CompletableGithubObject):
"""
This class represents Milestones.
The reference can be found here
https://docs.github.com/en/rest/reference/issues#milestones
"""
def _initAttributes(self) -> None:
self._closed_issues: Attribute[int] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._creator: Attribute[github.NamedUser.NamedUser] = NotSet
self._description: Attribute[str] = NotSet
self._due_on: Attribute[datetime] = NotSet
self._id: Attribute[int] = NotSet
self._labels_url: Attribute[str] = NotSet
self._number: Attribute[int] = NotSet
self._open_issues: Attribute[int] = NotSet
self._state: Attribute[str] = NotSet
self._title: Attribute[str] = NotSet
self._updated_at: Attribute[datetime] = NotSet
self._url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"number": self._number.value, "title": self._title.value})
@property
def closed_issues(self) -> int:
self._completeIfNotSet(self._closed_issues)
return self._closed_issues.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def creator(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def due_on(self) -> datetime | None:
self._completeIfNotSet(self._due_on)
return self._due_on.value
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def labels_url(self) -> str:
self._completeIfNotSet(self._labels_url)
return self._labels_url.value
@property
def number(self) -> int:
self._completeIfNotSet(self._number)
return self._number.value
@property
def open_issues(self) -> int:
self._completeIfNotSet(self._open_issues)
return self._open_issues.value
@property
def state(self) -> str:
self._completeIfNotSet(self._state)
return self._state.value
@property
def title(self) -> str:
self._completeIfNotSet(self._title)
return self._title.value
@property
def updated_at(self) -> datetime:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
def delete(self) -> None:
"""
:calls: `DELETE /repos/{owner}/{repo}/milestones/{number} <https://docs.github.com/en/rest/reference/issues#milestones>`_
"""
headers, data = self._requester.requestJsonAndCheck("DELETE", self.url)
def edit(
self, title: str, state: Opt[str] = NotSet, description: Opt[str] = NotSet, due_on: Opt[date] = NotSet
) -> None:
"""
:calls: `PATCH /repos/{owner}/{repo}/milestones/{number} <https://docs.github.com/en/rest/reference/issues#milestones>`_
"""
assert isinstance(title, str), title
assert state is NotSet or isinstance(state, str), state
assert description is NotSet or isinstance(description, str), description
assert due_on is NotSet or isinstance(due_on, date), due_on
post_parameters = NotSet.remove_unset_items(
{
"title": title,
"state": state,
"description": description,
}
)
if is_defined(due_on):
post_parameters["due_on"] = due_on.strftime("%Y-%m-%d")
headers, data = self._requester.requestJsonAndCheck("PATCH", self.url, input=post_parameters)
self._useAttributes(data)
def get_labels(self) -> PaginatedList[github.Label.Label]:
"""
:calls: `GET /repos/{owner}/{repo}/milestones/{number}/labels <https://docs.github.com/en/rest/reference/issues#labels>`_
"""
return PaginatedList(github.Label.Label, self._requester, f"{self.url}/labels", None)
@property
def _identity(self) -> int:
return self.number
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "closed_issues" in attributes: # pragma no branch
self._closed_issues = self._makeIntAttribute(attributes["closed_issues"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["creator"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "due_on" in attributes: # pragma no branch
self._due_on = self._makeDatetimeAttribute(attributes["due_on"])
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "labels_url" in attributes: # pragma no branch
self._labels_url = self._makeStringAttribute(attributes["labels_url"])
if "number" in attributes: # pragma no branch
self._number = self._makeIntAttribute(attributes["number"])
if "open_issues" in attributes: # pragma no branch
self._open_issues = self._makeIntAttribute(attributes["open_issues"])
if "state" in attributes: # pragma no branch
self._state = self._makeStringAttribute(attributes["state"])
if "title" in attributes: # pragma no branch
self._title = self._makeStringAttribute(attributes["title"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
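# Usage sketch: close a milestone once all of its issues are done, pushing
# the due date to today. "owner/repo" and the milestone number are
# placeholders.
if __name__ == "__main__":
    from datetime import date

    from github import Auth, Github

    ms = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo").get_milestone(1)
    if ms.open_issues == 0:
        ms.edit(title=ms.title, state="closed", due_on=date.today())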
| 9,784 | Python | .py | 184 | 46.581522 | 129 | 0.585832 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,394 | AccessToken.py | PyGithub_PyGithub/github/AccessToken.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Adam Baratz <adam.baratz@gmail.com> #
# Copyright 2019 Nick Campbell <nicholas.j.campbell@gmail.com> #
# Copyright 2019 Rigas Papathanasopoulos <rigaspapas@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2022 Liuyang Wan <tsfdye@gmail.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2023 chantra <chantra@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime, timedelta, timezone
from typing import Any
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class AccessToken(NonCompletableGithubObject):
"""
This class represents access tokens.
"""
_created: datetime
def _initAttributes(self) -> None:
self._token: Attribute[str] = NotSet
self._type: Attribute[str] = NotSet
self._scope: Attribute[str] = NotSet
self._expires_in: Attribute[int | None] = NotSet
self._refresh_token: Attribute[str] = NotSet
self._refresh_expires_in: Attribute[int | None] = NotSet
def __repr__(self) -> str:
return self.get__repr__(
{
"token": f"{self.token[:5]}...",
"scope": self.scope,
"type": self.type,
"expires_in": self.expires_in,
"refresh_token": (f"{self.refresh_token[:5]}..." if self.refresh_token else None),
"refresh_token_expires_in": self.refresh_expires_in,
}
)
@property
def token(self) -> str:
"""
:type: string
"""
return self._token.value
@property
def type(self) -> str:
"""
:type: string
"""
return self._type.value
@property
def scope(self) -> str:
"""
:type: string
"""
return self._scope.value
@property
def created(self) -> datetime:
"""
:type: datetime
"""
return self._created
@property
def expires_in(self) -> int | None:
"""
:type: Optional[int]
"""
return self._expires_in.value
@property
def expires_at(self) -> datetime | None:
"""
:type: Optional[datetime]
"""
seconds = self.expires_in
if seconds is not None:
return self._created + timedelta(seconds=seconds)
return None
@property
def refresh_token(self) -> str | None:
"""
:type: Optional[string]
"""
return self._refresh_token.value
@property
def refresh_expires_in(self) -> int | None:
"""
:type: Optional[int]
"""
return self._refresh_expires_in.value
@property
def refresh_expires_at(self) -> datetime | None:
"""
:type: Optional[datetime]
"""
seconds = self.refresh_expires_in
if seconds is not None:
return self._created + timedelta(seconds=seconds)
return None
def _useAttributes(self, attributes: dict[str, Any]) -> None:
self._created = datetime.now(timezone.utc)
if "access_token" in attributes: # pragma no branch
self._token = self._makeStringAttribute(attributes["access_token"])
if "token_type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["token_type"])
if "scope" in attributes: # pragma no branch
self._scope = self._makeStringAttribute(attributes["scope"])
if "expires_in" in attributes: # pragma no branch
self._expires_in = self._makeIntAttribute(attributes["expires_in"])
if "refresh_token" in attributes: # pragma no branch
self._refresh_token = self._makeStringAttribute(attributes["refresh_token"])
if "refresh_token_expires_in" in attributes: # pragma no branch
self._refresh_expires_in = self._makeIntAttribute(attributes["refresh_token_expires_in"])
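# Usage sketch: check expiry on an OAuth token and refresh it when needed.
# Assumes Github.get_oauth_application() and ApplicationOAuth.refresh_access_token()
# are available; client id/secret and the authorization code are placeholders.
if __name__ == "__main__":
    from github import Github

    app = Github().get_oauth_application("<client-id>", "<client-secret>")
    token = app.get_access_token("<authorization-code>")
    if token.expires_at is not None and token.expires_at <= datetime.now(timezone.utc):
        token = app.refresh_access_token(token.refresh_token)
    print(token.token[:5] + "...", token.expires_at)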
| 6,675 | Python | .py | 139 | 41.453237 | 101 | 0.524701 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,395 | Clones.py | PyGithub_PyGithub/github/Clones.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Justin Kufro <jkufro@andrew.cmu.edu> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class Clones(NonCompletableGithubObject):
"""
    This class represents the clone traffic of a GitHub repository.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#get-repository-clones
"""
def _initAttributes(self) -> None:
self._timestamp: Attribute[datetime] = NotSet
self._count: Attribute[int] = NotSet
self._uniques: Attribute[int] = NotSet
def __repr__(self) -> str:
return self.get__repr__(
{
"timestamp": self._timestamp.value,
"count": self._count.value,
"uniques": self._uniques.value,
}
)
@property
def timestamp(self) -> datetime:
return self._timestamp.value
@property
def count(self) -> int:
return self._count.value
@property
def uniques(self) -> int:
return self._uniques.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "timestamp" in attributes: # pragma no branch
self._timestamp = self._makeDatetimeAttribute(attributes["timestamp"])
if "count" in attributes: # pragma no branch
self._count = self._makeIntAttribute(attributes["count"])
if "uniques" in attributes: # pragma no branch
self._uniques = self._makeIntAttribute(attributes["uniques"])
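# Usage sketch: read the last two weeks of clone traffic. Assumes
# Repository.get_clones_traffic() returns a dict whose "clones" entry is a
# list of Clones instances; "owner/repo" is a placeholder.
if __name__ == "__main__":
    from github import Auth, Github

    repo = Github(auth=Auth.Token("<access-token>")).get_repo("owner/repo")
    traffic = repo.get_clones_traffic(per="day")
    print("total:", traffic["count"], "unique:", traffic["uniques"])
    for clone in traffic["clones"]:
        print(clone.timestamp.date(), clone.count, clone.uniques)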
| 4,595 | Python | .py | 75 | 56.746667 | 82 | 0.508429 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
| 12,396 | AdvisoryCredit.py | PyGithub_PyGithub/github/AdvisoryCredit.py |
############################ Copyrights and license ############################
# #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Jonathan Leitschuh <jonathan.leitschuh@gmail.com> #
# Copyright 2023 Joseph Henrich <crimsonknave@gmail.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from typing import Any, Union
from typing_extensions import TypedDict
import github.NamedUser
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class SimpleCredit(TypedDict):
"""
A simple credit for a security advisory.
"""
login: str | github.NamedUser.NamedUser
type: str
Credit = Union[SimpleCredit, "AdvisoryCredit"]
class AdvisoryCredit(NonCompletableGithubObject):
"""
This class represents a credit that is assigned to a SecurityAdvisory.
The reference can be found here
https://docs.github.com/en/rest/security-advisories/repository-advisories
"""
@property
def login(self) -> str:
"""
:type: string
"""
return self._login.value
@property
def type(self) -> str:
"""
:type: string
"""
return self._type.value
def _initAttributes(self) -> None:
self._login: Attribute[str] = NotSet
self._type: Attribute[str] = NotSet
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "login" in attributes: # pragma no branch
self._login = self._makeStringAttribute(attributes["login"])
if "type" in attributes: # pragma no branch
self._type = self._makeStringAttribute(attributes["type"])
@staticmethod
def _validate_credit(credit: Credit) -> None:
assert isinstance(credit, (dict, AdvisoryCredit)), credit
if isinstance(credit, dict):
assert "login" in credit, credit
assert "type" in credit, credit
assert isinstance(credit["login"], (str, github.NamedUser.NamedUser)), credit["login"]
assert isinstance(credit["type"], str), credit["type"]
else:
assert isinstance(credit.login, str), credit.login
assert isinstance(credit.type, str), credit.type
@staticmethod
def _to_github_dict(credit: Credit) -> SimpleCredit:
assert isinstance(credit, (dict, AdvisoryCredit)), credit
if isinstance(credit, dict):
assert "login" in credit, credit
assert "type" in credit, credit
assert isinstance(credit["login"], (str, github.NamedUser.NamedUser)), credit["login"]
login = credit["login"]
if isinstance(login, github.NamedUser.NamedUser):
login = login.login
return {
"login": login,
"type": credit["type"],
}
else:
return {
"login": credit.login,
"type": credit.type,
}
| 4,846 | Python | .py | 95 | 44.463158 | 98 | 0.532333 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
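A minimal sketch of the dict shape the Credit union above accepts, exercised through the class's own helpers; note these are underscore-prefixed internals that application code would normally not call directly, and the login value is made up.

from github.AdvisoryCredit import AdvisoryCredit

# Dict form of a credit: "login" may be a str or a NamedUser,
# "type" is a GitHub credit type such as "reporter" or "analyst".
credit = {"login": "octocat", "type": "reporter"}

# _validate_credit asserts the shape; _to_github_dict normalizes a
# NamedUser login down to its plain login string for the REST payload.
AdvisoryCredit._validate_credit(credit)
print(AdvisoryCredit._to_github_dict(credit))
# -> {'login': 'octocat', 'type': 'reporter'}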
| 12,397 | GitAuthor.py | PyGithub_PyGithub/github/GitAuthor.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from datetime import datetime
from typing import Any, Dict
from github.GithubObject import Attribute, NonCompletableGithubObject, NotSet
class GitAuthor(NonCompletableGithubObject):
"""
This class represents GitAuthors.
"""
def _initAttributes(self) -> None:
self._name: Attribute[str] = NotSet
self._email: Attribute[str] = NotSet
self._date: Attribute[datetime] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"name": self._name.value})
@property
def date(self) -> datetime:
return self._date.value
@property
def email(self) -> str:
return self._email.value
@property
def name(self) -> str:
return self._name.value
def _useAttributes(self, attributes: Dict[str, Any]) -> None:
if "date" in attributes: # pragma no branch
self._date = self._makeDatetimeAttribute(attributes["date"])
if "email" in attributes: # pragma no branch
self._email = self._makeStringAttribute(attributes["email"])
if "name" in attributes: # pragma no branch
self._name = self._makeStringAttribute(attributes["name"])
| 4,099 | Python | .py | 65 | 59.384615 | 80 | 0.499254 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
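GitAuthor instances are not fetched directly; they appear as the author and committer of a GitCommit. A brief sketch, with the token and repository name as placeholders:

from github import Github

gh = Github("ghp_example_token")  # placeholder token
repo = gh.get_repo("PyGithub/PyGithub")

# Commit.commit is the underlying GitCommit; its author and
# committer attributes are GitAuthor objects.
head = repo.get_commits()[0]
author = head.commit.author
print(author.name, author.email, author.date)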
| 12,398 | PublicKey.py | PyGithub_PyGithub/github/PublicKey.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Chris Keating <christopherkeating@gmail.com> #
# Copyright 2021 MeggyCal <MeggyCal@users.noreply.github.com> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
# https://docs.github.com/en/rest/reference/actions#example-encrypting-a-secret-using-python
from __future__ import annotations
from base64 import b64encode
from typing import Any
from nacl import encoding, public
from github.GithubObject import Attribute, CompletableGithubObject, NotSet
def encrypt(public_key: str, secret_value: str) -> str:
"""
Encrypt a Unicode string using the public key.
"""
pk = public.PublicKey(public_key.encode("utf-8"), encoding.Base64Encoder)
sealed_box = public.SealedBox(pk)
encrypted = sealed_box.encrypt(secret_value.encode("utf-8"))
return b64encode(encrypted).decode("utf-8")
class PublicKey(CompletableGithubObject):
"""
This class represents either an organization public key or a repository public key.
The reference can be found here
https://docs.github.com/en/rest/reference/actions#get-an-organization-public-key
or here
https://docs.github.com/en/rest/reference/actions#get-a-repository-public-key
"""
def _initAttributes(self) -> None:
self._key_id: Attribute[str | int] = NotSet
self._key: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"key_id": self._key_id.value, "key": self._key.value})
@property
def key(self) -> str:
self._completeIfNotSet(self._key)
return self._key.value
@property
def key_id(self) -> str | int:
self._completeIfNotSet(self._key_id)
return self._key_id.value
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "key" in attributes: # pragma no branch
self._key = self._makeStringAttribute(attributes["key"])
if "key_id" in attributes: # pragma no branch
if isinstance(attributes["key_id"], str):
self._key_id = self._makeStringAttribute(attributes["key_id"])
else:
self._key_id = self._makeIntAttribute(attributes["key_id"])
def encrypt(self, unencrypted_value: str) -> str:
return encrypt(self._key.value, unencrypted_value)
| 5,256 | Python | .py | 84 | 58.571429 | 92 | 0.543251 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
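A sketch of how the module-level encrypt function above is used when uploading an Actions secret; Repository.create_secret performs the same steps internally. The token, secret name, and value are placeholders.

from github import Github

gh = Github("ghp_example_token")  # placeholder token
repo = gh.get_repo("PyGithub/PyGithub")

# Fetch the repository public key and seal the secret with it; the
# result is the base64 libsodium sealed box the REST API expects,
# submitted together with key.key_id.
key = repo.get_public_key()
sealed = key.encrypt("s3cr3t-value")

# The one-call equivalent:
repo.create_secret("MY_SECRET", "s3cr3t-value")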
| 12,399 | Deployment.py | PyGithub_PyGithub/github/Deployment.py |
############################ Copyrights and license ############################
# #
# Copyright 2012 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2012 Zearin <zearin@gonk.net> #
# Copyright 2013 AKFish <akfish@gmail.com> #
# Copyright 2013 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2014 Vincent Jacques <vincent@vincent-jacques.net> #
# Copyright 2015 Matt Babineau <mbabineau@dataxu.com> #
# Copyright 2016 Jannis Gebauer <ja.geb@me.com> #
# Copyright 2016 Martijn Koster <mak-github@greenhills.co.uk> #
# Copyright 2016 Peter Buckley <dx-pbuckley@users.noreply.github.com> #
# Copyright 2018 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2018 sfdye <tsfdye@gmail.com> #
# Copyright 2019 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2019 Wan Liuyang <tsfdye@gmail.com> #
# Copyright 2020 Colby Gallup <colbygallup@gmail.com> #
# Copyright 2020 Pascal Hofmann <mail@pascalhofmann.de> #
# Copyright 2020 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2021 Steve Kowalik <steven@wedontsleep.org> #
# Copyright 2023 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2023 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# Copyright 2023 Nevins <nevins-b@users.noreply.github.com> #
# Copyright 2023 Trim21 <trim21.me@gmail.com> #
# Copyright 2024 Enrico Minack <github@enrico.minack.dev> #
# Copyright 2024 Jirka Borovec <6035284+Borda@users.noreply.github.com> #
# #
# This file is part of PyGithub. #
# http://pygithub.readthedocs.io/ #
# #
# PyGithub is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Lesser General Public License as published by the Free #
# Software Foundation, either version 3 of the License, or (at your option) #
# any later version. #
# #
# PyGithub is distributed in the hope that it will be useful, but WITHOUT ANY #
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS #
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more #
# details. #
# #
# You should have received a copy of the GNU Lesser General Public License #
# along with PyGithub. If not, see <http://www.gnu.org/licenses/>. #
# #
################################################################################
from __future__ import annotations
from datetime import datetime
from typing import Any
import github.Consts
import github.DeploymentStatus
import github.NamedUser
from github.GithubObject import Attribute, CompletableGithubObject, NotSet, Opt
from github.PaginatedList import PaginatedList
class Deployment(CompletableGithubObject):
"""
This class represents Deployments.
The reference can be found here
https://docs.github.com/en/rest/reference/repos#deployments
"""
def _initAttributes(self) -> None:
self._id: Attribute[int] = NotSet
self._ref: Attribute[str] = NotSet
self._url: Attribute[str] = NotSet
self._sha: Attribute[str] = NotSet
self._task: Attribute[str] = NotSet
self._payload: Attribute[dict[str, Any]] = NotSet
self._original_environment: Attribute[str] = NotSet
self._environment: Attribute[str] = NotSet
self._production_environment: Attribute[bool] = NotSet
self._transient_environment: Attribute[bool] = NotSet
self._description: Attribute[str] = NotSet
self._creator: Attribute[github.NamedUser.NamedUser] = NotSet
self._created_at: Attribute[datetime] = NotSet
self._updated_at: Attribute[datetime | None] = NotSet
self._statuses_url: Attribute[str] = NotSet
self._repository_url: Attribute[str] = NotSet
def __repr__(self) -> str:
return self.get__repr__({"id": self._id.value, "url": self._url.value})
@property
def id(self) -> int:
self._completeIfNotSet(self._id)
return self._id.value
@property
def ref(self) -> str:
self._completeIfNotSet(self._ref)
return self._ref.value
@property
def url(self) -> str:
self._completeIfNotSet(self._url)
return self._url.value
@property
def sha(self) -> str:
self._completeIfNotSet(self._sha)
return self._sha.value
@property
def task(self) -> str:
self._completeIfNotSet(self._task)
return self._task.value
@property
def payload(self) -> dict[str, Any]:
self._completeIfNotSet(self._payload)
return self._payload.value
@property
def original_environment(self) -> str:
self._completeIfNotSet(self._original_environment)
return self._original_environment.value
@property
def environment(self) -> str:
self._completeIfNotSet(self._environment)
return self._environment.value
@property
def production_environment(self) -> bool:
self._completeIfNotSet(self._production_environment)
return self._production_environment.value
@property
def transient_environment(self) -> bool:
self._completeIfNotSet(self._transient_environment)
return self._transient_environment.value
@property
def description(self) -> str:
self._completeIfNotSet(self._description)
return self._description.value
@property
def creator(self) -> github.NamedUser.NamedUser:
self._completeIfNotSet(self._creator)
return self._creator.value
@property
def created_at(self) -> datetime:
self._completeIfNotSet(self._created_at)
return self._created_at.value
@property
def updated_at(self) -> datetime | None:
self._completeIfNotSet(self._updated_at)
return self._updated_at.value
@property
def statuses_url(self) -> str:
self._completeIfNotSet(self._statuses_url)
return self._statuses_url.value
@property
def repository_url(self) -> str:
self._completeIfNotSet(self._repository_url)
return self._repository_url.value
def get_statuses(self) -> PaginatedList[github.DeploymentStatus.DeploymentStatus]:
"""
        :calls: `GET /repos/{owner}/{repo}/deployments/{deployment_id}/statuses <https://docs.github.com/en/rest/reference/repos#list-deployment-statuses>`_
"""
return PaginatedList(
github.DeploymentStatus.DeploymentStatus,
self._requester,
f"{self.url}/statuses",
None,
headers={"Accept": self._get_accept_header()},
)
def get_status(self, id_: int) -> github.DeploymentStatus.DeploymentStatus:
"""
        :calls: `GET /repos/{owner}/{repo}/deployments/{deployment_id}/statuses/{status_id} <https://docs.github.com/en/rest/reference/repos#get-a-deployment-status>`_
"""
assert isinstance(id_, int), id_
headers, data = self._requester.requestJsonAndCheck(
"GET",
f"{self.url}/statuses/{id_}",
headers={"Accept": self._get_accept_header()},
)
return github.DeploymentStatus.DeploymentStatus(self._requester, headers, data, completed=True)
def create_status(
self,
state: str,
target_url: Opt[str] = NotSet,
description: Opt[str] = NotSet,
environment: Opt[str] = NotSet,
environment_url: Opt[str] = NotSet,
auto_inactive: Opt[bool] = NotSet,
) -> github.DeploymentStatus.DeploymentStatus:
"""
:calls: `POST /repos/{owner}/{repo}/deployments/{deployment_id}/statuses <https://docs.github.com/en/rest/reference/repos#create-a-deployment-status>`_
"""
assert isinstance(state, str), state
assert target_url is NotSet or isinstance(target_url, str), target_url
assert description is NotSet or isinstance(description, str), description
assert environment is NotSet or isinstance(environment, str), environment
assert environment_url is NotSet or isinstance(environment_url, str), environment_url
assert auto_inactive is NotSet or isinstance(auto_inactive, bool), auto_inactive
post_parameters = NotSet.remove_unset_items(
{
"state": state,
"target_url": target_url,
"description": description,
"environment": environment,
"environment_url": environment_url,
"auto_inactive": auto_inactive,
}
)
headers, data = self._requester.requestJsonAndCheck(
"POST",
f"{self.url}/statuses",
input=post_parameters,
headers={"Accept": self._get_accept_header()},
)
return github.DeploymentStatus.DeploymentStatus(self._requester, headers, data, completed=True)
@staticmethod
def _get_accept_header() -> str:
return ", ".join(
[
github.Consts.deploymentEnhancementsPreview,
github.Consts.deploymentStatusEnhancementsPreview,
]
)
def _useAttributes(self, attributes: dict[str, Any]) -> None:
if "id" in attributes: # pragma no branch
self._id = self._makeIntAttribute(attributes["id"])
if "production_environment" in attributes: # pragma no branch
self._production_environment = self._makeBoolAttribute(attributes["production_environment"])
if "ref" in attributes: # pragma no branch
self._ref = self._makeStringAttribute(attributes["ref"])
if "transient_environment" in attributes: # pragma no branch
self._transient_environment = self._makeBoolAttribute(attributes["transient_environment"])
if "url" in attributes: # pragma no branch
self._url = self._makeStringAttribute(attributes["url"])
if "sha" in attributes: # pragma no branch
self._sha = self._makeStringAttribute(attributes["sha"])
if "task" in attributes: # pragma no branch
self._task = self._makeStringAttribute(attributes["task"])
if "payload" in attributes: # pragma no branch
self._payload = self._makeDictAttribute(attributes["payload"])
if "original_environment" in attributes: # pragma no branch
self._original_environment = self._makeStringAttribute(attributes["original_environment"])
if "environment" in attributes: # pragma no branch
self._environment = self._makeStringAttribute(attributes["environment"])
if "description" in attributes: # pragma no branch
self._description = self._makeStringAttribute(attributes["description"])
if "creator" in attributes: # pragma no branch
self._creator = self._makeClassAttribute(github.NamedUser.NamedUser, attributes["creator"])
if "created_at" in attributes: # pragma no branch
self._created_at = self._makeDatetimeAttribute(attributes["created_at"])
if "updated_at" in attributes: # pragma no branch
self._updated_at = self._makeDatetimeAttribute(attributes["updated_at"])
if "statuses_url" in attributes: # pragma no branch
self._statuses_url = self._makeStringAttribute(attributes["statuses_url"])
if "repository_url" in attributes: # pragma no branch
self._repository_url = self._makeStringAttribute(attributes["repository_url"])
| 12,542 | Python | .py | 238 | 45.277311 | 159 | 0.601206 | PyGithub/PyGithub | 6,892 | 1,756 | 334 | LGPL-3.0 | 9/5/2024, 5:11:50 PM (Europe/Amsterdam) |
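A sketch tying create_status to a freshly created deployment; the token, ref, environment, and URL are illustrative.

from github import Github

gh = Github("ghp_example_token")  # placeholder token
repo = gh.get_repo("PyGithub/PyGithub")

# create_deployment returns a Deployment; create_status then records
# progress against it (e.g. "in_progress", "success", "failure").
deployment = repo.create_deployment(ref="main", environment="production")
status = deployment.create_status(
    state="success",
    description="Deployed by CI",
    environment_url="https://deploy.example.com",
)
print(status.state)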