| sentence1 | sentence2 | label |
|---|---|---|
def get_orders(self, instrument=None, count=50):
"""
See more:
http://developer.oanda.com/rest-live/orders/#getOrdersForAnAccount
"""
url = "{0}/{1}/accounts/{2}/orders".format(
self.domain,
self.API_VERSION,
self.account_id
)
params = {"instrument": instrument, "count": count}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except AssertionError:
return False
|
See more:
http://developer.oanda.com/rest-live/orders/#getOrdersForAnAccount
|
entailment
|
def get_order(self, order_id):
"""
See more:
http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder
"""
url = "{0}/{1}/accounts/{2}/orders/{3}".format(
self.domain,
self.API_VERSION,
self.account_id,
order_id
)
try:
return self._Client__call(uri=url, method="get")
except RequestException:
return False
except AssertionError:
return False
|
See more:
http://developer.oanda.com/rest-live/orders/#getInformationForAnOrder
|
entailment
|
def create_order(self, order):
"""
See more:
http://developer.oanda.com/rest-live/orders/#createNewOrder
"""
url = "{0}/{1}/accounts/{2}/orders".format(
self.domain,
self.API_VERSION,
self.account_id
)
try:
return self._Client__call(
uri=url,
params=order.__dict__,
method="post"
)
except RequestException:
return False
except AssertionError:
return False
|
See more:
http://developer.oanda.com/rest-live/orders/#createNewOrder
|
entailment
|
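create_order serializes whatever object it receives via order.__dict__, so any attribute container works. The Order class is not part of this excerpt; a minimal hypothetical stand-in (field names guessed loosely from OANDA's v1 order endpoint) shows the shape the wrapper expects:

class Order(object):
    """Hypothetical order container; only its __dict__ matters to create_order."""
    def __init__(self, instrument, units, side, type):
        self.instrument = instrument  # e.g. "EUR_USD"
        self.units = units            # position size
        self.side = side              # "buy" or "sell"
        self.type = type              # e.g. "market"

# client.create_order(Order("EUR_USD", 1000, "buy", "market"))
# POSTs params {"instrument": "EUR_USD", "units": 1000, "side": "buy", "type": "market"}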
def get_trades(self, max_id=None, count=None, instrument=None, ids=None):
""" Get a list of open trades
Parameters
----------
max_id : int
The server will return trades with id less than or equal
to this, in descending order (for pagination)
count : int
Maximum number of open trades to return. Default: 50 Max
value: 500
instrument : str
Retrieve open trades for a specific instrument only
Default: all
ids : list
A list of trades to retrieve. Maximum number of ids: 50.
No other parameter may be specified with the ids
parameter.
See more:
http://developer.oanda.com/rest-live/trades/#getListOpenTrades
"""
url = "{0}/{1}/accounts/{2}/trades".format(
self.domain,
self.API_VERSION,
self.account_id
)
params = {
"maxId": int(max_id) if max_id and max_id > 0 else None,
"count": int(count) if count and count > 0 else None,
"instrument": instrument,
"ids": ','.join(ids) if ids else None
}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except AssertionError:
return False
|
Get a list of open trades
Parameters
----------
max_id : int
The server will return trades with id less than or equal
to this, in descending order (for pagination)
count : int
Maximum number of open trades to return. Default: 50 Max
value: 500
instrument : str
Retrieve open trades for a specific instrument only
Default: all
ids : list
A list of trades to retrieve. Maximum number of ids: 50.
No other parameter may be specified with the ids
parameter.
See more:
http://developer.oanda.com/rest-live/trades/#getListOpenTrades
|
entailment
|
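A usage sketch, assuming client is an already-constructed instance of this class (construction is not shown in this excerpt); it pages backwards through open trades with max_id, as described above:

# first page of up to 100 open EUR_USD trades
page = client.get_trades(instrument="EUR_USD", count=100)
while page and page.get("trades"):
    for trade in page["trades"]:
        print(trade["id"])
    oldest = page["trades"][-1]["id"]
    page = client.get_trades(instrument="EUR_USD", count=100,
                             max_id=oldest - 1)

# or fetch specific trades; no other parameter may accompany ids
# client.get_trades(ids=["43211", "43215"])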
def update_trade(
self,
trade_id,
stop_loss=None,
take_profit=None,
trailing_stop=None
):
""" Modify an existing trade.
Note: Only the specified parameters will be modified. All
other parameters will remain unchanged. To remove an
optional parameter, set its value to 0.
Parameters
----------
trade_id : int
The id of the trade to modify.
stop_loss : number
Stop Loss value.
take_profit : number
Take Profit value.
trailing_stop : number
Trailing Stop distance in pips, up to one decimal place
See more:
http://developer.oanda.com/rest-live/trades/#modifyExistingTrade
"""
url = "{0}/{1}/accounts/{2}/trades/{3}".format(
self.domain,
self.API_VERSION,
self.account_id,
trade_id
)
params = {
"stopLoss": stop_loss,
"takeProfit": take_profit,
"trailingStop": trailing_stop
}
try:
return self._Client__call(uri=url, params=params, method="patch")
except RequestException:
return False
except AssertionError:
return False
|
Modify an existing trade.
Note: Only the specified parameters will be modified. All
other parameters will remain unchanged. To remove an
optional parameter, set its value to 0.
Parameters
----------
trade_id : int
The id of the trade to modify.
stop_loss : number
Stop Loss value.
take_profit : number
Take Profit value.
trailing_stop : number
Trailing Stop distance in pips, up to one decimal place
See more:
http://developer.oanda.com/rest-live/trades/#modifyExistingTrade
|
entailment
|
def request_transaction_history(self):
""" Request full account history.
Submit a request for a full transaction history. A
successfully accepted submission results in a response
containing a URL in the Location header to a file that will
be available once the request is served. Requests to the URL
will return HTTP 404 until the file is ready. Once served,
the URL remains valid for a certain amount of time.
See more:
http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
"""
url = "{0}/{1}/accounts/{2}/alltransactions".format(
self.domain,
self.API_VERSION,
self.account_id
)
try:
resp = self.__get_response(url)
return resp.headers['location']
except RequestException:
return False
except AssertionError:
return False
|
Request full account history.
Submit a request for a full transaction history. A
successfully accepted submission results in a response
containing a URL in the Location header to a file that will
be available once the request is served. Requests to the URL
will return HTTP 404 until the file is ready. Once served,
the URL remains valid for a certain amount of time.
See more:
http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
|
entailment
|
def get_transaction_history(self, max_wait=5.0):
""" Download full account history.
Uses request_transaction_history to get the transaction
history URL, then polls the given URL until it's ready (or
the max_wait time is reached) and provides the decoded
response.
Parameters
----------
max_wait : float
The total maximum time to spend waiting for the file to
be ready; if this is exceeded a failed response will be
returned. This is not guaranteed to be strictly
followed, as one last attempt will be made to check the
file before giving up.
See more:
http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
"""
url = self.request_transaction_history()
if not url:
return False
ready = False
start = time()
delay = 0.1
while not ready and delay:
response = requests.head(url)
ready = response.ok
if not ready:
sleep(delay)
time_remaining = max_wait - time() + start
max_delay = max(0., time_remaining - .1)
delay = min(delay * 2, max_delay)
if not ready:
return False
response = requests.get(url)
try:
with ZipFile(BytesIO(response.content)) as container:
files = container.namelist()
if not files:
log.error('Transaction ZIP has no files.')
return False
history = container.open(files[0])
raw = history.read().decode('ascii')
except BadZipfile:
log.error('Response is not a valid ZIP file', exc_info=True)
return False
return json.loads(raw, **self.json_options)
|
Download full account history.
Uses request_transaction_history to get the transaction
history URL, then polls the given URL until it's ready (or
the max_wait time is reached) and provides the decoded
response.
Parameters
----------
max_wait : float
The total maximum time to spend waiting for the file to
be ready; if this is exceeded a failed response will be
returned. This is not guaranteed to be strictly
followed, as one last attempt will be made to check the
file before giving up.
See more:
http://developer.oanda.com/rest-live/transaction-history/#getFullAccountHistory
http://developer.oanda.com/rest-live/transaction-history/#transactionTypes
|
entailment
|
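The polling loop above doubles its delay each round but caps it by the time remaining, so the total wait stays close to max_wait. The same backoff pattern, extracted into a self-contained sketch (requests and the url argument are the only assumptions):

import time
import requests

def poll_until_ready(url, max_wait=5.0):
    """HEAD the url with capped exponential backoff until it answers
    2xx or roughly max_wait seconds have elapsed."""
    start = time.time()
    delay = 0.1
    while delay:
        if requests.head(url).ok:
            return True
        time.sleep(delay)
        time_remaining = max_wait - (time.time() - start)
        # keep ~0.1s in reserve so one final attempt fits in the budget
        delay = min(delay * 2, max(0.0, time_remaining - 0.1))
    return requests.head(url).ok  # the one last attempt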
def create_account(self, currency=None):
""" Create a new account.
This call is only available on the sandbox system. Please
create accounts on fxtrade.oanda.com on our production
system.
See more:
http://developer.oanda.com/rest-sandbox/accounts/#-a-name-createtestaccount-a-create-a-test-account
"""
url = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
params = {"currency": currency}
try:
return self._Client__call(uri=url, params=params, method="post")
except RequestException:
return False
except AssertionError:
return False
|
Create a new account.
This call is only available on the sandbox system. Please
create accounts on fxtrade.oanda.com on our production
system.
See more:
http://developer.oanda.com/rest-sandbox/accounts/#-a-name-createtestaccount-a-create-a-test-account
|
entailment
|
def get_accounts(self, username=None):
""" Get a list of accounts owned by the user.
Parameters
----------
username : string
The name of the user. Note: This is only required on the
sandbox, on production systems your access token will
identify you.
See more:
http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
"""
url = "{0}/{1}/accounts".format(self.domain, self.API_VERSION)
params = {"username": username}
try:
return self._Client__call(uri=url, params=params, method="get")
except RequestException:
return False
except AssertionError:
return False
|
Get a list of accounts owned by the user.
Parameters
----------
username : string
The name of the user. Note: This is only required on the
sandbox, on production systems your access token will
identify you.
See more:
http://developer.oanda.com/rest-sandbox/accounts/#-a-name-getaccountsforuser-a-get-accounts-for-a-user
|
entailment
|
def choose(self):
"""Marks the item as the one the user is in."""
if not self.choosed:
self.choosed = True
self.pos = self.pos + Sep(5, 0)
|
Marks the item as the one the user is in.
|
entailment
|
def stop_choose(self):
"""Marks the item as the one the user is not in."""
if self.choosed:
self.choosed = False
self.pos = self.pos + Sep(-5, 0)
|
Marks the item as the one the user is not in.
|
entailment
|
def get_darker_color(self):
"""The color of the clicked version of the MenuElement. Darker than the normal one."""
# we change a bit the color in one direction
if bw_contrasted(self._true_color, 30) == WHITE:
color = mix(self._true_color, WHITE, 0.9)
else:
color = mix(self._true_color, BLACK, 0.9)
return color
|
The color of the clicked version of the MenuElement. Darker than the normal one.
|
entailment
|
def render(self, screen):
"""Renders the MenuElement"""
self.rect.render(screen)
super(MenuElement, self).render(screen)
|
Renders the MenuElement
|
entailment
|
def gui():
"""Main function"""
# #######
# setup all objects
# #######
zones = [ALL]
last_zones = []
COLORS.remove(WHITE)
screen = pygame.display.set_mode(SCREEN_SIZE, DOUBLEBUF)
pygame.display.set_caption('Bezier simulator')
pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
points = [
(40, 40),
(100, 400),
(200, 100),
(650, 420)
]
bezier = Bezier((0, 0), SCREEN_SIZE, points, ORANGE, 8)
points = [Point(p, 24, choice(COLORS)) for p in points]
clock = pygame.time.Clock()
fps = FPSIndicator(clock)
dragging = None
render = True
while True:
# #######
# Input loop
# #######
mouse = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == QUIT:
return 0
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
return 0
if e.key == K_F4 and e.mod & KMOD_ALT:
return 0
elif e.type == MOUSEBUTTONDOWN:
if e.button == 1:
dragging = not dragging
if e.button == 3:
points.append(Point(mouse, 24, choice(COLORS)))
bezier.points.append(V2(mouse))
render = True
if dragging:
mdist = 10000
the_p = None
for i, p in enumerate(points):
if p.dist_to(mouse) < mdist:
mdist = p.dist_to(mouse)
the_p = i
render = points[the_p].pos != mouse
points[the_p].pos = mouse
bezier.points[the_p] = V2(mouse)
# #######
# Draw all
# #######
if render:
render = False
screen.fill(WHITE)
bezier.render(screen)
for p in points:
p.render(screen)
zones.append(ALL)
_ = fps.render(screen)
zones.append(_)
pygame.display.update(zones + last_zones)
last_zones = zones[:]
zones.clear()
clock.tick(FPS)
|
Main function
|
entailment
|
def gui():
"""Main function"""
# #######
# setup all objects
# #######
os.environ['SDL_VIDEO_CENTERED'] = '1'
clock = pygame.time.Clock()
screen = pygame.display.set_mode(SCREEN_SIZE, DOUBLEBUF | NOFRAME)
pygame.event.set_allowed([QUIT, KEYDOWN, MOUSEBUTTONDOWN])
game = Morpion()
run = True
while run:
# #######
# Input loop
# #######
mouse = pygame.mouse.get_pos()
for e in pygame.event.get():
if e.type == QUIT:
run = False
elif e.type == KEYDOWN:
if e.key == K_ESCAPE:
run = False
if e.key == K_F4 and e.mod & KMOD_ALT:
return 0
elif e.type == MOUSEBUTTONDOWN:
if e.button == 1:
if pos_from_mouse(mouse):
if game.is_full() or game.is_won():
game = Morpion()
continue
x, y = pos_from_mouse(mouse)
try:
game.play(x, y)
except IndexError:
pass
if pos_from_mouse(mouse):
x, y = pos_from_mouse(mouse)
game.hint(x, y)
# #######
# Draw all
# #######
screen.fill(WHITE)
game.render(screen)
pygame.display.update()
clock.tick(FPS)
|
Main function
|
entailment
|
def px_to_pt(self, px):
"""Convert a size in pxel to a size in points."""
if px < 200:
pt = self.PX_TO_PT[px]
else:
pt = int(floor((px - 1.21) / 1.332))
return pt
|
Convert a size in pixels to a size in points.
|
entailment
|
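Below the 200px threshold the conversion comes from a lookup table (PX_TO_PT, not shown in this excerpt); above it, it is the affine fit pt = floor((px - 1.21) / 1.332). A quick arithmetic check of the large-size branch:

from math import floor

def px_to_pt_large(px):
    # the affine branch used above for px >= 200
    return int(floor((px - 1.21) / 1.332))

print(px_to_pt_large(200))  # 149
print(px_to_pt_large(503))  # 376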
def set_size(self, pt=None, px=None):
"""
Set the size of the font, in px or pt.
The px method is a bit inaccurate: the result can be one or two px too small
(and up to 4 px for big sizes, like 503), but the size is never over-estimated,
so the value is almost right.
"""
assert (pt, px) != (None, None)
if pt is not None:
self.__init__(pt, self.font_name)
else:
self.__init__(self.px_to_pt(px), self.font_name)
|
Set the size of the font, in px or pt.
The px method is a bit inaccurate: the result can be one or two px too small
(and up to 4 px for big sizes, like 503), but the size is never over-estimated,
so the value is almost right.
|
entailment
|
def text(self):
"""Return the string to render."""
if callable(self._text):
return str(self._text())
return str(self._text)
|
Return the string to render.
|
entailment
|
def color(self, value):
"""Set the color to a new value (tuple). Renders the text if needed."""
if value != self.color:
self._color = value
self._render()
|
Set the color to a new value (tuple). Renders the text if needed.
|
entailment
|
def bg_color(self, value):
"""Sets the color to a new value (tuple). Renders the text if needed."""
if value != self.bg_color:
self._bg_color = value
self._render()
|
Sets the color to a new value (tuple). Renders the text if needed.
|
entailment
|
def set_font_size(self, pt=None, px=None):
"""Set the font size to the desired size, in pt or px."""
self.font.set_size(pt, px)
self._render()
|
Set the font size to the desired size, in pt or px.
|
entailment
|
def _render(self):
"""
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
"""
self._last_text = self.text
self._surface = self.font.render(self.text, True, self.color, self.bg_color)
rect = self._surface.get_rect()
self.size = rect.size
|
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
|
entailment
|
def render(self, display):
"""Render basicly the text."""
# to handle changing objects / callable
if self.text != self._last_text:
self._render()
display.blit(self._surface, (self.topleft, self.size))
|
Render the text in a basic way.
|
entailment
|
def cursor(self):
"""The position of the cursor in the text."""
if self._cursor < 0:
self.cursor = 0
if self._cursor > len(self):
self.cursor = len(self)
return self._cursor
|
The position of the cursor in the text.
|
entailment
|
def move_cursor_one_letter(self, letter=RIGHT):
"""Move the cursor of one letter to the right (1) or the the left."""
assert letter in (self.RIGHT, self.LEFT)
if letter == self.RIGHT:
self.cursor += 1
if self.cursor > len(self.text):
self.cursor -= 1
else:
self.cursor -= 1
if self.cursor < 0:
self.cursor += 1
|
Move the cursor one letter to the right (1) or to the left.
|
entailment
|
def move_cursor_one_word(self, word=LEFT):
"""Move the cursor of one word to the right (1) or the the left (-1)."""
assert word in (self.RIGHT, self.LEFT)
if word == self.RIGHT:
papy = self.text.find(' ', self.cursor) + 1
if not papy:
papy = len(self)
self.cursor = papy
else:
papy = self.text.rfind(' ', 0, self.cursor)
if papy == -1:
papy = 0
self.cursor = papy
|
Move the cursor one word to the right (1) or to the left (-1).
|
entailment
|
def delete_one_letter(self, letter=RIGHT):
"""Delete one letter the right or the the left of the cursor."""
assert letter in (self.RIGHT, self.LEFT)
if letter == self.LEFT:
papy = self.cursor
self.text = self.text[:self.cursor - 1] + self.text[self.cursor:]
self.cursor = papy - 1
else:
self.text = self.text[:self.cursor] + self.text[self.cursor + 1:]
|
Delete one letter to the right or to the left of the cursor.
|
entailment
|
def delete_one_word(self, word=RIGHT):
"""Delete one word the right or the the left of the cursor."""
assert word in (self.RIGHT, self.LEFT)
if word == self.RIGHT:
papy = self.text.find(' ', self.cursor) + 1
if not papy:
papy = len(self.text)
self.text = self.text[:self.cursor] + self.text[papy:]
else:
papy = self.text.rfind(' ', 0, self.cursor)
if papy == -1:
papy = 0
self.text = self.text[:papy] + self.text[self.cursor:]
self.cursor = papy
|
Delete one word to the right or to the left of the cursor.
|
entailment
|
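All four word-wise operations above locate boundaries the same way: str.find for the next space to the right of the cursor and str.rfind for the last space to its left, with -1 mapped to the ends of the string. The boundary logic in isolation:

text = "hello brave world"
cursor = 8  # inside "brave"

right = text.find(' ', cursor) + 1   # index just past the next space
if not right:                        # find() gave -1: no space, use the end
    right = len(text)

left = text.rfind(' ', 0, cursor)    # last space before the cursor
if left == -1:                       # no space before: use the start
    left = 0

print(left, right)  # 5 12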
def add_letter(self, letter):
"""Add a letter at the cursor pos."""
assert isinstance(letter, str)
assert len(letter) == 1
self.text = self.text[:self.cursor] + letter + self.text[self.cursor:]
self.cursor += 1
|
Add a letter at the cursor pos.
|
entailment
|
def update(self, event_or_list):
"""Update the text and position of cursor according to the event passed."""
event_or_list = super().update(event_or_list)
for e in event_or_list:
if e.type == KEYDOWN:
if e.key == K_RIGHT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.RIGHT)
else:
self.move_cursor_one_letter(self.RIGHT)
elif e.key == K_LEFT:
if e.mod & KMOD_CTRL:
self.move_cursor_one_word(self.LEFT)
else:
self.move_cursor_one_letter(self.LEFT)
elif e.key == K_BACKSPACE:
if self.cursor == 0:
continue
if e.mod & KMOD_CTRL:
self.delete_one_word(self.LEFT)
else:
self.delete_one_letter(self.LEFT)
elif e.key == K_DELETE:
if e.mod & KMOD_CTRL:
self.delete_one_word(self.RIGHT)
else:
self.delete_one_letter(self.RIGHT)
elif e.unicode != '' and e.unicode.isprintable():
self.add_letter(e.unicode)
|
Update the text and position of cursor according to the event passed.
|
entailment
|
def _render(self):
"""
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
"""
self._last_text = self.text
self._surface = self.font.render(self.text, True, self.color, self.bg_color)
size = self.width, self._surface.get_height()
self.size = size
|
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
|
entailment
|
def shawn_text(self):
"""The text displayed instead of the real one."""
if len(self._shawn_text) == len(self):
return self._shawn_text
if self.style == self.DOTS:
return chr(0x2022) * len(self)
ranges = [
(902, 1366),
(192, 683),
(33, 122)
]
s = ''
while len(s) < len(self.text):
apolo = randint(33, 1366)
for a, b in ranges:
if a <= apolo <= b:
s += chr(apolo)
break
self._shawn_text = s
return s
|
The text displayed instead of the real one.
|
entailment
|
def cursor_pos(self):
"""The cursor position in pixels."""
if len(self) == 0:
return self.left + self.default_text.get_width()
papy = self._surface.get_width()
if papy > self.w:
shift = papy - self.width
else:
shift = 0
return self.left + self.font.size(self.shawn_text[:self.cursor])[0] - shift
|
The cursor position in pixels.
|
entailment
|
def _render(self):
"""
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
"""
self._last_text = self.shawn_text
self._surface = self.font.render(self.shawn_text, True, self.color, self.bg_color)
size = self.w, self._surface.get_height()
self.size = size
|
Render the text.
Avoid calling this function too often, as rendering text and blitting it is slow.
|
entailment
|
def render(self, display):
"""Render basicly the text."""
# to handle changing objects / callable
if self.shawn_text != self._last_text:
self._render()
if self.text:
papy = self._surface.get_width()
if papy <= self.width:
display.blit(self._surface, (self.topleft, self.size))
else:
display.blit(self._surface, (self.topleft, self.size), ((papy - self.w, 0), self.size))
else:
display.blit(self.default_text, (self.topleft, self.size))
if self._focus:
groom = self.cursor_pos()
line(display, (groom, self.top), (groom, self.bottom), CONCRETE)
|
Render the text in a basic way.
|
entailment
|
def latex_to_img(tex):
"""Return a pygame image from a latex template."""
with tempfile.TemporaryDirectory() as tmpdirname:
with open(tmpdirname + r'\tex.tex', 'w') as f:
f.write(tex)
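# NOTE: Windows-style path separators and MiKTeX-specific flags
# (-disable-installer, -aux-directory) are assumed by the calls below.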
os.system(r"latex {0}\tex.tex -halt-on-error -interaction=batchmode -disable-installer -aux-directory={0} "
r"-output-directory={0}".format(tmpdirname))
os.system(r"dvipng -T tight -z 9 --truecolor -o {0}\tex.png {0}\tex.dvi".format(tmpdirname))
# os.system(r'latex2png ' + tmpdirname)
image = pygame.image.load(tmpdirname + r'\tex.png')
return image
|
Return a pygame image from a latex template.
|
entailment
|
def mix(color1, color2, pos=0.5):
"""
Return the mix of two colors at a state of :pos:
Returns color1 * pos + color2 * (1 - pos)
"""
opp_pos = 1 - pos
red = color1[0] * pos + color2[0] * opp_pos
green = color1[1] * pos + color2[1] * opp_pos
blue = color1[2] * pos + color2[2] * opp_pos
return int(red), int(green), int(blue)
|
Return the mix of two colors at a state of :pos:
Returns color1 * pos + color2 * (1 - pos)
|
entailment
|
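mix is a per-channel linear interpolation, so pos=1 gives back color1 and pos=0 gives color2 (channels are truncated to int). For example:

WHITE = (255, 255, 255)
BLACK = (0, 0, 0)

print(mix(WHITE, BLACK))       # (127, 127, 127): the midpoint
print(mix(WHITE, BLACK, 0.9))  # (229, 229, 229): 90% color1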
def name2rgb(name):
"""Convert the name of a color into its RGB value"""
try:
import colour
except ImportError:
raise ImportError('You need colour to be installed: pip install colour')
c = colour.Color(name)
color = int(c.red * 255), int(c.green * 255), int(c.blue * 255)
return color
|
Convert the name of a color into its RGB value
|
entailment
|
def parse_page(page):
"""Parse the command man page."""
colors = get_config()['colors']
with io.open(page, encoding='utf-8') as f:
lines = f.readlines()
output_lines = []
for line in lines[1:]:
if is_headline(line):
continue
elif is_description(line):
output_lines.append(click.style(line.replace('>', ' '),
fg=colors['description']))
elif is_old_usage(line):
output_lines.append(click.style(line, fg=colors['usage']))
elif is_code_example(line):
line = ' ' + line if line.startswith('`') else line[2:]
output_lines.append(click.style(line.replace('`', ''),
fg=colors['command']))
elif is_line_break(line):
output_lines.append(click.style(line))
else:
output_lines.append(click.style('- ' + line, fg=colors['usage']))
return output_lines
|
Parse the command man page.
|
entailment
|
def configure_logging(level=logging.DEBUG):
"""Configure the module logging engine."""
if level == logging.DEBUG:
# For debugging purposes, log from everyone!
logging.basicConfig(
level=logging.DEBUG,
format='%(asctime)s - %(levelname)s - %(message)s'
)
return logging
logger = logging.getLogger(__name__)
logger.setLevel(level)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
ch = logging.StreamHandler()
ch.setLevel(level)
ch.setFormatter(formatter)
logger.addHandler(ch)
return logger
|
Configure the module logging engine.
|
entailment
|
def path_join(*args):
"""
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
"""
args = (paramiko.py3compat.u(arg) for arg in args)
return os.path.join(*args)
|
Wrapper around `os.path.join`.
Makes sure to join paths of the same type (bytes).
|
entailment
|
def parse_username_password_hostname(remote_url):
"""
Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path.
"""
assert remote_url
assert ':' in remote_url
if '@' in remote_url:
username, hostname = remote_url.rsplit('@', 1)
else:
username, hostname = None, remote_url
hostname, remote_path = hostname.split(':', 1)
password = None
if username and ':' in username:
username, password = username.split(':', 1)
assert hostname
assert remote_path
return username, password, hostname, remote_path
|
Parse a command line string and return username, password, remote hostname and remote path.
:param remote_url: A command line string.
:return: A tuple, containing username, password, remote hostname and remote path.
|
entailment
|
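The accepted URL shapes, shown doctest-style (each missing element falls back to None):

>>> parse_username_password_hostname("bob:s3cret@example.com:/srv/data")
('bob', 's3cret', 'example.com', '/srv/data')
>>> parse_username_password_hostname("bob@example.com:/srv/data")
('bob', None, 'example.com', '/srv/data')
>>> parse_username_password_hostname("example.com:/srv/data")
(None, None, 'example.com', '/srv/data')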
def get_ssh_agent_keys(logger):
"""
Ask the SSH agent for a list of keys, and return it.
:return: A reference to the SSH agent and a list of keys.
"""
agent, agent_keys = None, None
try:
agent = paramiko.agent.Agent()
_agent_keys = agent.get_keys()
if not _agent_keys:
agent.close()
logger.error(
"SSH agent didn't provide any valid key. Trying to continue..."
)
else:
agent_keys = tuple(k for k in _agent_keys)
except paramiko.SSHException:
if agent:
agent.close()
agent = None
logger.error("SSH agent speaks a non-compatible protocol. Ignoring it.")
finally:
return agent, agent_keys
|
Ask the SSH agent for a list of keys, and return it.
:return: A reference to the SSH agent and a list of keys.
|
entailment
|
def create_parser():
"""Create the CLI argument parser."""
parser = argparse.ArgumentParser(
description='Sync a local and a remote folder through SFTP.'
)
parser.add_argument(
"path",
type=str,
metavar="local-path",
help="the path of the local folder",
)
parser.add_argument(
"remote",
type=str,
metavar="user[:password]@hostname:remote-path",
help="the ssh-url ([user[:password]@]hostname:remote-path) of the remote folder. "
"The hostname can be specified as a ssh_config's hostname too. "
"Every missing information will be gathered from there",
)
parser.add_argument(
"-k",
"--key",
metavar="identity-path",
action="append",
help="private key identity path (defaults to ~/.ssh/id_rsa)"
)
parser.add_argument(
"-l",
"--logging",
choices=['CRITICAL',
'ERROR',
'WARNING',
'INFO',
'DEBUG',
'NOTSET'],
default='ERROR',
help="set logging level"
)
parser.add_argument(
"-p",
"--port",
default=22,
type=int,
help="SSH remote port (defaults to 22)"
)
parser.add_argument(
"-f",
"--fix-symlinks",
action="store_true",
help="fix symbolic links on remote side"
)
parser.add_argument(
"-a",
"--ssh-agent",
action="store_true",
help="enable ssh-agent support"
)
parser.add_argument(
"-c",
"--ssh-config",
metavar="ssh_config path",
default="~/.ssh/config",
type=str,
help="path to the ssh-configuration file (default to ~/.ssh/config)"
)
parser.add_argument(
"-n",
"--known-hosts",
metavar="known_hosts path",
default="~/.ssh/known_hosts",
type=str,
help="path to the openSSH known_hosts file"
)
parser.add_argument(
"-d",
"--disable-known-hosts",
action="store_true",
help="disable known_hosts fingerprint checking (security warning!)"
)
parser.add_argument(
"-e",
"--exclude-from",
metavar="exclude-from-file-path",
type=str,
help="exclude files matching pattern in exclude-from-file-path"
)
parser.add_argument(
"-t",
"--do-not-delete",
action="store_true",
help="do not delete remote files missing from local folder"
)
parser.add_argument(
"-o",
"--allow-unknown",
action="store_true",
help="allow connection to unknown hosts"
)
parser.add_argument(
"-r",
"--create-remote-directory",
action="store_true",
help="Create remote base directory if missing on remote"
)
return parser
|
Create the CLI argument parser.
|
entailment
|
def main(args=None):
"""The main."""
parser = create_parser()
args = vars(parser.parse_args(args))
log_mapping = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
'NOTSET': logging.NOTSET,
}
log_level = log_mapping[args['logging']]
del args['logging']
global logger
logger = configure_logging(log_level)
args_mapping = {
"path": "local_path",
"remote": "remote_url",
"ssh_config": "ssh_config_path",
"exclude_from": "exclude_file",
"known_hosts": "known_hosts_path",
"do_not_delete": "delete",
"key": "identity_files",
}
kwargs = { # convert the argument names to class constructor parameters
args_mapping[k]: v
for k, v in args.items()
if v and k in args_mapping
}
kwargs.update({
k: v
for k, v in args.items()
if v and k not in args_mapping
})
# Special case: disable known_hosts check
if args['disable_known_hosts']:
kwargs['known_hosts_path'] = None
del kwargs['disable_known_hosts']
# Toggle `do_not_delete` flag
if "delete" in kwargs:
kwargs["delete"] = not kwargs["delete"]
# Manually set the default identity file.
kwargs["identity_files"] = kwargs.get("identity_files", None) or ["~/.ssh/id_rsa"]
sync = SFTPClone(
**kwargs
)
sync.run()
|
The main.
|
entailment
|
def _must_be_deleted(local_path, r_st):
"""Return True if the remote correspondent of local_path has to be deleted.
i.e. if it doesn't exists locally or if it has a different type from the remote one."""
# if the file doesn't exists
if not os.path.lexists(local_path):
return True
# or if the file type is different
l_st = os.lstat(local_path)
if S_IFMT(r_st.st_mode) != S_IFMT(l_st.st_mode):
return True
return False
|
Return True if the remote correspondent of local_path has to be deleted,
i.e. if it doesn't exist locally or if its type differs from the remote one.
|
entailment
|
def _match_modes(self, remote_path, l_st):
"""Match mod, utime and uid/gid with locals one."""
self.sftp.chmod(remote_path, S_IMODE(l_st.st_mode))
self.sftp.utime(remote_path, (l_st.st_atime, l_st.st_mtime))
if self.chown:
self.sftp.chown(remote_path, l_st.st_uid, l_st.st_gid)
|
Match mode, utime and uid/gid with the local ones.
|
entailment
|
def file_upload(self, local_path, remote_path, l_st):
"""Upload local_path to remote_path and set permission and mtime."""
self.sftp.put(local_path, remote_path)
self._match_modes(remote_path, l_st)
|
Upload local_path to remote_path and set permission and mtime.
|
entailment
|
def remote_delete(self, remote_path, r_st):
"""Remove the remote directory node."""
# If it's a directory, then delete content and directory
if S_ISDIR(r_st.st_mode):
for item in self.sftp.listdir_attr(remote_path):
full_path = path_join(remote_path, item.filename)
self.remote_delete(full_path, item)
self.sftp.rmdir(remote_path)
# Or simply delete files
else:
try:
self.sftp.remove(remote_path)
except FileNotFoundError as e:
self.logger.error(
"error while removing {}. trace: {}".format(remote_path, e)
)
|
Remove the remote node: recursively for directories, directly for files.
|
entailment
|
def check_for_deletion(self, relative_path=None):
"""Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
"""
if not relative_path:
relative_path = str() # root of shared directory tree
remote_path = path_join(self.remote_path, relative_path)
local_path = path_join(self.local_path, relative_path)
for remote_st in self.sftp.listdir_attr(remote_path):
r_lstat = self.sftp.lstat(path_join(remote_path, remote_st.filename))
inner_remote_path = path_join(remote_path, remote_st.filename)
inner_local_path = path_join(local_path, remote_st.filename)
# check if remote_st is a symlink
# otherwise could delete file outside shared directory
if S_ISLNK(r_lstat.st_mode):
if self._must_be_deleted(inner_local_path, r_lstat):
self.remote_delete(inner_remote_path, r_lstat)
continue
if self._must_be_deleted(inner_local_path, remote_st):
self.remote_delete(inner_remote_path, remote_st)
elif S_ISDIR(remote_st.st_mode):
self.check_for_deletion(
path_join(relative_path, remote_st.filename)
)
|
Traverse the entire remote_path tree.
Find files/directories that need to be deleted,
not being present in the local folder.
|
entailment
|
def create_update_symlink(self, link_destination, remote_path):
"""Create a new link pointing to link_destination in remote_path position."""
try: # if there's anything, delete it
self.sftp.remove(remote_path)
except IOError: # that's fine, nothing exists there!
pass
finally: # and recreate the link
try:
self.sftp.symlink(link_destination, remote_path)
except OSError as e:
# Sometimes, if links are "too" different, symlink fails.
# Sadly, nothing we can do about it.
self.logger.error("error while symlinking {} to {}: {}".format(
remote_path, link_destination, e))
|
Create a new link pointing to link_destination in remote_path position.
|
entailment
|
def node_check_for_upload_create(self, relative_path, f):
"""Check if the given directory tree node has to be uploaded/created on the remote folder."""
if not relative_path:
# we're at the root of the shared directory tree
relative_path = str()
# the (absolute) local address of f.
local_path = path_join(self.local_path, relative_path, f)
try:
l_st = os.lstat(local_path)
except OSError as e:
"""A little background here.
Sometimes, in big cluster configurations (mail servers, etc.),
files can suddenly disappear or be moved.
There's nothing to do about it;
the system should be stopped before doing backups.
Anyway, we log it and skip it.
"""
self.logger.error("error while checking {}: {}".format(relative_path, e))
return
if local_path in self.exclude_list:
self.logger.info("Skipping excluded file %s.", local_path)
return
# the (absolute) remote address of f.
remote_path = path_join(self.remote_path, relative_path, f)
# First case: f is a directory
if S_ISDIR(l_st.st_mode):
# we check if the folder exists on the remote side
# it has to be a folder, otherwise it would have already been
# deleted
try:
self.sftp.stat(remote_path)
except IOError: # it doesn't exist yet on remote side
self.sftp.mkdir(remote_path)
self._match_modes(remote_path, l_st)
# now, we should traverse f too (recursion magic!)
self.check_for_upload_create(path_join(relative_path, f))
# Second case: f is a symbolic link
elif S_ISLNK(l_st.st_mode):
# read the local link
local_link = os.readlink(local_path)
absolute_local_link = os.path.realpath(local_link)
# is it absolute?
is_absolute = local_link.startswith("/")
# and does it point inside the shared directory?
# add trailing slash (security)
trailing_local_path = path_join(self.local_path, '')
relpath = os.path.commonprefix(
[absolute_local_link,
trailing_local_path]
) == trailing_local_path
if relpath:
relative_link = absolute_local_link[len(trailing_local_path):]
else:
relative_link = None
"""
# Refactor them all, be efficient!
# Case A: absolute link pointing outside shared directory
# (we can only update the remote part)
if is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case B: absolute link pointing inside shared directory
# (we can leave it as it is or fix the prefix to match the one of the remote server)
elif is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Case C: relative link pointing outside shared directory
# (all we can do is try to make the link anyway)
elif not is_absolute and not relpath:
self.create_update_symlink(local_link, remote_path)
# Case D: relative link pointing inside shared directory
# (we preserve the relativity and link it!)
elif not is_absolute and relpath:
self.create_update_symlink(local_link, remote_path)
"""
if is_absolute and relpath:
if self.fix_symlinks:
self.create_update_symlink(
path_join(
self.remote_path,
relative_link,
),
remote_path
)
else:
self.create_update_symlink(local_link, remote_path)
# Third case: regular file
elif S_ISREG(l_st.st_mode):
try:
r_st = self.sftp.lstat(remote_path)
if self._file_need_upload(l_st, r_st):
self.file_upload(local_path, remote_path, l_st)
except IOError as e:
if e.errno == errno.ENOENT:
self.file_upload(local_path, remote_path, l_st)
# Anything else.
else:
self.logger.warning("Skipping unsupported file %s.", local_path)
|
Check if the given directory tree node has to be uploaded/created on the remote folder.
|
entailment
|
def check_for_upload_create(self, relative_path=None):
"""Traverse the relative_path tree and check for files that need to be uploaded/created.
Relativity here refers to the shared directory tree."""
for f in os.listdir(
path_join(
self.local_path, relative_path) if relative_path else self.local_path
):
self.node_check_for_upload_create(relative_path, f)
|
Traverse the relative_path tree and check for files that need to be uploaded/created.
Relativity here refers to the shared directory tree.
|
entailment
|
def run(self):
"""Run the sync.
Compare the local and the remote directories and perform the needed changes."""
# Check if remote path is present
try:
self.sftp.stat(self.remote_path)
except FileNotFoundError as e:
if self.create_remote_directory:
self.sftp.mkdir(self.remote_path)
self.logger.info(
"Created missing remote dir: '" + self.remote_path + "'")
else:
self.logger.error(
"Remote folder does not exists. "
"Add '-r' to create it if missing.")
sys.exit(1)
try:
if self.delete:
# First check for items to be removed
self.check_for_deletion()
# Now scan local for items to upload/create
self.check_for_upload_create()
except FileNotFoundError:
# If this happens, probably the remote folder doesn't exist.
self.logger.error(
"Error while opening remote folder. Are you sure it does exist?")
sys.exit(1)
|
Run the sync.
Compare the local and the remote directories and perform the needed changes.
|
entailment
|
def list_files(start_path):
"""tree unix command replacement."""
s = u'\n'
for root, dirs, files in os.walk(start_path):
level = root.replace(start_path, '').count(os.sep)
indent = ' ' * 4 * level
s += u'{}{}/\n'.format(indent, os.path.basename(root))
sub_indent = ' ' * 4 * (level + 1)
for f in files:
s += u'{}{}\n'.format(sub_indent, f)
return s
|
Replacement for the unix tree command.
|
entailment
|
def file_tree(start_path):
"""
Create a nested dictionary that represents the folder structure of `start_path`.
Liberally adapted from
http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
"""
nested_dirs = {}
root_dir = start_path.rstrip(os.sep)
start = root_dir.rfind(os.sep) + 1
for path, dirs, files in os.walk(root_dir):
folders = path[start:].split(os.sep)
subdir = dict.fromkeys(files)
parent = reduce(dict.get, folders[:-1], nested_dirs)
parent[folders[-1]] = subdir
return nested_dirs
|
Create a nested dictionary that represents the folder structure of `start_path`.
Liberally adapted from
http://code.activestate.com/recipes/577879-create-a-nested-dictionary-from-oswalk/
|
entailment
|
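A usage sketch; it builds a tiny tree in a temporary directory and prints the nested dict (note that file_tree itself also needs from functools import reduce on Python 3):

import os
import tempfile

root = tempfile.mkdtemp()
os.makedirs(os.path.join(root, "src"))
open(os.path.join(root, "src", "main.py"), "w").close()
open(os.path.join(root, "README.md"), "w").close()

print(file_tree(root))
# e.g. {'tmpab12cd': {'README.md': None, 'src': {'main.py': None}}}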
def capture_sys_output():
"""Capture standard output and error."""
capture_out, capture_err = StringIO(), StringIO()
current_out, current_err = sys.stdout, sys.stderr
try:
sys.stdout, sys.stderr = capture_out, capture_err
yield capture_out, capture_err
finally:
sys.stdout, sys.stderr = current_out, current_err
|
Capture standard output and error.
|
entailment
|
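The bare yield marks this as a generator-based context manager; presumably it carries a contextlib.contextmanager decorator in the full module. Under that assumption, usage looks like:

with capture_sys_output() as (out, err):
    print("captured")

assert out.getvalue() == "captured\n"
assert err.getvalue() == ""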
def suppress_logging(log_level=logging.CRITICAL):
"""Suppress logging."""
logging.disable(log_level)
yield
logging.disable(logging.NOTSET)
|
Suppress logging.
|
entailment
|
def override_env_variables():
"""Override user environmental variables with custom one."""
env_vars = ("LOGNAME", "USER", "LNAME", "USERNAME")
old = [os.environ[v] if v in os.environ else None for v in env_vars]
for v in env_vars:
os.environ[v] = "test"
yield
for i, v in enumerate(env_vars):
    if old[i] is not None:
        os.environ[v] = old[i]
    else:
        os.environ.pop(v, None)  # wasn't set before: drop the override
|
Override user environment variables with custom ones.
|
entailment
|
def override_ssh_auth_env():
"""Override the `$SSH_AUTH_SOCK `env variable to mock the absence of an SSH agent."""
ssh_auth_sock = "SSH_AUTH_SOCK"
old_ssh_auth_sock = os.environ.get(ssh_auth_sock)
os.environ.pop(ssh_auth_sock, None)  # tolerate the variable being unset
yield
if old_ssh_auth_sock:
os.environ[ssh_auth_sock] = old_ssh_auth_sock
|
Override the `$SSH_AUTH_SOCK` env variable to mock the absence of an SSH agent.
|
entailment
|
def get_config():
"""Get the configurations from .tldrrc and return it as a dict."""
config_path = path.join(
(os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
'.tldrrc')
if not path.exists(config_path):
sys.exit("Can't find config file at: {0}. You may use `tldr init` "
"to init the config file.".format(config_path))
with io.open(config_path, encoding='utf-8') as f:
try:
config = yaml.safe_load(f)
except yaml.scanner.ScannerError:
sys.exit("The config file is not a valid YAML file.")
supported_colors = ['black', 'red', 'green', 'yellow', 'blue',
'magenta', 'cyan', 'white']
if not set(config['colors'].values()).issubset(set(supported_colors)):
sys.exit("Unsupported colors in config file: {0}.".format(
', '.join(set(config['colors'].values()) - set(supported_colors))))
if not path.exists(config['repo_directory']):
sys.exit("Can't find the tldr repo, check the `repo_directory` "
"setting in config file.")
return config
|
Get the configurations from .tldrrc and return it as a dict.
|
entailment
|
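get_config expects $TLDR_CONFIG_DIR/.tldrrc (or ~/.tldrrc) to carry the three keys that the init command writes (shown further down). A sketch producing a valid file; the repo path is a placeholder:

import os
import yaml

config = {
    "repo_directory": "/home/me/code/tldr",  # placeholder: a cloned tldr repo
    "platform": "linux",                     # linux, osx or sunos
    "colors": {
        "description": "blue",
        "usage": "green",
        "command": "cyan",
    },
}
with open(os.path.expanduser("~/.tldrrc"), "w") as f:
    f.write(yaml.safe_dump(config, default_flow_style=False))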
def parse_man_page(command, platform):
"""Parse the man page and return the parsed lines."""
page_path = find_page_location(command, platform)
output_lines = parse_page(page_path)
return output_lines
|
Parse the man page and return the parsed lines.
|
entailment
|
def find_page_location(command, specified_platform):
"""Find the command man page in the pages directory."""
repo_directory = get_config()['repo_directory']
default_platform = get_config()['platform']
command_platform = (
specified_platform if specified_platform else default_platform)
with io.open(path.join(repo_directory, 'pages/index.json'),
encoding='utf-8') as f:
index = json.load(f)
command_list = [item['name'] for item in index['commands']]
if command not in command_list:
sys.exit(
("Sorry, we don't support command: {0} right now.\n"
"You can file an issue or send a PR on github:\n"
" https://github.com/tldr-pages/tldr").format(command))
supported_platforms = index['commands'][
command_list.index(command)]['platform']
if command_platform in supported_platforms:
platform = command_platform
elif 'common' in supported_platforms:
platform = 'common'
else:
platform = ''
if not platform:
sys.exit(
("Sorry, command {0} is not supported on your platform.\n"
"You can file an issue or send a PR on github:\n"
" https://github.com/tldr-pages/tldr").format(command))
page_path = path.join(path.join(repo_directory, 'pages'),
path.join(platform, command + '.md'))
return page_path
|
Find the command man page in the pages directory.
|
entailment
|
def find(command, on):
"""Find the command usage."""
output_lines = parse_man_page(command, on)
click.echo(''.join(output_lines))
|
Find the command usage.
|
entailment
|
def update():
"""Update to the latest pages."""
repo_directory = get_config()['repo_directory']
os.chdir(repo_directory)
click.echo("Check for updates...")
local = subprocess.check_output('git rev-parse master'.split()).strip()
remote = subprocess.check_output(
'git ls-remote https://github.com/tldr-pages/tldr/ HEAD'.split()
).split()[0]
if local != remote:
click.echo("Updating...")
subprocess.check_call('git checkout master'.split())
subprocess.check_call('git pull --rebase'.split())
build_index()
click.echo("Update to the latest and rebuild the index.")
else:
click.echo("No need for updates.")
|
Update to the latest pages.
|
entailment
|
def init():
"""Init config file."""
default_config_path = path.join(
(os.environ.get('TLDR_CONFIG_DIR') or path.expanduser('~')),
'.tldrrc')
if path.exists(default_config_path):
click.echo("There is already a config file exists, "
"skip initializing it.")
else:
repo_path = click.prompt("Input the tldr repo path(absolute path)")
if not path.exists(repo_path):
sys.exit("Repo path not exist, clone it first.")
platform = click.prompt("Input your platform(linux, osx or sunos)")
if platform not in ['linux', 'osx', 'sunos']:
sys.exit("Platform should be in linux, osx or sunos.")
colors = {
"description": "blue",
"usage": "green",
"command": "cyan"
}
config = {
"repo_directory": repo_path,
"colors": colors,
"platform": platform
}
with open(default_config_path, 'w') as f:
f.write(yaml.safe_dump(config, default_flow_style=False))
click.echo("Initializing the config file at {0}".format(
default_config_path))
|
Init config file.
|
entailment
|
def locate(command, on):
"""Locate the command's man page."""
location = find_page_location(command, on)
click.echo(location)
|
Locate the command's man page.
|
entailment
|
def relate(cls, propname, *args, **kwargs):
"""Produce a relationship between this mapped table and another
one.
This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship`
construct.
"""
class_mapper(cls)._configure_property(propname, relationship(*args, **kwargs))
|
Produce a relationship between this mapped table and another
one.
This makes usage of SQLAlchemy's :func:`sqlalchemy.orm.relationship`
construct.
|
entailment
|
def execute(self, stmt, **params):
"""Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
"""
return self.session.execute(sql.text(stmt, bind=self.bind), **params)
|
Execute a SQL statement.
The statement may be a string SQL string,
an :func:`sqlalchemy.sql.expression.select` construct, or a
:func:`sqlalchemy.sql.expression.text`
construct.
|
entailment
|
def map_to(self, attrname, tablename=None, selectable=None,
schema=None, base=None, mapper_args=util.immutabledict()):
"""Configure a mapping to the given attrname.
This is the "master" method that can be used to create any
configuration.
:param attrname: String attribute name which will be
established as an attribute on this :class:.`.SQLSoup`
instance.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
:param tablename: String name of a :class:`.Table` to be
reflected. If a :class:`.Table` is already available,
use the ``selectable`` argument. This argument is
mutually exclusive versus the ``selectable`` argument.
:param selectable: a :class:`.Table`, :class:`.Join`, or
:class:`.Select` object which will be mapped. This
argument is mutually exclusive versus the ``tablename``
argument.
:param schema: String schema name to use if the
``tablename`` argument is present.
"""
if attrname in self._cache:
raise SQLSoupError(
"Attribute '%s' is already mapped to '%s'" % (
attrname,
class_mapper(self._cache[attrname]).mapped_table
))
if tablename is not None:
if not isinstance(tablename, basestring):
raise ArgumentError("'tablename' argument must be a string."
)
if selectable is not None:
raise ArgumentError("'tablename' and 'selectable' "
"arguments are mutually exclusive")
selectable = Table(tablename,
self._metadata,
autoload=True,
autoload_with=self.bind,
schema=schema or self.schema)
elif schema:
raise ArgumentError("'tablename' argument is required when "
"using 'schema'.")
elif selectable is not None:
if not isinstance(selectable, expression.FromClause):
raise ArgumentError("'selectable' argument must be a "
"table, select, join, or other "
"selectable construct.")
else:
raise ArgumentError("'tablename' or 'selectable' argument is "
"required.")
if not selectable.primary_key.columns and not \
'primary_key' in mapper_args:
if tablename:
raise SQLSoupError(
"table '%s' does not have a primary "
"key defined" % tablename)
else:
raise SQLSoupError(
"selectable '%s' does not have a primary "
"key defined" % selectable)
mapped_cls = _class_for_table(
self.session,
self.engine,
selectable,
base or self.base,
mapper_args
)
self._cache[attrname] = mapped_cls
return mapped_cls
|
Configure a mapping to the given attrname.
This is the "master" method that can be used to create any
configuration.
:param attrname: String attribute name which will be
established as an attribute on this :class:.`.SQLSoup`
instance.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
:param tablename: String name of a :class:`.Table` to be
reflected. If a :class:`.Table` is already available,
use the ``selectable`` argument. This argument is
mutually exclusive versus the ``selectable`` argument.
:param selectable: a :class:`.Table`, :class:`.Join`, or
:class:`.Select` object which will be mapped. This
argument is mutually exclusive versus the ``tablename``
argument.
:param schema: String schema name to use if the
``tablename`` argument is present.
|
entailment
|
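A usage sketch against a hypothetical users table; the db handle and the table are assumptions, not part of this excerpt:

db = SQLSoup("postgresql://scott:tiger@localhost/test")  # hypothetical URL
# reflect the "users" table and expose it as db.people
people = db.map_to("people", tablename="users")
# mapping the same attribute name a second time raises SQLSoupError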
def map(self, selectable, base=None, **mapper_args):
"""Map a selectable directly.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
"""
return _class_for_table(
self.session,
self.engine,
selectable,
base or self.base,
mapper_args
)
|
Map a selectable directly.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
|
entailment
|
def with_labels(self, selectable, base=None, **mapper_args):
"""Map a selectable directly, wrapping the
selectable in a subquery with labels.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
"""
# TODO give meaningful aliases
return self.map(
expression._clause_element_as_expr(selectable).
select(use_labels=True).
alias('foo'), base=base, **mapper_args)
|
Map a selectable directly, wrapping the
selectable in a subquery with labels.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param selectable: an :func:`.expression.select` construct.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
|
entailment
|
def join(self, left, right, onclause=None, isouter=False,
base=None, **mapper_args):
"""Create an :func:`.expression.join` and map to it.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param left: a mapped class or table object.
:param right: a mapped class or table object.
:param onclause: optional "ON" clause construct..
:param isouter: if True, the join will be an OUTER join.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
"""
j = join(left, right, onclause=onclause, isouter=isouter)
return self.map(j, base=base, **mapper_args)
|
Create an :func:`.expression.join` and map to it.
The class and its mapping are not cached and will
be discarded once dereferenced (as of 0.6.6).
:param left: a mapped class or table object.
:param right: a mapped class or table object.
:param onclause: optional "ON" clause construct..
:param isouter: if True, the join will be an OUTER join.
:param base: a Python class which will be used as the
base for the mapped class. If ``None``, the "base"
argument specified by this :class:`.SQLSoup`
instance's constructor will be used, which defaults to
``object``.
:param mapper_args: Dictionary of arguments which will
be passed directly to :func:`.orm.mapper`.
|
entailment
|
def entity(self, attr, schema=None):
"""Return the named entity from this :class:`.SQLSoup`, or
create if not present.
For more generalized mapping, see :meth:`.map_to`.
"""
try:
return self._cache[attr]
except KeyError:
return self.map_to(attr, tablename=attr, schema=schema)
|
Return the named entity from this :class:`.SQLSoup`, or
create if not present.
For more generalized mapping, see :meth:`.map_to`.
|
entailment
|
def distance(f1, f2):
"""\
Distance between 2 features. The integer result is always positive or zero.
If the features overlap or touch, it is zero.
>>> from intersecter import Feature, distance
>>> distance(Feature(1, 2), Feature(12, 13))
10
>>> distance(Feature(1, 2), Feature(2, 3))
0
>>> distance(Feature(1, 100), Feature(20, 30))
0
"""
if f1.end < f2.start: return f2.start - f1.end
if f2.end < f1.start: return f1.start - f2.end
return 0
|
\
Distance between 2 features. The integer result is always positive or zero.
If the features overlap or touch, it is zero.
>>> from intersecter import Feature, distance
>>> distance(Feature(1, 2), Feature(12, 13))
10
>>> distance(Feature(1, 2), Feature(2, 3))
0
>>> distance(Feature(1, 100), Feature(20, 30))
0
|
entailment
|
def find(self, start, end, chrom=None):
"""Return a object of all stored intervals intersecting between (start, end) inclusive."""
intervals = self.intervals[chrom]
ilen = len(intervals)
# NOTE: we only search for starts, since any feature that starts within max_len of
# the query could overlap, we must subtract max_len from the start to get the needed
# search space. everything else proceeds like a binary search.
# (but add distance calc for candidates).
if chrom not in self.max_len: return []
ileft = binsearch_left_start(intervals, start - self.max_len[chrom], 0, ilen)
iright = binsearch_right_end(intervals, end, ileft, ilen)
query = Feature(start, end)
# we have to check the distance to make sure we didn't pick up anything
# that started within max_len but wasn't as long as max_len
return [f for f in intervals[ileft:iright] if distance(f, query) == 0]
|
Return a list of all stored intervals intersecting the range (start, end) inclusive.
|
entailment
|
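binsearch_left_start and binsearch_right_end are not included in this excerpt. Assuming intervals are kept sorted by start, plausible equivalents via the standard bisect module would be (rebuilding the key list per call for clarity, not speed):

import bisect

def binsearch_left_start(intervals, x, lo, hi):
    # leftmost index whose interval starts at or after x
    return bisect.bisect_left([iv.start for iv in intervals], x, lo, hi)

def binsearch_right_end(intervals, x, lo, hi):
    # first index whose interval starts strictly after x
    return bisect.bisect_right([iv.start for iv in intervals], x, lo, hi)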
def left(self, f, n=1):
"""return the nearest n features strictly to the left of a Feature f.
Overlapping features are not considered as to the left.
f: a Feature object
n: the number of features to return
"""
intervals = self.intervals[f.chrom]
if intervals == []: return []
iright = binsearch_left_start(intervals, f.start, 0 , len(intervals)) + 1
ileft = binsearch_left_start(intervals, f.start - self.max_len[f.chrom] - 1, 0, 0)
results = sorted((distance(other, f), other) for other in intervals[ileft:iright] if other.end < f.start and distance(f, other) != 0)
if len(results) == n:
return [r[1] for r in results]
# have to do some extra work here since intervals are sorted
# by starts, and we dont know which end may be around...
# in this case, we got some extras, just return as many as
# needed once we see a gap in distances.
for i in range(n, len(results)):
if results[i - 1][0] != results[i][0]:
return [r[1] for r in results[:i]]
if ileft == 0:
return [r[1] for r in results]
# here, we didn't get enough, so we'd have to widen the window
# leftwards and try again; that fallback is not implemented
raise NotImplementedError("left() cannot widen its search window")
|
return the nearest n features strictly to the left of a Feature f.
Overlapping features are not considered as to the left.
f: a Feature object
n: the number of features to return
|
entailment
|
def right(self, f, n=1):
"""return the nearest n features strictly to the right of a Feature f.
Overlapping features are not considered as to the right.
f: a Feature object
n: the number of features to return
"""
intervals = self.intervals[f.chrom]
ilen = len(intervals)
iright = binsearch_right_end(intervals, f.end, 0, ilen)
results = []
while iright < ilen:
i = len(results)
if i > n:
if distance(f, results[i - 1]) != distance(f, results[i - 2]):
return results[:i - 1]
other = intervals[iright]
iright += 1
if distance(other, f) == 0: continue
results.append(other)
return results
|
return the nearest n features strictly to the right of a Feature f.
Overlapping features are not considered as to the right.
f: a Feature object
n: the number of features to return
|
entailment
|
def upstream(self, f, n=1):
"""find n upstream features where upstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
"""
if f.strand == -1:
return self.right(f, n)
return self.left(f, n)
|
find n upstream features where upstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
|
entailment
|
def downstream(self, f, n=1):
"""find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
"""
if f.strand == -1:
return self.left(f, n)
return self.right(f, n)
|
find n downstream features where downstream is determined by
the strand of the query Feature f
Overlapping features are not considered.
f: a Feature object
n: the number of features to return
|
entailment
|
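A short illustration of the strand dispatch in upstream/downstream; `ix` is a hypothetical Intersecter instance and the Feature arguments assume the same constructor as the doctests earlier in this file:

q_plus = Feature(5000, 5100, chrom="chr1", strand=1)    # + strand query
q_minus = Feature(5000, 5100, chrom="chr1", strand=-1)  # - strand query

# upstream of a + strand feature lies to its left; for - strand, to its right
ix.upstream(q_plus, n=2)     # same result as ix.left(q_plus, n=2)
ix.upstream(q_minus, n=2)    # same result as ix.right(q_minus, n=2)
ix.downstream(q_minus, n=2)  # same result as ix.left(q_minus, n=2)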
def knearest(self, f_or_start, end=None, chrom=None, k=1):
"""return the n nearest neighbors to the given feature
f: a Feature object
k: the number of features to return
"""
if end is not None:
f = Feature(f_or_start, end, chrom=chrom)
else:
f = f_or_start
DIST = 2000
feats = filter_feats(self.find(f.start - DIST, f.end + DIST, chrom=f.chrom), f, k)
if len(feats) >= k:
return feats
nfeats = k - len(feats)
fleft = Feature(f.start - DIST, f.start, chrom=f.chrom)
feats.extend(self.left(fleft, n=nfeats))
fright = Feature(f.end, f.end + DIST, chrom=f.chrom)
feats.extend(self.right(fright, n=nfeats))
return filter_feats(feats, f, k)
|
return the n nearest neighbors to the given feature
f: a Feature object
k: the number of features to return
|
entailment
|
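knearest delegates to a filter_feats helper that is not shown here; one plausible sketch consistent with how it is called (candidates in, at most k nearest out), offered purely as an assumption:

def filter_feats(feats, f, k):
    # order candidate features by distance to the query and keep the k
    # nearest; the real helper may also de-duplicate and keep distance ties
    ordered = sorted(feats, key=lambda other: distance(other, f))
    return ordered[:k]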
def find(self, start, end):
"""find all elements between (or overlapping) start and end"""
    if self.intervals and end >= self.intervals[0].start:
overlapping = [i for i in self.intervals if i.end >= start
and i.start <= end]
else:
overlapping = []
if self.left and start <= self.center:
overlapping += self.left.find(start, end)
if self.right and end >= self.center:
overlapping += self.right.find(start, end)
return overlapping
|
find all elements between (or overlapping) start and end
|
entailment
|
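This second find is a classic interval-tree walk; the node shape it assumes (inferred from the attribute accesses, not copied from the source) looks roughly like:

class IntervalNode(object):
    # minimal sketch: intervals sorted by start, a center pivot, and
    # left/right children holding intervals entirely on either side
    def __init__(self, intervals, center, left=None, right=None):
        self.intervals = sorted(intervals, key=lambda iv: iv.start)
        self.center = center
        self.left = left
        self.right = right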
def sequence(db, chrom, start, end):
"""
return the sequence for a region using the UCSC DAS
server. note the start is 1-based
    each feature will have its own .sequence method which sends
the correct start and end to this function.
>>> sequence('hg18', 'chr2', 2223, 2230)
'caacttag'
"""
url = "http://genome.ucsc.edu/cgi-bin/das/%s" % db
url += "/dna?segment=%s:%i,%i"
xml = U.urlopen(url % (chrom, start, end)).read()
return _seq_from_xml(xml)
|
return the sequence for a region using the UCSC DAS
server. note the start is 1-based
each feature will have its own .sequence method which sends
the correct start and end to this function.
>>> sequence('hg18', 'chr2', 2223, 2230)
'caacttag'
|
entailment
|
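sequence hands the DAS response to a _seq_from_xml helper that is not included here; a minimal sketch of what it presumably does (regex-based, and assuming the response has already been decoded to text):

import re

def _seq_from_xml(xml):
    # pull the sequence out of the <DNA ...>...</DNA> element and drop the
    # line breaks the DAS server inserts
    m = re.search(r"<DNA[^>]*>([^<]+)</DNA>", xml, re.DOTALL)
    return "".join(m.group(1).split()) if m else ""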
def set_table(genome, table, table_name, connection_string, metadata):
"""
alter the table to work between different
dialects
"""
table = Table(table_name, genome._metadata, autoload=True,
autoload_with=genome.bind, extend_existing=True)
#print "\t".join([c.name for c in table.columns])
# need to prefix the indexes with the table name to avoid collisions
for i, idx in enumerate(table.indexes):
idx.name = table_name + "." + idx.name + "_ix" + str(i)
cols = []
for i, col in enumerate(table.columns):
# convert mysql-specific types to varchar
#print col.name, col.type, isinstance(col.type, ENUM)
if isinstance(col.type, (LONGBLOB, ENUM)):
if 'sqlite' in connection_string:
col.type = VARCHAR()
elif 'postgres' in connection_string:
if isinstance(col.type, ENUM):
#print dir(col)
col.type = PG_ENUM(*col.type.enums, name=col.name,
create_type=True)
else:
col.type = VARCHAR()
elif str(col.type) == "VARCHAR" \
and ("mysql" in connection_string \
or "postgres" in connection_string):
if col.type.length is None:
col.type.length = 48 if col.name != "description" else None
if not "mysql" in connection_string:
if str(col.type).lower().startswith("set("):
col.type = VARCHAR(15)
cols.append(col)
table = Table(table_name, genome._metadata, *cols,
autoload_replace=True, extend_existing=True)
return table
|
alter the table to work between different
dialects
|
entailment
|
def create_url(self, db="", user="genome", host="genome-mysql.cse.ucsc.edu",
password="", dialect="mysqldb"):
"""
internal: create a dburl from a set of parameters or the defaults on
this object
"""
if os.path.exists(db):
db = "sqlite:///" + db
# Is this a DB URL? If so, use it directly
if self.db_regex.match(db):
self.db = self.url = db
self.dburl = db
self.user = self.host = self.password = ""
else:
self.db = db
if user == "genome" and host != "genome-mysql.cse.ucsc.edu":
import getpass
user = getpass.getuser()
self.host = host
self.user = user
self.password = (":" + password) if password else ""
self.dburl = self.url.format(db=self.db, user=self.user,
host=self.host, password=self.password, dialect=dialect)
|
internal: create a dburl from a set of parameters or the defaults on
this object
|
entailment
|
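The three branches above produce three kinds of connection strings; illustrative calls (the exact expansion of the mysql case depends on the unshown self.url template):

g.create_url("hg19")                 # db name -> fills the mysql template
g.create_url("sqlite:///local.db")   # already a db URL -> used verbatim
g.create_url("/tmp/local.db")        # existing file -> "sqlite:////tmp/local.db"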
def mirror(self, tables, dest_url):
"""
    mirror a set of `tables` to the database at `dest_url`
Returns a new Genome object
Parameters
----------
tables : list
an iterable of tables
dest_url: str
a dburl string, e.g. 'sqlite:///local.db'
"""
    from .mirror import mirror
return mirror(self, tables, dest_url)
|
mirror a set of `tables` to the database at `dest_url`
Returns a new Genome object
Parameters
----------
tables : list
an iterable of tables
dest_url: str
a dburl string, e.g. 'sqlite:///local.db'
|
entailment
|
def dataframe(self, table):
"""
create a pandas dataframe from a table or query
Parameters
----------
table : table
a table in this database or a query
"""
from pandas import DataFrame
if isinstance(table, six.string_types):
table = getattr(self, table)
try:
rec = table.first()
except AttributeError:
rec = table[0]
if hasattr(table, "all"):
records = table.all()
else:
records = [tuple(t) for t in table]
cols = [c.name for c in rec._table.columns]
return DataFrame.from_records(records, columns=cols)
|
create a pandas dataframe from a table or query
Parameters
----------
table : table
a table in this database or a query
|
entailment
|
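Typical use, assuming a Genome connection and a refGene table (both names are illustrative):

g = Genome("hg19")
df = g.dataframe("refGene")   # whole table as a pandas DataFrame
print(df.columns, len(df))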
def load_file(self, fname, table=None, sep="\t", bins=False, indexes=None):
"""
use some of the machinery in pandas to load a file into a table
Parameters
----------
fname : str
filename or filehandle to load
table : str
table to load the file to
sep : str
CSV separator
bins : bool
add a "bin" column for efficient spatial queries.
indexes : list[str]
list of columns to index
"""
convs = {"#chr": "chrom", "start": "txStart", "end": "txEnd", "chr":
"chrom", "pos": "start", "POS": "start", "chromStart": "txStart",
"chromEnd": "txEnd"}
if table is None:
import os.path as op
table = op.basename(op.splitext(fname)[0]).replace(".", "_")
print("writing to:", table, file=sys.stderr)
from pandas.io import sql
import pandas as pa
from toolshed import nopen
needs_name = False
for i, chunk in enumerate(pa.read_csv(nopen(fname), iterator=True,
chunksize=100000, sep=sep, encoding="latin-1")):
chunk.columns = [convs.get(k, k) for k in chunk.columns]
if not "name" in chunk.columns:
needs_name = True
chunk['name'] = chunk.get('chrom', chunk[chunk.columns[0]])
if bins:
chunk['bin'] = 1
        if i == 0 and table not in self.tables:
flavor = self.url.split(":")[0]
schema = sql.get_schema(chunk, table, flavor)
print(schema)
self.engine.execute(schema)
        elif i == 0:
            print("adding to existing table, you may want to drop first",
                  file=sys.stderr)
tbl = getattr(self, table)._table
cols = chunk.columns
data = list(dict(zip(cols, x)) for x in chunk.values)
if needs_name:
for d in data:
d['name'] = "%s:%s" % (d.get("chrom"), d.get("txStart", d.get("chromStart")))
if bins:
for d in data:
d['bin'] = max(Genome.bins(int(d["txStart"]), int(d["txEnd"])))
self.engine.execute(tbl.insert(), data)
self.session.commit()
        if i > 0:
            print("writing row:", i * 100000, file=sys.stderr)
if "txStart" in chunk.columns:
if "chrom" in chunk.columns:
ssql = """CREATE INDEX "%s.chrom_txStart" ON "%s" (chrom, txStart)""" % (table, table)
else:
ssql = """CREATE INDEX "%s.txStart" ON "%s" (txStart)""" % (table, table)
self.engine.execute(ssql)
for index in (indexes or []):
ssql = """CREATE INDEX "%s.%s" ON "%s" (%s)""" % (table,
index, table, index)
self.engine.execute(ssql)
if bins:
ssql = """CREATE INDEX "%s.chrom_bin" ON "%s" (chrom, bin)""" % (table, table)
self.engine.execute(ssql)
self.session.commit()
|
use some of the machinery in pandas to load a file into a table
Parameters
----------
fname : str
filename or filehandle to load
table : str
table to load the file to
sep : str
CSV separator
bins : bool
add a "bin" column for efficient spatial queries.
indexes : list[str]
list of columns to index
|
entailment
|
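A sketch of loading a local BED-like file (file and table names illustrative); note that bins=True assumes the renamed columns include txStart/txEnd:

g.load_file("peaks.bed", table="peaks", sep="\t",
            bins=True, indexes=["name"])
rows = g.dataframe("peaks")   # read it back for a quick sanity check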
def david_go(refseq_list, annot=('SP_PIR_KEYWORDS', 'GOTERM_BP_FAT',
'GOTERM_CC_FAT', 'GOTERM_MF_FAT')):
"""
open a web-browser to the DAVID online enrichment tool
Parameters
----------
refseq_list : list
list of refseq names to check for enrichment
annot : list
iterable of DAVID annotations to check for enrichment
"""
URL = "http://david.abcc.ncifcrf.gov/api.jsp?type=REFSEQ_MRNA&ids=%s&tool=term2term&annot="
import webbrowser
webbrowser.open(URL % ",".join(set(refseq_list)) + ",".join(annot))
|
open a web-browser to the DAVID online enrichment tool
Parameters
----------
refseq_list : list
list of refseq names to check for enrichment
annot : list
iterable of DAVID annotations to check for enrichment
|
entailment
|
def bin_query(self, table, chrom, start, end):
"""
perform an efficient spatial query using the bin column if available.
The possible bins are calculated from the `start` and `end` sent to
this function.
Parameters
----------
table : str or table
table to query
chrom : str
chromosome for the query
start : int
        0-based start position
end : int
0-based end position
"""
if isinstance(table, six.string_types):
table = getattr(self, table)
try:
tbl = table._table
except AttributeError:
tbl = table.column_descriptions[0]['type']._table
q = table.filter(tbl.c.chrom == chrom)
if hasattr(tbl.c, "bin"):
bins = Genome.bins(start, end)
if len(bins) < 100:
q = q.filter(tbl.c.bin.in_(bins))
if hasattr(tbl.c, "txStart"):
return q.filter(tbl.c.txStart <= end).filter(tbl.c.txEnd >= start)
return q.filter(tbl.c.chromStart <= end).filter(tbl.c.chromEnd >= start)
|
perform an efficient spatial query using the bin column if available.
The possible bins are calculated from the `start` and `end` sent to
this function.
Parameters
----------
table : str or table
table to query
chrom : str
chromosome for the query
start : int
0-based start position
end : int
0-based end position
|
entailment
|
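Example query (table name illustrative); the result is a SQLAlchemy query, so it can be further filtered or iterated:

hits = g.bin_query("refGene", "chr1", 1234567, 1244567)
for feat in hits.limit(5):
    print(feat.chrom, feat.txStart, feat.txEnd)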
def upstream(self, table, chrom_or_feat, start=None, end=None, k=1):
"""
Return k-nearest upstream features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
number of upstream neighbors to return
"""
res = self.knearest(table, chrom_or_feat, start, end, k, "up")
end = getattr(chrom_or_feat, "end", end)
start = getattr(chrom_or_feat, "start", start)
rev = getattr(chrom_or_feat, "strand", "+") == "-"
if rev:
return [x for x in res if x.end > start]
else:
return [x for x in res if x.start < end]
|
Return k-nearest upstream features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
number of upstream neighbors to return
|
entailment
|
def knearest(self, table, chrom_or_feat, start=None, end=None, k=1,
_direction=None):
"""
Return k-nearest features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
        number of neighbors to return
_direction : (None, "up", "down")
internal (don't use this)
"""
assert _direction in (None, "up", "down")
# they sent in a feature
if start is None:
assert end is None
chrom, start, end = chrom_or_feat.chrom, chrom_or_feat.start, chrom_or_feat.end
        # if the query is directional and the feature has a strand,
        # flip the requested direction for - strand features
        if _direction in ("up", "down") and getattr(chrom_or_feat,
                "strand", None) == "-":
            _direction = "up" if _direction == "down" else "down"
    else:
        chrom = chrom_or_feat
    qstart, qend = int(start), int(end)
res = self.bin_query(table, chrom, qstart, qend)
i, change = 1, 350
try:
while res.count() < k:
if _direction in (None, "up"):
if qstart == 0 and _direction == "up": break
qstart = max(0, qstart - change)
if _direction in (None, "down"):
qend += change
i += 1
change *= (i + 5)
res = self.bin_query(table, chrom, qstart, qend)
except BigException:
return []
def dist(f):
d = 0
if start > f.end:
d = start - f.end
elif f.start > end:
d = f.start - end
        # distance from the query interval to f (0 when they overlap)
return d
    # sort by distance only; the feature objects themselves are not orderable
    dists = sorted(((dist(f), f) for f in res), key=lambda pair: pair[0])
if len(dists) == 0:
return []
dists, res = zip(*dists)
if len(res) == k:
return res
    if k > len(res):  # had to break because of end of chrom
        k = len(res)
ndist = dists[k - 1]
# include all features that are the same distance as the nth closest
# feature (accounts for ties).
while k < len(res) and dists[k] == ndist:
k = k + 1
return res[:k]
|
Return k-nearest features
Parameters
----------
table : str or table
table against which to query
chrom_or_feat : str or feat
either a chromosome, e.g. 'chr3' or a feature with .chrom, .start,
.end attributes
start : int
if `chrom_or_feat` is a chrom, then this must be the integer start
end : int
if `chrom_or_feat` is a chrom, then this must be the integer end
k : int
number of neighbors to return
_direction : (None, "up", "down")
internal (don't use this)
|
entailment
|
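Example (table name illustrative); because ties at the k-th distance are kept, the result can contain more than k features:

nearest = g.knearest("refGene", "chr1", start=1234567, end=1234568, k=3)
# len(nearest) may exceed 3 when several features tie at the 3rd distance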
def annotate(self, fname, tables, feature_strand=False, in_memory=False,
header=None, out=sys.stdout, parallel=False):
"""
annotate a file with a number of tables
Parameters
----------
fname : str or file
file name or file-handle
tables : list
list of tables with which to annotate `fname`
feature_strand : bool
if this is True, then the up/downstream designations are based on
the features in `tables` rather than the features in `fname`
    in_memory : bool
if True, then tables are read into memory. This usually makes the
annotation much faster if there are more than 500 features in
`fname` and the number of features in the table is less than 100K.
header : str
header to print out (if True, use existing header)
out : file
where to print output
parallel : bool
if True, use multiprocessing library to execute the annotation of
each chromosome in parallel. Uses more memory.
"""
from .annotate import annotate
return annotate(self, fname, tables, feature_strand, in_memory, header=header,
out=out, parallel=parallel)
|
annotate a file with a number of tables
Parameters
----------
fname : str or file
file name or file-handle
tables : list
list of tables with which to annotate `fname`
feature_strand : bool
if this is True, then the up/downstream designations are based on
the features in `tables` rather than the features in `fname`
in_memory : bool
if True, then tables are read into memory. This usually makes the
annotation much faster if there are more than 500 features in
`fname` and the number of features in the table is less than 100K.
header : str
header to print out (if True, use existing header)
out : file
where to print output
parallel : bool
if True, use multiprocessing library to execute the annotation of
each chromosome in parallel. Uses more memory.
|
entailment
|
def bins(start, end):
"""
Get all the bin numbers for a particular interval defined by
(start, end]
"""
    if end - start < 536870912:
        offsets = [585, 73, 9, 1]
    else:
        # intervals this large would need the extended (4681-offset) binning
        # scheme, which this implementation does not support
        raise BigException
binFirstShift = 17
binNextShift = 3
start = start >> binFirstShift
end = (end - 1) >> binFirstShift
bins = [1]
for offset in offsets:
bins.extend(range(offset + start, offset + end + 1))
start >>= binNextShift
end >>= binNextShift
return frozenset(bins)
|
Get all the bin numbers for a particular interval defined by
(start, end]
|
entailment
|
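A worked example of the standard UCSC binning scheme implemented above: a small interval lands in one bin per level of the hierarchy, and bins(start, end) enumerates every bin in which a feature overlapping (start, end] could have been stored.

Genome.bins(0, 100000)
# -> frozenset({1, 9, 73, 585})
# bin_query combines this with `bin IN (...)` plus the coordinate filters,
# so a (chrom, bin) index prunes most non-overlapping rows before the
# range comparison runs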
def save_bed(cls, query, filename=sys.stdout):
"""
write a bed12 file of the query.
Parameters
----------
query : query
a table or query to save to file
filename : file
string or filehandle to write output
"""
out = _open(filename, 'w')
for o in query:
out.write(o.bed() + '\n')
|
write a bed12 file of the query.
Parameters
----------
query : query
a table or query to save to file
filename : file
string or filehandle to write output
|
entailment
|
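save_bed calls an _open helper that is not shown; a minimal sketch consistent with accepting either a path or an already-open handle (behaviour inferred from the call site):

def _open(filelike, mode="r"):
    # pass through objects that already behave like files; open paths
    if hasattr(filelike, "write") or hasattr(filelike, "read"):
        return filelike
    return open(filelike, mode)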
def staticfile_node(parser, token, optimize_if_possible=False):
"""For example:
{% staticfile "/js/foo.js" %}
or
{% staticfile "/js/foo.js" as variable_name %}
Or for multiples:
{% staticfile "/foo.js; /bar.js" %}
or
{% staticfile "/foo.js; /bar.js" as variable_name %}
"""
args = token.split_contents()
tag = args[0]
if len(args) == 4 and args[-2] == 'as':
context_name = args[-1]
args = args[:-2]
else:
context_name = None
filename = parser.compile_filter(args[1])
return StaticFileNode(filename,
symlink_if_possible=_CAN_SYMLINK,
optimize_if_possible=optimize_if_possible,
context_name=context_name)
|
For example:
{% staticfile "/js/foo.js" %}
or
{% staticfile "/js/foo.js" as variable_name %}
Or for multiples:
{% staticfile "/foo.js; /bar.js" %}
or
{% staticfile "/foo.js; /bar.js" as variable_name %}
|
entailment
|
def _mkdir(newdir):
"""works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
"""
if os.path.isdir(newdir):
pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head):
_mkdir(head)
if tail:
os.mkdir(newdir)
|
works the way a good mkdir should :)
- already exists, silently complete
- regular file in the way, raise an exception
- parent directory(ies) does not exist, make them as well
|
entailment
|
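On current Python the same recursive behaviour is available in the standard library; the custom "file in the way" message is lost but the semantics are close:

import os

def mkdir_p(newdir):
    # parents created as needed; no error if the directory already exists;
    # raises FileExistsError if a regular file is in the way
    os.makedirs(newdir, exist_ok=True)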
def _find_filepath_in_roots(filename):
"""Look for filename in all MEDIA_ROOTS, and return the first one found."""
for root in settings.DJANGO_STATIC_MEDIA_ROOTS:
filepath = _filename2filepath(filename, root)
if os.path.isfile(filepath):
return filepath, root
    # haven't found it in DJANGO_STATIC_MEDIA_ROOTS; look for apps' files
    # if we're in DEBUG mode
if settings.DEBUG:
try:
from django.contrib.staticfiles import finders
absolute_path = finders.find(filename)
if absolute_path:
root, filepath = os.path.split(absolute_path)
return absolute_path, root
except ImportError:
pass
return None, None
|
Look for filename in all MEDIA_ROOTS, and return the first one found.
|
entailment
|
def default_combine_filenames_generator(filenames, max_length=40):
"""Return a new filename to use as the combined file name for a
bunch of files.
A precondition is that they all have the same file extension
Given that the list of files can have different paths, we aim to use the
most common path.
Example:
/somewhere/else/foo.js
/somewhere/bar.js
/somewhere/different/too/foobar.js
The result will be
/somewhere/foo_bar_foobar.js
Another thing to note, if the filenames have timestamps in them, combine
them all and use the highest timestamp.
"""
path = None
names = []
extension = None
timestamps = []
for filename in filenames:
name = os.path.basename(filename)
if not extension:
extension = os.path.splitext(name)[1]
elif os.path.splitext(name)[1] != extension:
raise ValueError("Can't combine multiple file extensions")
        for each in re.finditer(r'\.\d{10}\.', name):
timestamps.append(int(each.group().replace('.','')))
name = name.replace(each.group(), '.')
name = os.path.splitext(name)[0]
names.append(name)
if path is None:
path = os.path.dirname(filename)
else:
if len(os.path.dirname(filename)) < len(path):
path = os.path.dirname(filename)
new_filename = '_'.join(names)
if timestamps:
new_filename += ".%s" % max(timestamps)
new_filename = new_filename[:max_length]
new_filename += extension
return os.path.join(path, new_filename)
|
Return a new filename to use as the combined file name for a
bunch of files.
A precondition is that they all have the same file extension
Given that the list of files can have different paths, we aim to use the
most common path.
Example:
/somewhere/else/foo.js
/somewhere/bar.js
/somewhere/different/too/foobar.js
The result will be
/somewhere/foo_bar_foobar.js
Another thing to note, if the filenames have timestamps in them, combine
them all and use the highest timestamp.
|
entailment
|
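Tracing the docstring's examples through the code above (outputs computed by hand from the algorithm, shown as comments):

default_combine_filenames_generator([
    "/somewhere/else/foo.js",
    "/somewhere/bar.js",
    "/somewhere/different/too/foobar.js",
])
# -> "/somewhere/foo_bar_foobar.js"  (the shortest dirname wins)

default_combine_filenames_generator([
    "/static/foo.1293021741.js",
    "/static/bar.1293021788.js",
])
# -> "/static/foo_bar.1293021788.js"  (the highest timestamp is kept)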
def render(self, context):
"""inspect the code and look for files that can be turned into combos.
Basically, the developer could type this:
{% slimall %}
<link href="/one.css"/>
<link href="/two.css"/>
{% endslimall %}
    And it should be rewritten like this:
<link href="{% slimfile "/one.css;/two.css" %}"/>
which we already have routines for doing.
"""
code = self.nodelist.render(context)
if not settings.DJANGO_STATIC:
# Append MEDIA_URL if set
# quick and dirty
if settings.DJANGO_STATIC_MEDIA_URL_ALWAYS:
for match in STYLES_REGEX.finditer(code):
for filename in match.groups():
code = (code.replace(filename,
settings.DJANGO_STATIC_MEDIA_URL + filename))
for match in SCRIPTS_REGEX.finditer(code):
for filename in match.groups():
code = (code.replace(filename,
settings.DJANGO_STATIC_MEDIA_URL + filename))
return code
return code
new_js_filenames = []
for match in SCRIPTS_REGEX.finditer(code):
whole_tag = match.group()
async_defer = ASYNC_DEFER_REGEX.search(whole_tag)
for filename in match.groups():
optimize_if_possible = self.optimize_if_possible
if optimize_if_possible and \
(filename.endswith('.min.js') or filename.endswith('.minified.js')):
# Override! Because we simply don't want to run slimmer
# on files that have the file extension .min.js
optimize_if_possible = False
new_js_filenames.append(filename)
code = code.replace(whole_tag, '')
# Now, we need to combine these files into one
if new_js_filenames:
new_js_filename = _static_file(new_js_filenames,
optimize_if_possible=optimize_if_possible,
symlink_if_possible=self.symlink_if_possible)
else:
new_js_filename = None
new_image_filenames = []
def image_replacer(match):
tag = match.group()
for filename in match.groups():
new_filename = _static_file(filename,
symlink_if_possible=self.symlink_if_possible)
if new_filename != filename:
tag = tag.replace(filename, new_filename)
return tag
code = IMG_REGEX.sub(image_replacer, code)
new_css_filenames = defaultdict(list)
# It's less trivial with CSS because we can't combine those that are
# of different media
media_regex = re.compile('media=["\']([^"\']+)["\']')
for match in STYLES_REGEX.finditer(code):
whole_tag = match.group()
try:
media_type = media_regex.findall(whole_tag)[0]
except IndexError:
media_type = ''
for filename in match.groups():
new_css_filenames[media_type].append(filename)
code = code.replace(whole_tag, '')
# Now, we need to combine these files into one
new_css_filenames_combined = {}
if new_css_filenames:
for media_type, filenames in new_css_filenames.items():
r = _static_file(filenames,
optimize_if_possible=self.optimize_if_possible,
symlink_if_possible=self.symlink_if_possible)
new_css_filenames_combined[media_type] = r
if new_js_filename:
# Now is the time to apply the name prefix if there is one
if async_defer:
new_tag = ('<script %s src="%s"></script>' %
(async_defer.group(0), new_js_filename))
else:
new_tag = '<script src="%s"></script>' % new_js_filename
code = "%s%s" % (new_tag, code)
for media_type, new_css_filename in new_css_filenames_combined.items():
extra_params = ''
if media_type:
extra_params += ' media="%s"' % media_type
new_tag = '<link rel="stylesheet"%s href="%s"/>' % \
(extra_params, new_css_filename)
code = "%s%s" % (new_tag, code)
return code
|
inspect the code and look for files that can be turned into combos.
Basically, the developer could type this:
{% slimall %}
<link href="/one.css"/>
<link href="/two.css"/>
{% endslimall %}
And it should be rewritten like this:
<link href="{% slimfile "/one.css;/two.css" %}"/>
which we already have routines for doing.
|
entailment
|