blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 112 | license_type stringclasses 2 values | repo_name stringlengths 5 115 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 777 values | visit_date timestamp[us]date 2015-08-06 10:31:46 2023-09-06 10:44:38 | revision_date timestamp[us]date 1970-01-01 02:38:32 2037-05-03 13:00:00 | committer_date timestamp[us]date 1970-01-01 02:38:32 2023-09-06 01:08:06 | github_id int64 4.92k 681M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us]date 2012-06-04 01:52:49 2023-09-14 21:59:50 ⌀ | gha_created_at timestamp[us]date 2008-05-22 07:58:19 2023-08-21 12:35:19 ⌀ | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 10.2M | extension stringclasses 188 values | content stringlengths 3 10.2M | authors listlengths 1 1 | author_id stringlengths 1 132 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8202e71838d04d0b2b4ccf5c3a73e1b423a6495a | 933ed73cdf117fc6c88c1ebba7a17b82807a16e8 | /docs/02.AI_ML/code-1805/Day06all/audio.py | a484f78da089d31453fcf1e9aa31a0039d15fc49 | [
"Apache-2.0"
] | permissive | wan230114/PythonNote | c4fff80f6f3849ed0b0346526d3c6197a4867d2c | f4989a44c03efdcea3f7aa3484e3470e7fd983eb | refs/heads/master | 2023-01-28T14:51:07.304066 | 2023-01-12T18:38:16 | 2023-01-12T18:38:16 | 242,367,069 | 5 | 6 | Apache-2.0 | 2021-01-05T23:35:10 | 2020-02-22T15:45:01 | JavaScript | UTF-8 | Python | false | false | 886 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import numpy as np
import numpy.fft as nf
import scipy.io.wavfile as wf
import matplotlib.pyplot as mp
# Load the wav file: sample_rate in Hz, sigs holds the raw samples.
sample_rate, sigs = wf.read('../../data/freq.wav')
# Scale to [-1, 1) — assumes 16-bit PCM samples(int16); TODO confirm the wav format.
sigs = sigs / 2 ** 15
# Time axis in seconds, one point per sample.
times = np.arange(len(sigs)) / sample_rate
# Frequency bins matching the FFT output below.
freqs = nf.fftfreq(len(sigs), d=1 / sample_rate)
ffts = nf.fft(sigs)
# Magnitude spectrum(power per frequency bin).
pows = np.abs(ffts)
mp.figure('Audio', facecolor='lightgray')
# Left panel: the signal in the time domain.
mp.subplot(121)
mp.title('Time Domain', fontsize=16)
mp.xlabel('Time', fontsize=12)
mp.ylabel('Signal', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(times, sigs, c='dodgerblue')
# Right panel: the magnitude spectrum, positive frequencies only.
mp.subplot(122)
mp.title('Frequency Domain', fontsize=16)
mp.xlabel('Frequency', fontsize=12)
mp.ylabel('Power', fontsize=12)
mp.tick_params(labelsize=10)
mp.grid(linestyle=':')
mp.plot(freqs[freqs >= 0], pows[freqs >= 0], c='orangered')
mp.tight_layout()
mp.show()
| [
"1170101471@qq.com"
] | 1170101471@qq.com |
7b893c27f6bb7e81de39e943a5200791f394b746 | 65a5f74ede8079d693a70bd9597f063acfbf33fd | /tasks/mc6800_hp53xx/hp5370.py | eeeb858175d2527d953467f6cbdd6f7c54c464ae | [] | no_license | pombredanne/PyRevEng | 5372a8322313c81ce065c689eb4b816596c90f8b | 08083744806258cfa31edd0132456d70377a9f71 | refs/heads/master | 2020-09-20T06:55:24.020304 | 2019-11-11T17:18:38 | 2019-11-11T17:18:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | #!/usr/local/bin/python
#
# Functions common to:
# HP 5370A
# HP 5370B
# PyRevEng classes
import tree
import math
import const
def chargen(p, adr):
    """Mark the 16-byte character-generator table at `adr` as seven-segment data."""
    for offset in range(16):
        const.seven_segment(p, adr + offset)
    p.setlabel(adr, "CHARGEN")
#######################################################################
#
def keyboard_dispatch(p, cpu, adr = 0x7962):
    # Decode the keyboard dispatch table: `adr` must point at an
    # "LDX #imm"(0xce) followed by "JMP ext"(0x7e); the immediate holds a
    # pointer to an 8-column x 4-row table of handler addresses.
    assert p.m.rd(adr) == 0xce
    assert p.m.rd(adr + 3) == 0x7e
    ptr = p.m.b16(adr + 1)
    ii = cpu.disass(adr + 3, "ins")
    const.ptr(p, ptr, 2)
    tbl = p.m.b16(ptr)
    aa = tbl
    xx = dict()  # handlers already linked, to avoid duplicate flow edges
    for col in range(8,0,-1):
        p.setlabel(aa, "Keyboard_Column_%d" % col)
        for row in range(1,5):
            x = const.ptr(p, aa, 2)
            dd = p.m.b16(aa)
            cpu.disass(dd)
            if dd not in xx:
                ii.flow("call", "XFUNC", dd)
                xx[dd] = True
            aa += 2
    x = p.t.add(tbl, aa, "tbl")
    x.blockcmt += "-\nDispatch table for Keyboard commands\n"
    # Name a few handlers whose purpose is known.
    p.setlabel(p.m.b16(tbl + 4), "KEY_Ext_Arm")
    p.setlabel(p.m.b16(tbl + 6), "KEY_UNDEF")
    p.setlabel(p.m.b16(tbl + 10), "KEY_Ext_Hold_Off")
    p.setlabel(p.m.b16(tbl + 14), "KEY_Reset")
#######################################################################
# List of two-letter HPIB commands
#
def hpib_cmd_table(p, adr, len = 26):
    """Tag the table of two-letter HPIB command names and collect them on `p`.

    NOTE: the parameter name `len` shadows the builtin but is kept for
    keyword-argument compatibility with existing callers.
    """
    end = adr + 2 * len
    p.hpib_cmd = []
    node = p.t.add(adr, end, "cmd-table")
    node.blockcmt += "-\nTable of two-letter HPIB commands\n"
    for addr in range(adr, end, 2):
        const.txtlen(p, addr, 2)
        p.hpib_cmd.append([p.m.ascii(addr, 2)])
def hpib_arg_range(p, adr, len = 14):
    """Annotate per-command [lo-hi] numeric argument ranges, recording them
    into the matching entries of p.hpib_cmd(filled by hpib_cmd_table)."""
    node = p.t.add(adr, adr + len * 2, "arg-range")
    node.blockcmt += "-\nTable of legal range of numeric argument for HPIB cmd"
    for idx in range(len):
        addr = adr + idx * 2
        entry = const.byte(p, addr, 2)
        lo = p.m.rd(addr)
        hi = p.m.rd(addr + 1)
        entry.lcmt(p.hpib_cmd[idx][0] + "[%d-%d]" % (lo, hi))
        p.hpib_cmd[idx].extend((lo, hi))
def hpib_tbl_idx(p, adr):
    # Annotate the per-command index bytes(index into the cmd table; the
    # numeric argument is added at runtime).  Stops at the first entry that
    # has no [lo, hi] range recorded by hpib_arg_range.
    aa = adr
    for i in p.hpib_cmd:
        if len(i) == 1:
            break
        x = const.byte(p, aa)
        i.append(p.m.rd(aa))
        x.lcmt(i[0])
        aa += 1
    x = p.t.add(adr, aa, "idx-table")
    x.blockcmt += "-\nIndex into cmd table, add numeric arg"
def dispatch_table_arg(p, adr, cpu):
    # Decode the dispatch table for HPIB commands that take a numeric
    # argument: `adr` points at "LDX #imm"(0xce) + "JMP ext"(0x7e); the
    # immediate points at one handler pointer per(command, argument) pair.
    assert p.m.rd(adr) == 0xce
    assert p.m.rd(adr + 3) == 0x7e
    ptr = p.m.b16(adr + 1)
    ii = cpu.disass(adr + 3, "ins")
    const.ptr(p, ptr, 2)
    tbl = p.m.b16(ptr)
    aa = tbl
    xx = dict()  # handlers already linked/labelled once
    for i in p.hpib_cmd:
        if len(i) == 1:
            # entries past this point have no argument range -> no-arg commands
            break
        for j in range(i[1], i[2] + 1):
            x = const.ptr(p, aa, 2)
            y = i[0] + "%d" % j
            dd = p.m.b16(aa)
            cpu.disass(dd)
            if dd not in xx:
                ii.flow("call", "XFUNC", dd)
                xx[dd] = True
                p.setlabel(dd, "CMD_" + y + "_" + gpib_expl[y])
            aa += 2
    x = p.t.add(tbl, aa, "idx-table")
    x.blockcmt += "-\nDispatch table for HPIB cmds with arg"
def dispatch_table_noarg(p, adr, cpu):
    # Decode the dispatch table for HPIB commands WITHOUT a numeric
    # argument.  Same "LDX #imm; JMP ext" prologue as dispatch_table_arg.
    assert p.m.rd(adr) == 0xce
    assert p.m.rd(adr + 3) == 0x7e
    ptr = p.m.b16(adr + 1)
    ii = cpu.disass(adr + 3, "ins")
    const.ptr(p, ptr, 2)
    tbl = p.m.b16(ptr)
    aa = tbl
    xx = dict()  # handlers already linked/labelled once
    for i in p.hpib_cmd:
        if len(i) > 1:
            # entries with an argument range belong to dispatch_table_arg
            continue
        x = const.ptr(p, aa, 2)
        y = i[0]
        dd = p.m.b16(aa)
        cpu.disass(dd)
        if dd not in xx:
            ii.flow("call", "XFUNC", dd)
            xx[dd] = True
            p.setlabel(dd, "CMD_" + y + "_" + gpib_expl[y])
        aa += 2
    x = p.t.add(tbl, aa, "idx-table")
    x.blockcmt += "-\nDispatch table for HPIB cmds without arg\n"
# Explanation of the HP5370[AB] HPIB Commands
# (keys are command mnemonic + numeric argument where one applies)
gpib_expl = {
    # FN: measurement function
    "FN1": "Time Interval",
    "FN2": "Trigger Levels",
    "FN3": "Frequency",
    "FN4": "Period",
    "FN5": "???",
    # GT: gate time
    "GT1": "Single Period",
    "GT2": "0.01s",
    "GT3": "0.1s",
    "GT4": "1s",
    # ST: statistics / display selection
    "ST1": "Mean",
    "ST2": "StdDev",
    "ST3": "Min",
    "ST4": "Max",
    "ST5": "Disp Ref",
    "ST6": "Clr Ref",
    "ST7": "Disp Evts",
    "ST8": "Set Ref",
    "ST9": "Disp All",
    # SS: sample size
    "SS1": "Sample Size = 1",
    "SS2": "Sample Size = 100",
    "SS3": "Sample Size = 1k",
    "SS4": "Sample Size = 10k",
    "SS5": "Sample Size = 100k",
    # MD: measurement mode
    "MD1": "FP Rate",
    "MD2": "Hold until MR",
    "MD3": "Fast",
    "MD4": "Fast + SRQ",
    # IN: input channel routing
    "IN1": "Input: Start+Stop",
    "IN2": "Input: Stop+Stop",
    "IN3": "Input: Start+Start",
    "IN4": "Input: Stop+Start",
    # SA/SO/SE: slope selection for start/stop/arm
    "SA1": "Start Pos",
    "SA2": "Start Neg",
    "SO1": "Stop Pos",
    "SO2": "Stop Neg",
    "SE1": "Arm Pos",
    "SE2": "Arm Neg",
    # AR/EH/EA/IA: arming and holdoff configuration
    "AR1": "+T.I. Arming Only",
    "AR2": "+T.I. Arming",
    "EH0": "Ext Holdoff dis",
    "EH1": "Ext Holdoff ena",
    "EA0": "Ext Arm dis",
    "EA1": "Ext Arm ena",
    "IA1": "Internal Arm Auto",
    "IA2": "Start Chan Arm",
    "IA3": "Stop Chan Arm",
    # Two-letter commands without numeric argument
    "MR": "Manual Rate",
    "MI": "Manual Input",
    "SL": "Slope Local",
    "SR": "Slope Remote",
    "TL": "Trigger Local",
    "TR": "Trigger Remote",
    "TE": "Teach",
    "PC": "Period Complement",
    "TB0": "Ascii",
    "TB1": "Binary",
    "SB": "Sample Size Binary",
    "LN": "Learn",
    "TA": "Trigger Start",
    "TO": "Trigger Stop",
}
#######################################################################
# HP5370B uses its own (weird|smart) floating point format.
#
# As far as I can tell, it looks like this: S{1}M{47}E{8} where the
# exponent is 2's complement. But there are two scaling factors
# involved, so the value is: (S * M{31.16} * 2^e * 5e-9)
#
# XXX: Hmm, the mantissa may be a 32.16 2' complement number...
#
def float_render(p, a):
    """Decode the HP5370 7-byte float at address `a` and return it as text.

    Format(per the comment above): S{1}M{47}E{8}, value = S * M * 2^E * 5e-9
    with the mantissa read as a 31.16 fixed-point number.

    Bug fix: the original stripped the sign bit and computed `s`, but never
    multiplied it into the value, so negative numbers rendered as positive.
    """
    x = p.m.rd(a + 0)
    if x & 0x80:
        s = -1
        x ^= 0x80  # clear the sign bit before using the byte as mantissa MSB
    else:
        s = 1
    # Assemble the mantissa: bytes a+0..a+5 weighted from 2^24 down to 2^-16.
    m = math.ldexp(x, 24)
    m += math.ldexp(p.m.rd(a + 1), 16)
    m += math.ldexp(p.m.rd(a + 2), 8)
    m += math.ldexp(p.m.rd(a + 3), 0)
    m += math.ldexp(p.m.rd(a + 4), -8)
    m += math.ldexp(p.m.rd(a + 5), -16)
    e = p.m.s8(a + 6)  # signed 8-bit exponent
    v = math.ldexp(s * m * 5e-9, e)
    x = "%.9e" % v
    if x.find(".") == -1 and x.find("e") == -1:
        x = x + "."
    print("FLOAT", "%x" % a, x)
    return x
class float(tree.tree):
    """Tree node rendering a 7-byte HP5370 float as a .FLOAT pseudo-op.

    NOTE: intentionally shadows the builtin `float`; the builtin is not
    used elsewhere in this module.
    """
    def __init__(self, p, adr):
        tree.tree.__init__(self, adr, adr + 7, "dot_float")
        p.t.add(adr, adr + 7, "dot-float", True, self)
        self.render = self.rfunc  # renderer callback used by the listing generator
        self.nbr = float_render(p, adr)  # decoded value, already formatted as text
        self.a['const'] = "FP=" + self.nbr
    def rfunc(self, p, t):
        # Emit a single ".FLOAT <value>" listing line.
        s = ".FLOAT\t%s" % self.nbr
        return (s,)
###########################################################
def dsp_dispatch(p, cpu, adr = 0x683b):
    # Decode the table of the eight display functions: `adr` must point at
    # "LDX #imm"(0xce) followed by "JSR ext"(0xbd); the immediate holds the
    # address of eight handler pointers.
    assert p.m.rd(adr) == 0xce
    assert p.m.rd(adr + 3) == 0xbd
    tbl = p.m.b16(adr + 1)
    ii = cpu.disass(adr + 3)
    p.setlabel(tbl, "DSP_FUNC_TABLE")
    x=p.t.add(tbl, tbl + 8 * 2, "tbl")
    x.blockcmt += "-\nTable of display functions\n"
    # Names matching the front-panel display selections.
    dspf= ("AVG", "STD", "MIN", "MAX", "REF", "EVT", "DS6", "ALL")
    j=0
    for i in range(tbl, tbl + 8 * 2, 2):
        x = const.ptr(p, i, 2)
        w = p.m.b16(i)
        p.setlabel(w, "DSP_" + dspf[j])
        ii.flow("call", "DSPFUNC", w)
        cpu.disass(w)
        j += 1
###########################################################
#x = p.t.add(0x6f00,0x7000, "tbl")
#x.blockcmt += "Table of I^2>>8\n"
def sqare_table_render(p, t):
    """Render the 256-entry (I*I)>>8 lookup table as pseudo-source lines.

    `p` and `t` are required by the renderer callback signature but unused.
    """
    listing = (
        "FOR I (0..255):",
        " .BYTE ((I * I) >> 8)",
        "",
    )
    return listing
def square_table(p, adr = 0x6f00):
    """Mark the 256-byte (I*I)>>8 lookup table starting at `adr`.

    Bug fix: the original ignored the `adr` parameter and always tagged
    0x6f00-0x7000; the range now follows `adr`(default unchanged).
    """
    x = p.t.add(adr, adr + 0x100, "tbl")
    x.blockcmt += "-\nTable of I^2>>8\n"
    x.render = sqare_table_render  # pseudo-source renderer defined above
    x.fold = True
| [
"phk@FreeBSD.org"
] | phk@FreeBSD.org |
7eb892b540bec24047ab9b270b2878817367efbe | a0947c2778742aec26b1c0600ceca17df42326cd | /Python/PythonInADay2/CSV-Files-Drill/37of79-76.py | 5d3b516908848e56823b4441139499a3dc51e4cb | [] | no_license | JohnCDunn/Course-Work-TTA | 5758319d4607114914ba9723328658bed8fb2024 | 8c4f60d51007dac2ac4cceb84b0f9666e143c0d7 | refs/heads/master | 2021-01-10T16:37:02.609879 | 2016-02-01T18:05:38 | 2016-02-01T18:05:38 | 49,983,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 982 | py | import wx
class Frame(wx.Frame):
    """Main window: an Exit button plus a File/Edit menu bar."""

    def __init__(self, title):
        wx.Frame.__init__(self, None, title=title, size=(300, 200))
        body = wx.Panel(self)
        # Exit button, wired to the self.exit handler below.
        quit_button = wx.Button(body, label="Exit", size=(100, 40), pos=(100, 30))
        quit_button.Bind(wx.EVT_BUTTON, self.exit)
        # Build the menu bar: File holds three items, Edit stays empty.
        bar = wx.MenuBar()
        file_menu = wx.Menu()
        edit_menu = wx.Menu()
        for item_label in ("New File", "Open", "Exit"):
            file_menu.Append(wx.NewId(), item_label)
        bar.Append(file_menu, "File")
        bar.Append(edit_menu, "Edit")
        self.SetMenuBar(bar)

    def exit(self, event):
        # Close and dispose of the frame; ends the main loop once it is
        # the last top-level window.
        self.Destroy()
# Create the application object before any windows, then run the event loop.
app = wx.App()
frame = Frame("Python GUI")
frame.Show()
app.MainLoop()  # blocks until all top-level windows are destroyed
| [
"JohnClydeDunn@Gmail.com"
] | JohnClydeDunn@Gmail.com |
158f9e632271af09efccef3413b918b3039ae34d | 5e80f0b1af9fbf9dc774dbb68aa603574e4ae0ba | /algorithm-study/baekjun/1655.py | 5e0d2afac622a64349c666e62c305e6b93e0a95a | [] | no_license | namujinju/study-note | 4271b4248b3c4ac1b96ef1da484d86569a030762 | 790b21e5318a326e434dc836f5f678a608037a8c | refs/heads/master | 2023-02-04T13:25:55.418896 | 2020-12-26T10:47:11 | 2020-12-26T10:47:11 | 275,279,138 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import heapq as hq
import sys
n = int(input())
arr = []
min_heap = []
for _ in range(n):
answer = []
i = int(sys.stdin.readline())
arr.append(i)
for j in arr:
hq.heappush(min_heap, j)
for j in range(len(min_heap)):
answer.append(hq.heappop(min_heap))
print(answer[(len(answer)-1)//2])
| [
"59328810+namujinju@users.noreply.github.com"
] | 59328810+namujinju@users.noreply.github.com |
de801108249a6a022a7033c65c5566e5adc89ccf | 508a70d9f57fe5143b5414742974cb2048fbe735 | /python/ccxt/async_support/cryptocom.py | e34586049e8efed5dac4b316f067f3175f2d46ed | [
"MIT"
] | permissive | block-crafters/ccxt | 928868fedd472e1ccd439288c003007f9f79a8f5 | 3ff65cdfb6f42dd3e6b55c65c030470ae12c7588 | refs/heads/master | 2022-11-13T02:31:10.878712 | 2022-11-07T11:28:58 | 2022-11-07T11:28:58 | 236,698,601 | 0 | 0 | MIT | 2021-03-05T09:03:39 | 2020-01-28T09:24:54 | JavaScript | UTF-8 | Python | false | false | 100,779 | py | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import asyncio
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import AccountNotEnabled
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import NotSupported
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import TICK_SIZE
from ccxt.base.precise import Precise
class cryptocom(Exchange):
def describe(self):
    """Return the static description of the Crypto.com exchange.

    The dict merged here over the base Exchange.describe() declares the
    exchange ids, capability flags('has'), supported timeframes, REST
    endpoints with their rate-limit weights, fee tiers and the error-code
    mapping consumed by the error handler.
    """
    return self.deep_extend(super(cryptocom, self).describe(), {
        'id': 'cryptocom',
        'name': 'Crypto.com',
        'countries': ['MT'],
        'version': 'v2',
        'rateLimit': 10,  # 100 requests per second
        'pro': True,
        # Unified-method capability flags(True/False/None=unknown or partial).
        'has': {
            'CORS': False,
            'spot': True,
            'margin': True,
            'swap': None,  # has but not fully implemented
            'future': None,  # has but not fully implemented
            'option': None,
            'borrowMargin': True,
            'cancelAllOrders': True,
            'cancelOrder': True,
            'createOrder': True,
            'fetchBalance': True,
            'fetchBidsAsks': False,
            'fetchBorrowInterest': True,
            'fetchBorrowRate': False,
            'fetchBorrowRateHistories': False,
            'fetchBorrowRateHistory': False,
            'fetchBorrowRates': True,
            'fetchClosedOrders': 'emulated',
            'fetchCurrencies': False,
            'fetchDepositAddress': True,
            'fetchDepositAddressesByNetwork': True,
            'fetchDeposits': True,
            'fetchFundingHistory': False,
            'fetchFundingRate': False,
            'fetchFundingRates': False,
            'fetchMarginMode': False,
            'fetchMarkets': True,
            'fetchMyTrades': True,
            'fetchOHLCV': True,
            'fetchOpenOrders': True,
            'fetchOrder': True,
            'fetchOrderBook': True,
            'fetchOrders': True,
            'fetchPositionMode': False,
            'fetchPositions': False,
            'fetchStatus': False,
            'fetchTicker': True,
            'fetchTickers': True,
            'fetchTime': False,
            'fetchTrades': True,
            'fetchTradingFee': False,
            'fetchTradingFees': False,
            'fetchTransactionFees': False,
            'fetchTransactions': False,
            'fetchTransfers': True,
            'fetchWithdrawals': True,
            'repayMargin': True,
            'setLeverage': False,
            'setMarginMode': False,
            'transfer': True,
            'withdraw': True,
        },
        # Unified timeframe -> exchange-specific interval code.
        'timeframes': {
            '1m': '1m',
            '5m': '5m',
            '15m': '15m',
            '30m': '30m',
            '1h': '1h',
            '4h': '4h',
            '6h': '6h',
            '12h': '12h',
            '1d': '1D',
            '1w': '7D',
            '2w': '14D',
            '1M': '1M',
        },
        'urls': {
            'logo': 'https://user-images.githubusercontent.com/1294454/147792121-38ed5e36-c229-48d6-b49a-48d05fc19ed4.jpeg',
            'test': 'https://uat-api.3ona.co/v2',
            'api': {
                'spot': 'https://api.crypto.com/v2',
                'derivatives': 'https://deriv-api.crypto.com/v1',
            },
            'www': 'https://crypto.com/',
            'referral': 'https://crypto.com/exch/5835vstech',
            'doc': 'https://exchange-docs.crypto.com/',
            'fees': 'https://crypto.com/exchange/document/fees-limits',
        },
        # Endpoint map; each value is the request's rate-limit weight.
        'api': {
            'spot': {
                'public': {
                    'get': {
                        'public/auth': 1,
                        'public/get-instruments': 1,
                        'public/get-book': 1,
                        'public/get-candlestick': 1,
                        'public/get-ticker': 1,
                        'public/get-trades': 1,
                        'public/margin/get-transfer-currencies': 1,
                        'public/margin/get-load-currenices': 1,
                        'public/respond-heartbeat': 1,
                    },
                },
                'private': {
                    'post': {
                        'private/set-cancel-on-disconnect': 10 / 3,
                        'private/get-cancel-on-disconnect': 10 / 3,
                        'private/create-withdrawal': 10 / 3,
                        'private/get-withdrawal-history': 10 / 3,
                        'private/get-currency-networks': 10 / 3,
                        'private/get-deposit-history': 10 / 3,
                        'private/get-deposit-address': 10 / 3,
                        'private/get-account-summary': 10 / 3,
                        'private/create-order': 2 / 3,
                        'private/cancel-order': 2 / 3,
                        'private/cancel-all-orders': 2 / 3,
                        'private/get-order-history': 10 / 3,
                        'private/get-open-orders': 10 / 3,
                        'private/get-order-detail': 1 / 3,
                        'private/get-trades': 100,
                        'private/margin/get-user-config': 10 / 3,
                        'private/margin/get-account-summary': 10 / 3,
                        'private/margin/transfer': 10 / 3,
                        'private/margin/borrow': 10 / 3,
                        'private/margin/repay': 10 / 3,
                        'private/margin/get-transfer-history': 10 / 3,
                        'private/margin/get-borrow-history': 10 / 3,
                        'private/margin/get-interest-history': 10 / 3,
                        'private/margin/get-repay-history': 10 / 3,
                        'private/margin/get-liquidation-history': 10 / 3,
                        'private/margin/get-liquidation-orders': 10 / 3,
                        'private/margin/create-order': 2 / 3,
                        'private/margin/cancel-order': 2 / 3,
                        'private/margin/cancel-all-orders': 2 / 3,
                        'private/margin/get-order-history': 10 / 3,
                        'private/margin/get-open-orders': 10 / 3,
                        'private/margin/get-order-detail': 1 / 3,
                        'private/margin/get-trades': 100,
                        'private/deriv/transfer': 10 / 3,
                        'private/deriv/get-transfer-history': 10 / 3,
                        'private/subaccount/get-sub-accounts': 10 / 3,
                        'private/subaccount/get-transfer-history': 10 / 3,
                        'private/subaccount/transfer': 10 / 3,
                        'private/otc/get-otc-user': 10 / 3,
                        'private/otc/get-instruments': 10 / 3,
                        'private/otc/request-quote': 100,
                        'private/otc/accept-quote': 100,
                        'private/otc/get-quote-history': 10 / 3,
                        'private/otc/get-trade-history': 10 / 3,
                    },
                },
            },
            'derivatives': {
                'public': {
                    'get': {
                        'public/auth': 10 / 3,
                        'public/get-instruments': 10 / 3,
                        'public/get-book': 1,
                        'public/get-candlestick': 1,
                        'public/get-trades': 1,
                        'public/get-tickers': 1,
                        'public/get-valuations': 1,
                        'public/get-expired-settlement-price': 10 / 3,
                        'public/get-insurance': 1,
                    },
                },
                'private': {
                    'post': {
                        'private/set-cancel-on-disconnect': 10 / 3,
                        'private/get-cancel-on-disconnect': 10 / 3,
                        'private/user-balance': 10 / 3,
                        'private/user-balance-history': 10 / 3,
                        'private/get-positions': 10 / 3,
                        'private/create-order': 2 / 3,
                        'private/cancel-order': 2 / 3,
                        'private/cancel-all-orders': 2 / 3,
                        'private/close-position': 10 / 3,
                        'private/convert-collateral': 10 / 3,
                        'private/get-order-history': 100,
                        'private/get-open-orders': 10 / 3,
                        'private/get-order-detail': 1 / 3,
                        'private/get-trades': 100,
                        'private/change-account-leverage': 10 / 3,
                        'private/get-transactions': 10 / 3,
                    },
                },
            },
        },
        # Trading fees: tiers are [30-day volume floor, fee rate] pairs.
        'fees': {
            'trading': {
                'maker': self.parse_number('0.004'),
                'taker': self.parse_number('0.004'),
                'tiers': {
                    'maker': [
                        [self.parse_number('0'), self.parse_number('0.004')],
                        [self.parse_number('25000'), self.parse_number('0.0035')],
                        [self.parse_number('50000'), self.parse_number('0.0015')],
                        [self.parse_number('100000'), self.parse_number('0.001')],
                        [self.parse_number('250000'), self.parse_number('0.0009')],
                        [self.parse_number('1000000'), self.parse_number('0.0008')],
                        [self.parse_number('20000000'), self.parse_number('0.0007')],
                        [self.parse_number('100000000'), self.parse_number('0.0006')],
                        [self.parse_number('200000000'), self.parse_number('0.0004')],
                    ],
                    'taker': [
                        [self.parse_number('0'), self.parse_number('0.004')],
                        [self.parse_number('25000'), self.parse_number('0.0035')],
                        [self.parse_number('50000'), self.parse_number('0.0025')],
                        [self.parse_number('100000'), self.parse_number('0.0016')],
                        [self.parse_number('250000'), self.parse_number('0.00015')],
                        [self.parse_number('1000000'), self.parse_number('0.00014')],
                        [self.parse_number('20000000'), self.parse_number('0.00013')],
                        [self.parse_number('100000000'), self.parse_number('0.00012')],
                        [self.parse_number('200000000'), self.parse_number('0.0001')],
                    ],
                },
            },
        },
        'options': {
            'defaultType': 'spot',
            # Unified account names -> exchange account ids used by transfer().
            'accountsById': {
                'funding': 'SPOT',
                'spot': 'SPOT',
                'margin': 'MARGIN',
                'derivatives': 'DERIVATIVES',
                'swap': 'DERIVATIVES',
                'future': 'DERIVATIVES',
            },
        },
        # https://exchange-docs.crypto.com/spot/index.html#response-and-reason-codes
        'commonCurrencies': {
            'USD_STABLE_COIN': 'USDC',
        },
        'precisionMode': TICK_SIZE,
        # Exchange error code -> ccxt exception class.
        'exceptions': {
            'exact': {
                '10001': ExchangeError,
                '10002': PermissionDenied,
                '10003': PermissionDenied,
                '10004': BadRequest,
                '10005': PermissionDenied,
                '10006': DDoSProtection,
                '10007': InvalidNonce,
                '10008': BadRequest,
                '10009': BadRequest,
                '20001': BadRequest,
                '20002': InsufficientFunds,
                '20005': AccountNotEnabled,  # {"id":"123xxx","method":"private/margin/xxx","code":"20005","message":"ACCOUNT_NOT_FOUND"}
                '30003': BadSymbol,
                '30004': BadRequest,
                '30005': BadRequest,
                '30006': BadRequest,
                '30007': BadRequest,
                '30008': BadRequest,
                '30009': BadRequest,
                '30010': BadRequest,
                '30013': BadRequest,
                '30014': BadRequest,
                '30016': BadRequest,
                '30017': BadRequest,
                '30023': BadRequest,
                '30024': BadRequest,
                '30025': BadRequest,
                '40001': BadRequest,
                '40002': BadRequest,
                '40003': BadRequest,
                '40004': BadRequest,
                '40005': BadRequest,
                '40006': BadRequest,
                '40007': BadRequest,
                '40101': AuthenticationError,
                '50001': BadRequest,
                '9010001': OnMaintenance,  # {"code":9010001,"message":"SYSTEM_MAINTENANCE","details":"Crypto.com Exchange is currently under maintenance. Please refer to https://status.crypto.com for more details."}
            },
        },
    })
async def fetch_markets(self, params={}):
    """
    see https://exchange-docs.crypto.com/spot/index.html#public-get-instruments
    see https://exchange-docs.crypto.com/derivatives/index.html#public-get-instruments
    retrieves data on all markets for cryptocom
    :param dict params: extra parameters specific to the exchange api endpoint
    :returns [dict]: an array of objects representing market data
    """
    # Query the spot and the derivatives endpoints concurrently and merge.
    spotPromise = self.fetch_spot_markets(params)
    derivativesPromise = self.fetch_derivatives_markets(params)
    fetched = await asyncio.gather(spotPromise, derivativesPromise)
    return self.array_concat(fetched[0], fetched[1])
async def fetch_spot_markets(self, params={}):
    """Fetch the spot instruments and parse them into unified market dicts.

    :param dict params: extra parameters forwarded to public/get-instruments
    :returns [dict]: unified spot market structures(no settle currency)
    """
    response = await self.spotPublicGetPublicGetInstruments(params)
    #
    # {
    #     id: 11,
    #     method: 'public/get-instruments',
    #     code: 0,
    #     result: {
    #         'instruments': [
    #             {
    #                 instrument_name: 'NEAR_BTC',
    #                 quote_currency: 'BTC',
    #                 base_currency: 'NEAR',
    #                 price_decimals: '8',
    #                 quantity_decimals: '2',
    #                 margin_trading_enabled: True,
    #                 margin_trading_enabled_5x: True,
    #                 margin_trading_enabled_10x: True,
    #                 max_quantity: '100000000',
    #                 min_quantity: '0.01'
    #             },
    #         ]
    #     }
    # }
    #
    resultResponse = self.safe_value(response, 'result', {})
    markets = self.safe_value(resultResponse, 'instruments', [])
    result = []
    for i in range(0, len(markets)):
        market = markets[i]
        id = self.safe_string(market, 'instrument_name')
        baseId = self.safe_string(market, 'base_currency')
        quoteId = self.safe_string(market, 'quote_currency')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        priceDecimals = self.safe_string(market, 'price_decimals')
        minPrice = self.parse_precision(priceDecimals)  # smallest price increment as a string
        minQuantity = self.safe_string(market, 'min_quantity')
        # Maximum leverage follows the highest margin flag the venue reports.
        maxLeverage = self.parse_number('1')
        margin_trading_enabled_5x = self.safe_value(market, 'margin_trading_enabled_5x')
        if margin_trading_enabled_5x:
            maxLeverage = self.parse_number('5')
        margin_trading_enabled_10x = self.safe_value(market, 'margin_trading_enabled_10x')
        if margin_trading_enabled_10x:
            maxLeverage = self.parse_number('10')
        result.append({
            'id': id,
            'symbol': base + '/' + quote,
            'base': base,
            'quote': quote,
            'settle': None,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': None,
            'type': 'spot',
            'spot': True,
            'margin': self.safe_value(market, 'margin_trading_enabled'),
            'swap': False,
            'future': False,
            'option': False,
            'active': None,
            'contract': False,
            'linear': None,
            'inverse': None,
            'contractSize': None,
            'expiry': None,
            'expiryDatetime': None,
            'strike': None,
            'optionType': None,
            'precision': {
                'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'quantity_decimals'))),
                'price': self.parse_number(self.parse_precision(priceDecimals)),
            },
            'limits': {
                'leverage': {
                    'min': self.parse_number('1'),
                    'max': maxLeverage,
                },
                'amount': {
                    'min': self.parse_number(minQuantity),
                    'max': self.safe_number(market, 'max_quantity'),
                },
                'price': {
                    'min': self.parse_number(minPrice),
                    'max': None,
                },
                'cost': {
                    # minimum cost is the product of the minimum amount and price
                    'min': self.parse_number(Precise.string_mul(minQuantity, minPrice)),
                    'max': None,
                },
            },
            'info': market,
        })
    return result
async def fetch_derivatives_markets(self, params={}):
    """Fetch the derivative instruments and parse them into unified market dicts.

    :param dict params: accepted for interface symmetry with fetch_spot_markets
        NOTE(review): `params` is not forwarded to the endpoint — confirm intentional
    :returns [dict]: unified swap/future market structures(linear, quote-settled)
    """
    result = []
    futuresResponse = await self.derivativesPublicGetPublicGetInstruments()
    #
    # {
    #     id: -1,
    #     method: 'public/get-instruments',
    #     code: 0,
    #     result: {
    #         data: [
    #             {
    #                 symbol: '1INCHUSD-PERP',
    #                 inst_type: 'PERPETUAL_SWAP',
    #                 display_name: '1INCHUSD Perpetual',
    #                 base_ccy: '1INCH',
    #                 quote_ccy: 'USD_Stable_Coin',
    #                 quote_decimals: 4,
    #                 quantity_decimals: 0,
    #                 price_tick_size: '0.0001',
    #                 qty_tick_size: '1',
    #                 max_leverage: '50',
    #                 tradable: True,
    #                 expiry_timestamp_ms: 0,
    #                 beta_product: False,
    #                 underlying_symbol: '1INCHUSD-INDEX',
    #                 put_call: 'UNDEFINED',
    #                 strike: '0',
    #                 contract_size: '1'
    #             },
    #         ]
    #     }
    # }
    #
    futuresResult = self.safe_value(futuresResponse, 'result', {})
    data = self.safe_value(futuresResult, 'data', [])
    for i in range(0, len(data)):
        market = data[i]
        inst_type = self.safe_string(market, 'inst_type')
        swap = inst_type == 'PERPETUAL_SWAP'
        future = inst_type == 'FUTURE'
        if inst_type == 'CCY_PAIR':
            continue  # Found some inconsistencies between spot and derivatives api so use spot api for currency pairs.
        baseId = self.safe_string(market, 'base_ccy')
        quoteId = self.safe_string(market, 'quote_ccy')
        base = self.safe_currency_code(baseId)
        quote = self.safe_currency_code(quoteId)
        symbol = base + '/' + quote + ':' + quote
        expiry = self.safe_integer(market, 'expiry_timestamp_ms')
        if expiry == 0:
            expiry = None  # perpetuals report 0 instead of omitting the field
        type = 'swap'
        if future:
            type = 'future'
            # futures carry their settlement date in the unified symbol
            symbol = symbol + '-' + self.yymmdd(expiry)
        contractSize = self.safe_number(market, 'contract_size')
        result.append({
            'id': self.safe_string(market, 'symbol'),
            'symbol': symbol,
            'base': base,
            'quote': quote,
            'settle': quote,
            'baseId': baseId,
            'quoteId': quoteId,
            'settleId': quoteId,
            'type': type,
            'spot': False,
            'margin': False,
            'swap': swap,
            'future': future,
            'option': False,
            'active': self.safe_value(market, 'tradable'),
            'contract': True,
            'linear': True,
            'inverse': False,
            'contractSize': contractSize,
            'expiry': expiry,
            'expiryDatetime': self.iso8601(expiry),
            'strike': None,
            'optionType': None,
            'precision': {
                'price': self.parse_number(self.parse_precision(self.safe_string(market, 'quote_decimals'))),
                'amount': self.parse_number(self.parse_precision(self.safe_string(market, 'quantity_decimals'))),
            },
            'limits': {
                'leverage': {
                    'min': self.parse_number('1'),
                    'max': self.safe_number(market, 'max_leverage'),
                },
                'amount': {
                    'min': self.parse_number(contractSize),
                    'max': None,
                },
                'price': {
                    'min': None,
                    'max': None,
                },
                'cost': {
                    'min': None,
                    'max': None,
                },
            },
            'info': market,
        })
    return result
async def fetch_tickers(self, symbols=None, params={}):
    """
    fetches price tickers for multiple markets, statistical calculations with the information calculated over the past 24 hours each market
    :param [str]|None symbols: unified symbols of the markets to fetch the ticker for, all market tickers are returned if not assigned
    :param dict params: extra parameters specific to the cryptocom api endpoint
    :returns dict: an array of `ticker structures <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
    """
    await self.load_markets()
    symbols = self.market_symbols(symbols)
    marketType, requestParams = self.handle_market_type_and_params('fetchTickers', None, params)
    # Route to the spot or the derivatives API depending on the market type.
    endpointByType = {
        'spot': 'spotPublicGetPublicGetTicker',
        'future': 'derivativesPublicGetPublicGetTickers',
        'swap': 'derivativesPublicGetPublicGetTickers',
    }
    methodName = self.get_supported_mapping(marketType, endpointByType)
    response = await getattr(self, methodName)(requestParams)
    # The payload nests the ticker rows under result.data, e.g.
    # {"code":0,"method":"public/get-ticker","result":{"data":[
    #     {"i":"CRO_BTC","b":0.00000890,"k":0.00001179,"a":0.00001042,
    #      "t":1591770793901,"v":14905879.59,"h":0.00,"l":0.00,"c":0.00}, ...]}}
    rawResult = self.safe_value(response, 'result', {})
    tickerRows = self.safe_value(rawResult, 'data', [])
    return self.parse_tickers(tickerRows, symbols)
async def fetch_ticker(self, symbol, params={}):
    """
    fetches a price ticker, a statistical calculation with the information calculated over the past 24 hours for a specific market
    :param str symbol: unified symbol of the market to fetch the ticker for
    :param dict params: extra parameters specific to the cryptocom api endpoint
    :returns dict: a `ticker structure <https://docs.ccxt.com/en/latest/manual.html#ticker-structure>`
    """
    await self.load_markets()
    market = self.market(symbol)
    marketType, requestParams = self.handle_market_type_and_params('fetchTicker', market, params)
    # Only the spot endpoint is wired up for single-instrument tickers.
    if marketType != 'spot':
        raise NotSupported(self.id + ' fetchTicker() only supports spot markets')
    request = {
        'instrument_name': market['id'],
    }
    response = await self.spotPublicGetPublicGetTicker(self.extend(request, requestParams))
    # result.data is a one-element list holding the requested instrument, e.g.
    # {"id":"-1","method":"public/get-tickers","code":"0","result":{"data":[
    #     {"i":"BTC_USDT","h":"20567.16","l":"20341.39","a":"20394.23",
    #      "v":"2236.3762","vv":"45739074.30","c":"-0.0036",
    #      "b":"20394.01","k":"20394.02","t":"1667406085934"}]}}
    payload = self.safe_value(response, 'result', {})
    rows = self.safe_value(payload, 'data', {})
    tickerRow = self.safe_value(rows, 0, {})
    return self.parse_ticker(tickerRow, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
    """
    fetches information on multiple orders made by the user
    :param str symbol: unified market symbol of the market orders were made in
    :param int|None since: the earliest time in ms to fetch orders for
    :param int|None limit: the maximum number of orde structures to retrieve
    :param dict params: extra parameters specific to the cryptocom api endpoint
    :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
    """
    if symbol is None:
        raise ArgumentsRequired(self.id + ' fetchOrders() requires a symbol argument')
    await self.load_markets()
    market = self.market(symbol)
    request = {
        'instrument_name': market['id'],
    }
    if since is not None:
        # maximum date range is one day
        request['start_ts'] = since
    if limit is not None:
        request['page_size'] = limit
    # Pick the endpoint by market type; an explicit marginMode overrides it.
    marketType, marketTypeQuery = self.handle_market_type_and_params('fetchOrders', market, params)
    method = self.get_supported_mapping(marketType, {
        'spot': 'spotPrivatePostPrivateGetOrderHistory',
        'margin': 'spotPrivatePostPrivateMarginGetOrderHistory',
        'future': 'derivativesPrivatePostPrivateGetOrderHistory',
        'swap': 'derivativesPrivatePostPrivateGetOrderHistory',
    })
    marginMode, query = self.custom_handle_margin_mode_and_params('fetchOrders', marketTypeQuery)
    if marginMode is not None:
        method = 'spotPrivatePostPrivateMarginGetOrderHistory'
    response = await getattr(self, method)(self.extend(request, query))
    #
    # spot and margin
    #     {
    #       id: 1641026542065,
    #       method: 'private/get-order-history',
    #       code: 0,
    #       result: {
    #         order_list: [
    #           {
    #             status: 'FILLED',
    #             side: 'BUY',
    #             price: 0,
    #             quantity: 110,
    #             order_id: '2120246337927715937',
    #             client_oid: '',
    #             create_time: 1641025064904,
    #             update_time: 1641025064958,
    #             type: 'MARKET',
    #             instrument_name: 'USDC_USDT',
    #             avg_price: 1.0001,
    #             cumulative_quantity: 110,
    #             cumulative_value: 110.011,
    #             fee_currency: 'USDC',
    #             exec_inst: '',
    #             time_in_force: 'GOOD_TILL_CANCEL'
    #           }
    #         ]
    #       }
    #     }
    #
    # swap
    #     {
    #       id: 1641026373106,
    #       method: 'private/get-order-history',
    #       code: 0,
    #       result: {
    #         data: [
    #           {
    #             account_id: '85ff689a-7508-4b96-aa79-dc0545d6e637',
    #             order_id: 13191401932,
    #             client_oid: '1641025941461',
    #             order_type: 'LIMIT',
    #             time_in_force: 'GOOD_TILL_CANCEL',
    #             side: 'BUY',
    #             exec_inst: [],
    #             quantity: '0.0001',
    #             limit_price: '48000.0',
    #             order_value: '4.80000000',
    #             maker_fee_rate: '0.00050',
    #             taker_fee_rate: '0.00070',
    #             avg_price: '47253.5',
    #             trigger_price: '0.0',
    #             ref_price_type: 'NULL_VAL',
    #             cumulative_quantity: '0.0001',
    #             cumulative_value: '4.72535000',
    #             cumulative_fee: '0.00330775',
    #             status: 'FILLED',
    #             update_user_id: 'ce075bef-b600-4277-bd6e-ff9007251e63',
    #             order_date: '2022-01-01',
    #             instrument_name: 'BTCUSD-PERP',
    #             fee_instrument_name: 'USD_Stable_Coin',
    #             create_time: 1641025941827,
    #             create_time_ns: '1641025941827994756',
    #             update_time: 1641025941827
    #           }
    #         ]
    #       }
    #     }
    #
    # spot/margin nest the rows under 'order_list', derivatives under 'data'
    data = self.safe_value(response, 'result', {})
    orderList = self.safe_value_2(data, 'order_list', 'data', [])
    return self.parse_orders(orderList, market, since, limit)
    async def fetch_trades(self, symbol, since=None, limit=None, params={}):
        """
        get the list of most recent trades for a particular symbol
        :param str symbol: unified symbol of the market to fetch trades for
        :param int|None since: timestamp in ms of the earliest trade to fetch
        :param int|None limit: the maximum amount of trades to fetch
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html?#public-trades>`
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'instrument_name': market['id'],
        }
        if since is not None:
            # maximum date range is one day
            request['start_ts'] = since
        if limit is not None:
            request['page_size'] = limit
        # spot and derivatives expose the same endpoint under different API roots
        marketType, query = self.handle_market_type_and_params('fetchTrades', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPublicGetPublicGetTrades',
            'future': 'derivativesPublicGetPublicGetTrades',
            'swap': 'derivativesPublicGetPublicGetTrades',
        })
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "code":0,
        #     "method":"public/get-trades",
        #     "result": {
        #          "instrument_name": "BTC_USDT",
        #          "data:": [
        #              {"dataTime":1591710781947,"d":465533583799589409,"s":"BUY","p":2.96,"q":16.0,"t":1591710781946,"i":"ICX_CRO"},
        #              {"dataTime":1591707701899,"d":465430234542863152,"s":"BUY","p":0.007749,"q":115.0,"t":1591707701898,"i":"VET_USDT"},
        #              {"dataTime":1591710786155,"d":465533724976458209,"s":"SELL","p":25.676,"q":0.55,"t":1591710786154,"i":"XTZ_CRO"},
        #              {"dataTime":1591710783300,"d":465533629172286576,"s":"SELL","p":2.9016,"q":0.6,"t":1591710783298,"i":"XTZ_USDT"},
        #              {"dataTime":1591710784499,"d":465533669425626384,"s":"SELL","p":2.7662,"q":0.58,"t":1591710784498,"i":"EOS_USDT"},
        #              {"dataTime":1591710784700,"d":465533676120104336,"s":"SELL","p":243.21,"q":0.01647,"t":1591710784698,"i":"ETH_USDT"},
        #              {"dataTime":1591710786600,"d":465533739878620208,"s":"SELL","p":253.06,"q":0.00516,"t":1591710786598,"i":"BCH_USDT"},
        #              {"dataTime":1591710786900,"d":465533749959572464,"s":"BUY","p":0.9999,"q":0.2,"t":1591710786898,"i":"USDC_USDT"},
        #              {"dataTime":1591710787500,"d":465533770081010000,"s":"BUY","p":3.159,"q":1.65,"t":1591710787498,"i":"ATOM_USDT"},
        #          ]
        #     }
        # }
        resultResponse = self.safe_value(response, 'result', {})
        data = self.safe_value(resultResponse, 'data', [])
        return self.parse_trades(data, market, since, limit)
    async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
        """
        fetches historical candlestick data containing the open, high, low, and close price, and the volume of a market
        :param str symbol: unified symbol of the market to fetch OHLCV data for
        :param str timeframe: the length of time each candle represents
        :param int|None since: timestamp in ms of the earliest candle to fetch
        :param int|None limit: the maximum amount of candles to fetch
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [[int]]: A list of candles ordered as timestamp, open, high, low, close, volume
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'instrument_name': market['id'],
            # translate the unified timeframe into the exchange-specific interval code
            'timeframe': self.timeframes[timeframe],
        }
        marketType, query = self.handle_market_type_and_params('fetchOHLCV', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPublicGetPublicGetCandlestick',
            'future': 'derivativesPublicGetPublicGetCandlestick',
            'swap': 'derivativesPublicGetPublicGetCandlestick',
        })
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "code":0,
        #     "method":"public/get-candlestick",
        #     "result":{
        #       "instrument_name":"BTC_USDT",
        #       "interval":"5m",
        #       "data":[
        #         {"t":1596944700000,"o":11752.38,"h":11754.77,"l":11746.65,"c":11753.64,"v":3.694583},
        #         {"t":1596945000000,"o":11753.63,"h":11754.77,"l":11739.83,"c":11746.17,"v":2.073019},
        #         {"t":1596945300000,"o":11746.16,"h":11753.24,"l":11738.1,"c":11740.65,"v":0.867247},
        #         ...
        #       ]
        #     }
        # }
        resultResponse = self.safe_value(response, 'result', {})
        data = self.safe_value(resultResponse, 'data', [])
        return self.parse_ohlcvs(data, market, timeframe, since, limit)
    async def fetch_order_book(self, symbol, limit=None, params={}):
        """
        fetches information on open orders with bid(buy) and ask(sell) prices, volumes and other data
        :param str symbol: unified symbol of the market to fetch the order book for
        :param int|None limit: the maximum amount of order book entries to return
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: A dictionary of `order book structures <https://docs.ccxt.com/en/latest/manual.html#order-book-structure>` indexed by market symbols
        """
        await self.load_markets()
        market = self.market(symbol)
        request = {
            'instrument_name': market['id'],
        }
        if limit:
            request['depth'] = limit
        marketType, query = self.handle_market_type_and_params('fetchOrderBook', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPublicGetPublicGetBook',
            'future': 'derivativesPublicGetPublicGetBook',
            'swap': 'derivativesPublicGetPublicGetBook',
        })
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "code":0,
        #     "method":"public/get-book",
        #     "result":{
        #         "bids":[[9668.44,0.006325,1.0],[9659.75,0.006776,1.0],[9653.14,0.011795,1.0],[9647.13,0.019434,1.0],[9634.62,0.013765,1.0],[9633.81,0.021395,1.0],[9628.46,0.037834,1.0],[9627.6,0.020909,1.0],[9621.51,0.026235,1.0],[9620.83,0.026701,1.0]],
        #         "asks":[[9697.0,0.68251,1.0],[9697.6,1.722864,2.0],[9699.2,1.664177,2.0],[9700.8,1.824953,2.0],[9702.4,0.85778,1.0],[9704.0,0.935792,1.0],[9713.32,0.002926,1.0],[9716.42,0.78923,1.0],[9732.19,0.00645,1.0],[9737.88,0.020216,1.0]],
        #         "t":1591704180270
        #     }
        # }
        # NOTE(review): the sample above shows bids/asks directly under "result", but the
        # code below reads result.data[0] — presumably the current API wraps the book in a
        # "data" array and the sample is outdated; confirm against the live endpoint
        result = self.safe_value(response, 'result')
        data = self.safe_value(result, 'data')
        orderBook = self.safe_value(data, 0)
        timestamp = self.safe_integer(orderBook, 't')
        return self.parse_order_book(orderBook, symbol, timestamp)
def parse_swap_balance(self, response):
responseResult = self.safe_value(response, 'result', {})
data = self.safe_value(responseResult, 'data', [])
result = {'info': response}
for i in range(0, len(data)):
balance = data[i]
currencyId = self.safe_string(balance, 'instrument_name')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'total_cash_balance')
account['free'] = self.safe_string(balance, 'total_available_balance')
result[code] = account
return self.safe_balance(result)
def parse_spot_balance(self, response):
data = self.safe_value(response, 'result', {})
coinList = self.safe_value(data, 'accounts', [])
result = {'info': response}
for i in range(0, len(coinList)):
balance = coinList[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = self.account()
account['total'] = self.safe_string(balance, 'balance')
account['free'] = self.safe_string(balance, 'available')
account['used'] = self.safe_string(balance, 'order')
result[code] = account
return self.safe_balance(result)
    async def fetch_balance(self, params={}):
        """
        query for balance and get the amount of funds available for trading or funds locked in orders
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: a `balance structure <https://docs.ccxt.com/en/latest/manual.html?#balance-structure>`
        """
        await self.load_markets()
        marketType, marketTypeQuery = self.handle_market_type_and_params('fetchBalance', None, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateGetAccountSummary',
            'margin': 'spotPrivatePostPrivateMarginGetAccountSummary',
            'future': 'derivativesPrivatePostPrivateUserBalance',
            'swap': 'derivativesPrivatePostPrivateUserBalance',
        })
        marginMode, query = self.custom_handle_margin_mode_and_params('fetchBalance', marketTypeQuery)
        if marginMode is not None:
            # an explicit margin mode overrides the market-type mapping above
            method = 'spotPrivatePostPrivateMarginGetAccountSummary'
        response = await getattr(self, method)(query)
        # spot
        # {
        #     "id": 11,
        #     "method": "private/get-account-summary",
        #     "code": 0,
        #     "result": {
        #         "accounts": [
        #             {
        #                 "balance": 99999999.905000000000000000,
        #                 "available": 99999996.905000000000000000,
        #                 "order": 3.000000000000000000,
        #                 "stake": 0,
        #                 "currency": "CRO"
        #             }
        #         ]
        #     }
        # }
        #
        # margin
        # {
        #     "id": 1656529728178,
        #     "method": "private/margin/get-account-summary",
        #     "code": 0,
        #     "result": {
        #         "accounts": [
        #             {
        #                 "balance": 0,
        #                 "available": 0,
        #                 "order": 0,
        #                 "borrowed": 0,
        #                 "position": 0,
        #                 "positionHomeCurrency": 0,
        #                 "positionBtc": 0,
        #                 "lastPriceHomeCurrency": 20111.38,
        #                 "lastPriceBtc": 1,
        #                 "currency": "BTC",
        #                 "accrued_interest": 0,
        #                 "liquidation_price": 0
        #             },
        #         ],
        #         "is_liquidating": False,
        #         "total_balance": 16,
        #         "total_balance_btc": 0.00079556,
        #         "equity_value": 16,
        #         "equity_value_btc": 0.00079556,
        #         "total_borrowed": 0,
        #         "total_borrowed_btc": 0,
        #         "total_accrued_interest": 0,
        #         "total_accrued_interest_btc": 0,
        #         "margin_score": "GOOD",
        #         "currency": "USDT"
        #     }
        # }
        #
        # swap
        # {
        #     "id" : 1641025392400,
        #     "method" : "private/user-balance",
        #     "code" : 0,
        #     "result" : {
        #         "data" : [{
        #             "total_available_balance" : "109.56000000",
        #             "total_margin_balance" : "109.56000000",
        #             "total_initial_margin" : "0.00000000",
        #             "total_maintenance_margin" : "0.00000000",
        #             "total_position_cost" : "0.00000000",
        #             "total_cash_balance" : "109.56000000",
        #             "total_collateral_value" : "109.56000000",
        #             "total_session_unrealized_pnl" : "0.00000000",
        #             "instrument_name" : "USD_Stable_Coin",
        #             "total_session_realized_pnl" : "0.00000000",
        #             "position_balances" : [{
        #                 "quantity" : "109.56000000",
        #                 "collateral_weight" : "1.000000",
        #                 "collateral_amount" : "109.56000000",
        #                 "market_value" : "109.56000000",
        #                 "max_withdrawal_balance" : "109.56000000",
        #                 "instrument_name" : "USD_Stable_Coin"
        #             }],
        #             "total_effective_leverage" : "0.000000",
        #             "position_limit" : "3000000.00000000",
        #             "used_position_limit" : "0.00000000",
        #             "is_liquidating" : False
        #         }]
        #     }
        # }
        #
        # spot and margin responses share a shape, so they share a parser as well
        parser = self.get_supported_mapping(marketType, {
            'spot': 'parseSpotBalance',
            'margin': 'parseSpotBalance',
            'future': 'parseSwapBalance',
            'swap': 'parseSwapBalance',
        })
        return getattr(self, parser)(response)
    async def fetch_order(self, id, symbol=None, params={}):
        """
        fetches information on an order made by the user
        :param str id: the order id
        :param str|None symbol: unified symbol of the market the order was made in
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {}
        marketType, marketTypeQuery = self.handle_market_type_and_params('fetchOrder', market, params)
        marginMode, query = self.custom_handle_margin_mode_and_params('fetchOrder', marketTypeQuery)
        # the spot/margin API expects a string order id, the derivatives API an integer one
        if (marketType == 'spot') or (marketType == 'margin') or (marginMode is not None):
            request['order_id'] = str(id)
        else:
            request['order_id'] = int(id)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateGetOrderDetail',
            'margin': 'spotPrivatePostPrivateMarginGetOrderDetail',
            'future': 'derivativesPrivatePostPrivateGetOrderDetail',
            'swap': 'derivativesPrivatePostPrivateGetOrderDetail',
        })
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginGetOrderDetail'
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "id": 11,
        #     "method": "private/get-order-detail",
        #     "code": 0,
        #     "result": {
        #         "trade_list": [
        #             {
        #                 "side": "BUY",
        #                 "instrument_name": "ETH_CRO",
        #                 "fee": 0.007,
        #                 "trade_id": "371303044218155296",
        #                 "create_time": 1588902493045,
        #                 "traded_price": 7,
        #                 "traded_quantity": 7,
        #                 "fee_currency": "CRO",
        #                 "order_id": "371302913889488619"
        #             }
        #         ],
        #         "order_info": {
        #             "status": "FILLED",
        #             "side": "BUY",
        #             "order_id": "371302913889488619",
        #             "client_oid": "9_yMYJDNEeqHxLqtD_2j3g",
        #             "create_time": 1588902489144,
        #             "update_time": 1588902493024,
        #             "type": "LIMIT",
        #             "instrument_name": "ETH_CRO",
        #             "cumulative_quantity": 7,
        #             "cumulative_value": 7,
        #             "avg_price": 7,
        #             "fee_currency": "CRO",
        #             "time_in_force": "GOOD_TILL_CANCEL",
        #             "exec_inst": "POST_ONLY"
        #         }
        #     }
        # }
        result = self.safe_value(response, 'result', {})
        # derivatives responses have no nested 'order_info', so fall back to the result itself
        order = self.safe_value(result, 'order_info', result)
        return self.parse_order(order, market)
    async def create_order(self, symbol, type, side, amount, price=None, params={}):
        """
        create a trade order
        :param str symbol: unified symbol of the market to create an order in
        :param str type: 'market' or 'limit'
        :param str side: 'buy' or 'sell'
        :param float amount: how much of currency you want to trade in units of base currency
        :param float|None price: the price at which the order is to be fullfilled, in units of the quote currency, ignored in market orders
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: an `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = self.market(symbol)
        uppercaseType = type.upper()
        request = {
            'instrument_name': market['id'],
            'side': side.upper(),
            'type': uppercaseType,
            'quantity': self.amount_to_precision(symbol, amount),
        }
        # price is only meaningful(and required by the API) for limit-style orders
        if (uppercaseType == 'LIMIT') or (uppercaseType == 'STOP_LIMIT'):
            request['price'] = self.price_to_precision(symbol, price)
        # translate the unified postOnly flag into the exchange's exec_inst field
        postOnly = self.safe_value(params, 'postOnly', False)
        if postOnly:
            request['exec_inst'] = 'POST_ONLY'
            params = self.omit(params, ['postOnly'])
        marketType, marketTypeQuery = self.handle_market_type_and_params('createOrder', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateCreateOrder',
            'margin': 'spotPrivatePostPrivateMarginCreateOrder',
            'future': 'derivativesPrivatePostPrivateCreateOrder',
            'swap': 'derivativesPrivatePostPrivateCreateOrder',
        })
        marginMode, query = self.custom_handle_margin_mode_and_params('createOrder', marketTypeQuery)
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginCreateOrder'
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "id": 11,
        #     "method": "private/create-order",
        #     "result": {
        #         "order_id": "337843775021233500",
        #         "client_oid": "my_order_0002"
        #     }
        # }
        result = self.safe_value(response, 'result', {})
        return self.parse_order(result, market)
    async def cancel_all_orders(self, symbol=None, params={}):
        """
        cancel all open orders
        :param str|None symbol: unified market symbol, only orders in the market of self symbol are cancelled when symbol is not None
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {}
        marketType, marketTypeQuery = self.handle_market_type_and_params('cancelAllOrders', market, params)
        marginMode, query = self.custom_handle_margin_mode_and_params('cancelAllOrders', marketTypeQuery)
        # the spot/margin endpoint cancels per-instrument only, so a symbol is mandatory there
        if (marketType == 'spot') or (marketType == 'margin') or (marginMode is not None):
            if symbol is None:
                raise ArgumentsRequired(self.id + ' cancelAllOrders() requires a symbol argument for ' + marketType + ' orders')
            request['instrument_name'] = market['id']
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateCancelAllOrders',
            'margin': 'spotPrivatePostPrivateMarginCancelAllOrders',
            'future': 'derivativesPrivatePostPrivateCancelAllOrders',
            'swap': 'derivativesPrivatePostPrivateCancelAllOrders',
        })
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginCancelAllOrders'
        # the raw response is returned as-is: the endpoint acknowledges but returns no order data
        return await getattr(self, method)(self.extend(request, query))
    async def cancel_order(self, id, symbol=None, params={}):
        """
        cancels an open order
        :param str id: order id
        :param str|None symbol: unified symbol of the market the order was made in
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: An `order structure <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = None
        if symbol is not None:
            market = self.market(symbol)
        request = {}
        marketType, marketTypeQuery = self.handle_market_type_and_params('cancelOrder', market, params)
        marginMode, query = self.custom_handle_margin_mode_and_params('cancelOrder', marketTypeQuery)
        # spot/margin needs the instrument plus a string id; derivatives take an integer id only
        if (marketType == 'spot') or (marketType == 'margin') or (marginMode is not None):
            if symbol is None:
                raise ArgumentsRequired(self.id + ' cancelOrder() requires a symbol argument for ' + marketType + ' orders')
            request['instrument_name'] = market['id']
            request['order_id'] = str(id)
        else:
            request['order_id'] = int(id)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateCancelOrder',
            'margin': 'spotPrivatePostPrivateMarginCancelOrder',
            'future': 'derivativesPrivatePostPrivateCancelOrder',
            'swap': 'derivativesPrivatePostPrivateCancelOrder',
        })
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginCancelOrder'
        response = await getattr(self, method)(self.extend(request, query))
        result = self.safe_value(response, 'result', response)
        return self.parse_order(result)
    async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
        """
        fetch all unfilled currently open orders
        :param str|None symbol: unified market symbol
        :param int|None since: the earliest time in ms to fetch open orders for
        :param int|None limit: the maximum number of open orders structures to retrieve
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `order structures <https://docs.ccxt.com/en/latest/manual.html#order-structure>`
        """
        await self.load_markets()
        market = None
        request = {}
        if symbol is not None:
            market = self.market(symbol)
            request['instrument_name'] = market['id']
        if limit is not None:
            request['page_size'] = limit
        marketType, marketTypeQuery = self.handle_market_type_and_params('fetchOpenOrders', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateGetOpenOrders',
            'margin': 'spotPrivatePostPrivateMarginGetOpenOrders',
            'future': 'derivativesPrivatePostPrivateGetOpenOrders',
            'swap': 'derivativesPrivatePostPrivateGetOpenOrders',
        })
        marginMode, query = self.custom_handle_margin_mode_and_params('fetchOpenOrders', marketTypeQuery)
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginGetOpenOrders'
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "id": 11,
        #     "method": "private/get-open-orders",
        #     "code": 0,
        #     "result": {
        #         "count": 1177,
        #         "order_list": [
        #             {
        #                 "status": "ACTIVE",
        #                 "side": "BUY",
        #                 "price": 1,
        #                 "quantity": 1,
        #                 "order_id": "366543374673423753",
        #                 "client_oid": "my_order_0002",
        #                 "create_time": 1588760643829,
        #                 "update_time": 1588760644292,
        #                 "type": "LIMIT",
        #                 "instrument_name": "ETH_CRO",
        #                 "cumulative_quantity": 0,
        #                 "cumulative_value": 0,
        #                 "avg_price": 0,
        #                 "fee_currency": "CRO",
        #                 "time_in_force": "GOOD_TILL_CANCEL"
        #             },
        #             {
        #                 "status": "ACTIVE",
        #                 "side": "BUY",
        #                 "price": 1,
        #                 "quantity": 1,
        #                 "order_id": "366455245775097673",
        #                 "client_oid": "my_order_0002",
        #                 "create_time": 1588758017375,
        #                 "update_time": 1588758017411,
        #                 "type": "LIMIT",
        #                 "instrument_name": "ETH_CRO",
        #                 "cumulative_quantity": 0,
        #                 "cumulative_value": 0,
        #                 "avg_price": 0,
        #                 "fee_currency": "CRO",
        #                 "time_in_force": "GOOD_TILL_CANCEL"
        #             }
        #         ]
        #     }
        # }
        data = self.safe_value(response, 'result', {})
        # spot/margin key the list as 'order_list', derivatives as 'data'
        resultList = self.safe_value_2(data, 'order_list', 'data', [])
        return self.parse_orders(resultList, market, since, limit)
    async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
        """
        fetch all trades made by the user
        :param str|None symbol: unified market symbol
        :param int|None since: the earliest time in ms to fetch trades for
        :param int|None limit: the maximum number of trades structures to retrieve
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `trade structures <https://docs.ccxt.com/en/latest/manual.html#trade-structure>`
        """
        await self.load_markets()
        request = {}
        market = None
        if symbol is not None:
            market = self.market(symbol)
            request['instrument_name'] = market['id']
        if since is not None:
            # maximum date range is one day
            request['start_ts'] = since
            # clamp the window to since + 24h to respect the API's one-day limit
            endTimestamp = self.sum(since, 24 * 60 * 60 * 1000)
            request['end_ts'] = endTimestamp
        if limit is not None:
            request['page_size'] = limit
        marketType, marketTypeQuery = self.handle_market_type_and_params('fetchMyTrades', market, params)
        method = self.get_supported_mapping(marketType, {
            'spot': 'spotPrivatePostPrivateGetTrades',
            'margin': 'spotPrivatePostPrivateMarginGetTrades',
            'future': 'derivativesPrivatePostPrivateGetTrades',
            'swap': 'derivativesPrivatePostPrivateGetTrades',
        })
        marginMode, query = self.custom_handle_margin_mode_and_params('fetchMyTrades', marketTypeQuery)
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginGetTrades'
        response = await getattr(self, method)(self.extend(request, query))
        # {
        #     "id": 11,
        #     "method": "private/get-trades",
        #     "code": 0,
        #     "result": {
        #         "trade_list": [
        #             {
        #                 "side": "SELL",
        #                 "instrument_name": "ETH_CRO",
        #                 "fee": 0.014,
        #                 "trade_id": "367107655537806900",
        #                 "create_time": 1588777459755,
        #                 "traded_price": 7,
        #                 "traded_quantity": 1,
        #                 "fee_currency": "CRO",
        #                 "order_id": "367107623521528450"
        #             }
        #         ]
        #     }
        # }
        data = self.safe_value(response, 'result', {})
        # spot/margin key the list as 'trade_list', derivatives as 'data'
        resultList = self.safe_value_2(data, 'trade_list', 'data', [])
        return self.parse_trades(resultList, market, since, limit)
def parse_address(self, addressString):
address = None
tag = None
rawTag = None
if addressString.find('?') > 0:
address, rawTag = addressString.split('?')
splitted = rawTag.split('=')
tag = splitted[1]
else:
address = addressString
return [address, tag]
    async def withdraw(self, code, amount, address, tag=None, params={}):
        """
        make a withdrawal
        :param str code: unified currency code
        :param float amount: the amount to withdraw
        :param str address: the address to withdraw to
        :param str|None tag: a memo or destination tag for currencies that require one
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: a `transaction structure <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
        """
        tag, params = self.handle_withdraw_tag_and_params(tag, params)
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
            'amount': amount,
            'address': address,
        }
        if tag is not None:
            request['address_tag'] = tag
        response = await self.spotPrivatePostPrivateCreateWithdrawal(self.extend(request, params))
        #
        # {
        #     "id":-1,
        #     "method":"private/create-withdrawal",
        #     "code":0,
        #     "result": {
        #         "id": 2220,
        #         "amount": 1,
        #         "fee": 0.0004,
        #         "symbol": "BTC",
        #         "address": "2NBqqD5GRJ8wHy1PYyCXTe9ke5226FhavBf",
        #         "client_wid": "my_withdrawal_002",
        #         "create_time":1607063412000
        #     }
        # }
        #
        result = self.safe_value(response, 'result')
        return self.parse_transaction(result, currency)
    async def fetch_deposit_addresses_by_network(self, code, params={}):
        """
        fetch a dictionary of addresses for a currency, indexed by network
        :param str code: unified currency code of the currency for the deposit address
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: a dictionary of `address structures <https://docs.ccxt.com/en/latest/manual.html#address-structure>` indexed by the network
        """
        await self.load_markets()
        currency = self.currency(code)
        request = {
            'currency': currency['id'],
        }
        response = await self.spotPrivatePostPrivateGetDepositAddress(self.extend(request, params))
        # {
        #     "id": 11,
        #     "method": "private/get-deposit-address",
        #     "code": 0,
        #     "result": {
        #         "deposit_address_list": [
        #             {
        #                 "currency": "CRO",
        #                 "create_time": 1615886328000,
        #                 "id": "12345",
        #                 "address": "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
        #                 "status": "1",
        #                 "network": "CRO"
        #             },
        #             {
        #                 "currency": "CRO",
        #                 "create_time": 1615886332000,
        #                 "id": "12346",
        #                 "address": "yyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyyy",
        #                 "status": "1",
        #                 "network": "ETH"
        #             }
        #         ]
        #     }
        # }
        data = self.safe_value(response, 'result', {})
        addresses = self.safe_value(data, 'deposit_address_list', [])
        # an empty list means the exchange has not generated an address for self currency yet
        if len(addresses) == 0:
            raise ExchangeError(self.id + ' fetchDepositAddressesByNetwork() generating address...')
        result = {}
        for i in range(0, len(addresses)):
            value = self.safe_value(addresses, i)
            addressString = self.safe_string(value, 'address')
            currencyId = self.safe_string(value, 'currency')
            responseCode = self.safe_currency_code(currencyId)
            # the raw address may embed a tag after a '?', split it out
            address, tag = self.parse_address(addressString)
            self.check_address(address)
            networkId = self.safe_string(value, 'network')
            network = self.safe_network(networkId)
            result[network] = {
                'info': value,
                'currency': responseCode,
                'address': address,
                'tag': tag,
                'network': network,
            }
        return result
async def fetch_deposit_address(self, code, params={}):
"""
fetch the deposit address for a currency associated with self account
:param str code: unified currency code
:param dict params: extra parameters specific to the cryptocom api endpoint
:returns dict: an `address structure <https://docs.ccxt.com/en/latest/manual.html#address-structure>`
"""
network = self.safe_string_upper(params, 'network')
params = self.omit(params, ['network'])
depositAddresses = await self.fetch_deposit_addresses_by_network(code, params)
if network in depositAddresses:
return depositAddresses[network]
else:
keys = list(depositAddresses.keys())
return depositAddresses[keys[0]]
def safe_network(self, networkId):
networksById = {
'BTC': 'BTC',
'ETH': 'ETH',
'SOL': 'SOL',
'BNB': 'BNB',
'CRONOS': 'CRONOS',
'MATIC': 'MATIC',
'OP': 'OP',
}
return self.safe_string(networksById, networkId, networkId)
    async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
        """
        fetch all deposits made to an account
        :param str|None code: unified currency code
        :param int|None since: the earliest time in ms to fetch deposits for
        :param int|None limit: the maximum number of deposits structures to retrieve
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
        """
        await self.load_markets()
        currency = None
        request = {}
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
        if since is not None:
            # 90 days date range
            request['start_ts'] = since
        if limit is not None:
            request['page_size'] = limit
        response = await self.spotPrivatePostPrivateGetDepositHistory(self.extend(request, params))
        # {
        #     "id": 11,
        #     "method": "private/get-deposit-history",
        #     "code": 0,
        #     "result": {
        #         "deposit_list": [
        #             {
        #                 "currency": "XRP",
        #                 "fee": 1.0,
        #                 "create_time": 1607063412000,
        #                 "id": "2220",
        #                 "update_time": 1607063460000,
        #                 "amount": 100,
        #                 "address": "2NBqqD5GRJ8wHy1PYyCXTe9ke5226FhavBf?1234567890",
        #                 "status": "1"
        #             }
        #         ]
        #     }
        # }
        data = self.safe_value(response, 'result', {})
        depositList = self.safe_value(data, 'deposit_list', [])
        return self.parse_transactions(depositList, currency, since, limit)
    async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
        """
        fetch all withdrawals made from an account
        :param str|None code: unified currency code
        :param int|None since: the earliest time in ms to fetch withdrawals for
        :param int|None limit: the maximum number of withdrawals structures to retrieve
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns [dict]: a list of `transaction structures <https://docs.ccxt.com/en/latest/manual.html#transaction-structure>`
        """
        await self.load_markets()
        currency = None
        request = {}
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
        if since is not None:
            # 90 days date range
            request['start_ts'] = since
        if limit is not None:
            request['page_size'] = limit
        response = await self.spotPrivatePostPrivateGetWithdrawalHistory(self.extend(request, params))
        #
        # {
        #     id: 1640704829096,
        #     method: 'private/get-withdrawal-history',
        #     code: 0,
        #     result: {
        #         withdrawal_list: [
        #             {
        #                 currency: 'DOGE',
        #                 client_wid: '',
        #                 fee: 50,
        #                 create_time: 1640425168000,
        #                 id: '3180557',
        #                 update_time: 1640425168000,
        #                 amount: 1102.64092,
        #                 address: 'DDrGGqmp5Ddo1QH9tUvDfoL4u4rqys5975',
        #                 status: '5',
        #                 txid: 'ce23e9e21b6c38eef953070a05110e6dca2fd2bcc76d3381000547b9ff5290b2/0'
        #             }
        #         ]
        #     }
        # }
        #
        data = self.safe_value(response, 'result', {})
        withdrawalList = self.safe_value(data, 'withdrawal_list', [])
        return self.parse_transactions(withdrawalList, currency, since, limit)
    async def transfer(self, code, amount, fromAccount, toAccount, params={}):
        """
        transfer currency internally between wallets on the same account
        :param str code: unified currency code
        :param float amount: amount to transfer
        :param str fromAccount: account to transfer from
        :param str toAccount: account to transfer to
        :param dict params: extra parameters specific to the cryptocom api endpoint
        :returns dict: a `transfer structure <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
        """
        await self.load_markets()
        currency = self.currency(code)
        fromAccount = fromAccount.lower()
        toAccount = toAccount.lower()
        # translate unified account names into exchange account ids where a mapping exists
        accountsById = self.safe_value(self.options, 'accountsById', {})
        fromId = self.safe_string(accountsById, fromAccount, fromAccount)
        toId = self.safe_string(accountsById, toAccount, toAccount)
        request = {
            'currency': currency['id'],
            'amount': float(amount),
            'from': fromId,
            'to': toId,
        }
        # derivatives transfer by default; margin transfers go through the margin endpoint
        method = 'spotPrivatePostPrivateDerivTransfer'
        if (fromAccount == 'margin') or (toAccount == 'margin'):
            method = 'spotPrivatePostPrivateMarginTransfer'
        response = await getattr(self, method)(self.extend(request, params))
        #
        # {
        #     "id": 11,
        #     "method": "private/deriv/transfer",
        #     "code": 0
        # }
        #
        return self.parse_transfer(response, currency)
    async def fetch_transfers(self, code=None, since=None, limit=None, params={}):
        """
        fetch a history of internal transfers made on an account
        :param str|None code: unified currency code of the currency transferred
        :param int|None since: the earliest time in ms to fetch transfers for
        :param int|None limit: the maximum number of transfers structures to retrieve
        :param dict params: extra parameters specific to the cryptocom api endpoint(requires params['direction'] to be "IN" or "OUT")
        :returns [dict]: a list of `transfer structures <https://docs.ccxt.com/en/latest/manual.html#transfer-structure>`
        """
        if not ('direction' in params):
            raise ArgumentsRequired(self.id + ' fetchTransfers() requires a direction param to be either "IN" or "OUT"')
        await self.load_markets()
        currency = None
        request = {
            # 'OUT' is only a placeholder default: the mandatory params['direction']
            # overrides it when the request is merged with query via extend() below
            'direction': 'OUT',
        }
        if code is not None:
            currency = self.currency(code)
            request['currency'] = currency['id']
        if since is not None:
            request['start_ts'] = since
        if limit is not None:
            request['page_size'] = limit
        method = 'spotPrivatePostPrivateDerivGetTransferHistory'
        marginMode, query = self.custom_handle_margin_mode_and_params('fetchTransfers', params)
        if marginMode is not None:
            method = 'spotPrivatePostPrivateMarginGetTransferHistory'
        response = await getattr(self, method)(self.extend(request, query))
        #
        # {
        #     id: '1641032709328',
        #     method: 'private/deriv/get-transfer-history',
        #     code: '0',
        #     result: {
        #         transfer_list: [
        #             {
        #                 direction: 'IN',
        #                 time: '1641025185223',
        #                 amount: '109.56',
        #                 status: 'COMPLETED',
        #                 information: 'From Spot Wallet',
        #                 currency: 'USDC'
        #             }
        #         ]
        #     }
        # }
        #
        # the whole response is wrapped so parse_transfer can read both the method and the list
        transfer = []
        transfer.append({
            'response': response,
        })
        return self.parse_transfers(transfer, currency, since, limit, params)
def parse_transfer_status(self, status):
statuses = {
'COMPLETED': 'ok',
'PROCESSING': 'pending',
}
return self.safe_string(statuses, status, status)
    def parse_transfer(self, transfer, currency=None):
        """
        Convert a wrapped get-transfer-history response into a unified transfer structure.
        NOTE(review): the loop below overwrites its locals on every iteration, so only
        the LAST entry of transfer_list contributes to the returned structure — confirm
        this is intentional (fetchTransfers wraps the whole response as one "transfer").
        """
        #
        # {
        #     response: {
        #         id: '1641032709328',
        #         method: 'private/deriv/get-transfer-history',
        #         code: '0',
        #         result: {
        #             transfer_list: [
        #                 {
        #                     direction: 'IN',
        #                     time: '1641025185223',
        #                     amount: '109.56',
        #                     status: 'COMPLETED',
        #                     information: 'From Spot Wallet',
        #                     currency: 'USDC'
        #                 }
        #             ]
        #         }
        #     }
        # }
        #
        response = self.safe_value(transfer, 'response', {})
        result = self.safe_value(response, 'result', {})
        transferList = self.safe_value(result, 'transfer_list', [])
        timestamp = None
        amount = None
        code = None
        information = None
        status = None
        for i in range(0, len(transferList)):
            entry = transferList[i]
            timestamp = self.safe_integer(entry, 'time')
            amount = self.safe_number(entry, 'amount')
            currencyId = self.safe_string(entry, 'currency')
            code = self.safe_currency_code(currencyId)
            information = self.safe_string(entry, 'information')
            rawStatus = self.safe_string(entry, 'status')
            status = self.parse_transfer_status(rawStatus)
        fromAccount = None
        toAccount = None
        # the API only reports a free-text 'information' field like "From Spot Wallet";
        # infer from/to accounts from its first two words and the endpoint that was called
        if information is not None:
            parts = information.split(' ')
            direction = self.safe_string_lower(parts, 0)
            method = self.safe_string(response, 'method')
            if direction == 'from':
                fromAccount = self.safe_string_lower(parts, 1)
                if method == 'private/margin/get-transfer-history':
                    toAccount = 'margin'
                else:
                    toAccount = 'derivative'
            elif direction == 'to':
                toAccount = self.safe_string_lower(parts, 1)
                if method == 'private/margin/get-transfer-history':
                    fromAccount = 'margin'
                else:
                    fromAccount = 'derivative'
        return {
            'info': transferList,
            'id': self.safe_string(response, 'id'),
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'currency': code,
            'amount': amount,
            'fromAccount': fromAccount,
            'toAccount': toAccount,
            'status': status,
        }
def parse_ticker(self, ticker, market=None):
# {
# "i":"CRO_BTC",
# "b":0.00000890,
# "k":0.00001179,
# "a":0.00001042,
# "t":1591770793901,
# "v":14905879.59,
# "h":0.00,
# "l":0.00,
# "c":0.00
# }
timestamp = self.safe_integer(ticker, 't')
marketId = self.safe_string(ticker, 'i')
market = self.safe_market(marketId, market, '_')
symbol = market['symbol']
last = self.safe_string(ticker, 'a')
relativeChange = self.safe_string(ticker, 'c')
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_string(ticker, 'h'),
'low': self.safe_string(ticker, 'l'),
'bid': self.safe_string(ticker, 'b'),
'bidVolume': None,
'ask': self.safe_string(ticker, 'k'),
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': relativeChange,
'percentage': None,
'average': None,
'baseVolume': self.safe_string(ticker, 'v'),
'quoteVolume': None,
'info': ticker,
}, market)
    def parse_trade(self, trade, market=None):
        """
        Convert a raw public or private trade into a unified trade structure.
        Handles both the short public keys (t/i/p/q/s/d) and the verbose
        private keys (create_time/instrument_name/traded_price/...).
        """
        #
        # public/get-trades
        #
        # {"dataTime":1591710781947,"d":465533583799589409,"s":"BUY","p":2.96,"q":16.0,"t":1591710781946,"i":"ICX_CRO"},
        #
        # private/get-trades
        #
        # {
        #     "side": "SELL",
        #     "instrument_name": "ETH_CRO",
        #     "fee": 0.014,
        #     "trade_id": "367107655537806900",
        #     "create_time": 1588777459755,
        #     "traded_price": 7,
        #     "traded_quantity": 1,
        #     "fee_currency": "CRO",
        #     "order_id": "367107623521528450"
        # }
        timestamp = self.safe_integer_2(trade, 't', 'create_time')
        marketId = self.safe_string_2(trade, 'i', 'instrument_name')
        market = self.safe_market(marketId, market, '_')
        symbol = market['symbol']
        price = self.safe_string_2(trade, 'p', 'traded_price')
        amount = self.safe_string_2(trade, 'q', 'traded_quantity')
        side = self.safe_string_2(trade, 's', 'side')
        if side is not None:
            side = side.lower()
        id = self.safe_string_2(trade, 'd', 'trade_id')
        takerOrMaker = self.safe_string_lower_2(trade, 'liquidity_indicator', 'taker_side')
        order = self.safe_string(trade, 'order_id')
        fee = None
        feeCost = self.safe_string_2(trade, 'fee', 'fees')
        if feeCost is not None:
            contract = self.safe_value(market, 'contract', False)
            if contract:
                # contract fees are negated here — NOTE(review): presumably the derivative
                # endpoint reports fees with the opposite sign convention; confirm in the API docs
                feeCost = Precise.string_neg(feeCost)
            feeCurrency = None
            if market['spot']:
                feeCurrency = self.safe_string(trade, 'fee_currency')
            elif market['linear']:
                feeCurrency = market['quote']
            fee = {
                'currency': feeCurrency,
                'cost': feeCost,
            }
        return self.safe_trade({
            'info': trade,
            'id': id,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'symbol': symbol,
            'side': side,
            'price': price,
            'amount': amount,
            'cost': None,
            'order': order,
            'takerOrMaker': takerOrMaker,
            'type': None,
            'fee': fee,
        }, market)
def parse_ohlcv(self, ohlcv, market=None):
# {"t":1596944700000,"o":11752.38,"h":11754.77,"l":11746.65,"c":11753.64,"v":3.694583}
return [
self.safe_integer(ohlcv, 't'),
self.safe_number(ohlcv, 'o'),
self.safe_number(ohlcv, 'h'),
self.safe_number(ohlcv, 'l'),
self.safe_number(ohlcv, 'c'),
self.safe_number(ohlcv, 'v'),
]
def parse_order_status(self, status):
statuses = {
'ACTIVE': 'open',
'CANCELED': 'canceled',
'FILLED': 'closed',
'REJECTED': 'rejected',
'EXPIRED': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_time_in_force(self, timeInForce):
timeInForces = {
'GOOD_TILL_CANCEL': 'GTC',
'IMMEDIATE_OR_CANCEL': 'IOC',
'FILL_OR_KILL': 'FOK',
}
return self.safe_string(timeInForces, timeInForce, timeInForce)
    def parse_order(self, order, market=None):
        """
        Convert a raw order (v1 or v2 format, see the examples below) into a
        unified order structure.
        """
        # {
        #     "status": "FILLED",
        #     "side": "BUY",
        #     "order_id": "371302913889488619",
        #     "client_oid": "9_yMYJDNEeqHxLqtD_2j3g",
        #     "create_time": 1588902489144,
        #     "update_time": 1588902493024,
        #     "type": "LIMIT",
        #     "instrument_name": "ETH_CRO",
        #     "cumulative_quantity": 7,
        #     "cumulative_value": 7,
        #     "avg_price": 7,
        #     "fee_currency": "CRO",
        #     "time_in_force": "GOOD_TILL_CANCEL",
        #     "exec_inst": "POST_ONLY"
        # }
        #
        # {
        #     id: 1641026373106,
        #     method: 'private/get-order-history',
        #     code: 0,
        #     result: {
        #         data: [
        #             {
        #                 account_id: '85ff689a-7508-4b96-aa79-dc0545d6e637',
        #                 order_id: 13191401932,
        #                 client_oid: '1641025941461',
        #                 order_type: 'LIMIT',
        #                 time_in_force: 'GOOD_TILL_CANCEL',
        #                 side: 'BUY',
        #                 exec_inst: [],
        #                 quantity: '0.0001',
        #                 limit_price: '48000.0',
        #                 order_value: '4.80000000',
        #                 maker_fee_rate: '0.00050',
        #                 taker_fee_rate: '0.00070',
        #                 avg_price: '47253.5',
        #                 trigger_price: '0.0',
        #                 ref_price_type: 'NULL_VAL',
        #                 cumulative_quantity: '0.0001',
        #                 cumulative_value: '4.72535000',
        #                 cumulative_fee: '0.00330775',
        #                 status: 'FILLED',
        #                 update_user_id: 'ce075bef-b600-4277-bd6e-ff9007251e63',
        #                 order_date: '2022-01-01',
        #                 instrument_name: 'BTCUSD-PERP',
        #                 fee_instrument_name: 'USD_Stable_Coin',
        #                 create_time: 1641025941827,
        #                 create_time_ns: '1641025941827994756',
        #                 update_time: 1641025941827
        #             }
        #         ]
        #     }
        # }
        #
        created = self.safe_integer(order, 'create_time')
        updated = self.safe_integer(order, 'update_time')
        marketId = self.safe_string(order, 'instrument_name')
        symbol = self.safe_symbol(marketId, market)
        amount = self.safe_string(order, 'quantity')
        filled = self.safe_string(order, 'cumulative_quantity')
        status = self.parse_order_status(self.safe_string(order, 'status'))
        id = self.safe_string(order, 'order_id')
        clientOrderId = self.safe_string(order, 'client_oid')
        price = self.safe_string_2(order, 'price', 'limit_price')
        average = self.safe_string(order, 'avg_price')
        # v1 uses 'type', v2 uses 'order_type'
        type = self.safe_string_lower_2(order, 'type', 'order_type')
        side = self.safe_string_lower(order, 'side')
        timeInForce = self.parse_time_in_force(self.safe_string(order, 'time_in_force'))
        execInst = self.safe_string(order, 'exec_inst')
        postOnly = None
        if execInst is not None:
            postOnly = (execInst == 'POST_ONLY')
        cost = self.safe_string(order, 'cumulative_value')
        feeCost = self.safe_string(order, 'cumulative_fee')
        fee = None
        if feeCost is not None:
            feeCurrency = self.safe_string(order, 'fee_instrument_name')
            fee = {
                'cost': feeCost,
                'currency': self.safe_currency_code(feeCurrency),
            }
        return self.safe_order({
            'info': order,
            'id': id,
            'clientOrderId': clientOrderId,
            'timestamp': created,
            'datetime': self.iso8601(created),
            'lastTradeTimestamp': updated,
            'status': status,
            'symbol': symbol,
            'type': type,
            'timeInForce': timeInForce,
            'postOnly': postOnly,
            'side': side,
            'price': price,
            'amount': amount,
            'filled': filled,
            'remaining': None,  # derived by safe_order from amount - filled
            'cost': cost,
            'fee': fee,
            'average': average,
            'trades': [],
        }, market)
def parse_deposit_status(self, status):
statuses = {
'0': 'pending',
'1': 'ok',
'2': 'failed',
'3': 'pending',
}
return self.safe_string(statuses, status, status)
def parse_withdrawal_status(self, status):
statuses = {
'0': 'pending',
'1': 'pending',
'2': 'failed',
'3': 'pending',
'4': 'failed',
'5': 'ok',
'6': 'canceled',
}
return self.safe_string(statuses, status, status)
    def parse_transaction(self, transaction, currency=None):
        """
        Convert a raw deposit/withdrawal record into a unified transaction
        structure.  The record type is inferred from the presence of the
        'client_wid' key, which only withdrawal records carry (see examples).
        """
        #
        # fetchDeposits
        #
        # {
        #     "currency": "XRP",
        #     "fee": 1.0,
        #     "create_time": 1607063412000,
        #     "id": "2220",
        #     "update_time": 1607063460000,
        #     "amount": 100,
        #     "address": "2NBqqD5GRJ8wHy1PYyCXTe9ke5226FhavBf?1234567890",
        #     "status": "1"
        # }
        #
        # fetchWithdrawals
        #
        # {
        #     "currency": "XRP",
        #     "client_wid": "my_withdrawal_002",
        #     "fee": 1.0,
        #     "create_time": 1607063412000,
        #     "id": "2220",
        #     "update_time": 1607063460000,
        #     "amount": 100,
        #     "address": "2NBqqD5GRJ8wHy1PYyCXTe9ke5226FhavBf?1234567890",
        #     "status": "1"
        # }
        #
        # withdraw
        #
        # {
        #     "id": 2220,
        #     "amount": 1,
        #     "fee": 0.0004,
        #     "symbol": "BTC",
        #     "address": "2NBqqD5GRJ8wHy1PYyCXTe9ke5226FhavBf",
        #     "client_wid": "my_withdrawal_002",
        #     "create_time":1607063412000
        # }
        #
        type = None
        rawStatus = self.safe_string(transaction, 'status')
        status = None
        # deposits and withdrawals use different numeric status tables
        if 'client_wid' in transaction:
            type = 'withdrawal'
            status = self.parse_withdrawal_status(rawStatus)
        else:
            type = 'deposit'
            status = self.parse_deposit_status(rawStatus)
        id = self.safe_string(transaction, 'id')
        # the address may embed a tag after '?' (see the XRP examples above)
        addressString = self.safe_string(transaction, 'address')
        address, tag = self.parse_address(addressString)
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        timestamp = self.safe_integer(transaction, 'create_time')
        amount = self.safe_number(transaction, 'amount')
        txId = self.safe_string(transaction, 'txid')
        feeCost = self.safe_number(transaction, 'fee')
        fee = None
        if feeCost is not None:
            fee = {'currency': code, 'cost': feeCost}
        updated = self.safe_integer(transaction, 'update_time')
        return {
            'info': transaction,
            'id': id,
            'txid': txId,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'network': None,
            'address': address,
            'addressTo': address,
            'addressFrom': None,
            'tag': tag,
            'tagTo': tag,
            'tagFrom': None,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'internal': None,
            'fee': fee,
        }
async def repay_margin(self, code, amount, symbol=None, params={}):
"""
repay borrowed margin and interest
see https://exchange-docs.crypto.com/spot/index.html#private-margin-repay
:param str code: unified currency code of the currency to repay
:param float amount: the amount to repay
:param str|None symbol: unified market symbol, not used by cryptocom.repayMargin()
:param dict params: extra parameters specific to the cryptocom api endpoint
:returns dict: a `margin loan structure <https://docs.ccxt.com/en/latest/manual.html#margin-loan-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': self.currency_to_precision(code, amount),
}
response = await self.spotPrivatePostPrivateMarginRepay(self.extend(request, params))
#
# {
# "id": 1656620104211,
# "method": "private/margin/repay",
# "code": 0,
# "result": {
# "badDebt": 0
# }
# }
#
transaction = self.parse_margin_loan(response, currency)
return self.extend(transaction, {
'amount': amount,
})
async def borrow_margin(self, code, amount, symbol=None, params={}):
"""
create a loan to borrow margin
see https://exchange-docs.crypto.com/spot/index.html#private-margin-borrow
:param str code: unified currency code of the currency to borrow
:param float amount: the amount to borrow
:param str|None symbol: unified market symbol, not used by cryptocom.repayMargin()
:param dict params: extra parameters specific to the cryptocom api endpoint
:returns dict: a `margin loan structure <https://docs.ccxt.com/en/latest/manual.html#margin-loan-structure>`
"""
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
'amount': self.currency_to_precision(code, amount),
}
response = await self.spotPrivatePostPrivateMarginBorrow(self.extend(request, params))
#
# {
# "id": 1656619578559,
# "method": "private/margin/borrow",
# "code": 0
# }
#
transaction = self.parse_margin_loan(response, currency)
return self.extend(transaction, {
'amount': amount,
})
def parse_margin_loan(self, info, currency=None):
#
# borrowMargin
#
# {
# "id": 1656619578559,
# "method": "private/margin/borrow",
# "code": 0
# }
#
# repayMargin
#
# {
# "id": 1656620104211,
# "method": "private/margin/repay",
# "code": 0,
# "result": {
# "badDebt": 0
# }
# }
#
return {
'id': self.safe_integer(info, 'id'),
'currency': self.safe_currency_code(None, currency),
'amount': None,
'symbol': None,
'timestamp': None,
'datetime': None,
'info': info,
}
async def fetch_borrow_interest(self, code=None, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
currency = None
if symbol is not None:
market = self.market(symbol)
if code is not None:
currency = self.currency(code)
request['currency'] = currency['id']
if since is not None:
request['start_ts'] = since
if limit is not None:
request['page_size'] = limit
response = await self.spotPrivatePostPrivateMarginGetInterestHistory(self.extend(request, params))
#
# {
# "id": 1656705829020,
# "method": "private/margin/get-interest-history",
# "code": 0,
# "result": {
# "list": [
# {
# "loan_id": "2643528867803765921",
# "currency": "USDT",
# "interest": 0.00000004,
# "time": 1656702899559,
# "stake_amount": 6,
# "interest_rate": 0.000025
# },
# ]
# }
# }
#
data = self.safe_value(response, 'result', {})
rows = self.safe_value(data, 'list', [])
interest = None
for i in range(0, len(rows)):
interest = self.parse_borrow_interests(rows, market)
return self.filter_by_currency_since_limit(interest, code, since, limit)
def parse_borrow_interest(self, info, market=None):
#
# {
# "loan_id": "2643528867803765921",
# "currency": "USDT",
# "interest": 0.00000004,
# "time": 1656702899559,
# "stake_amount": 6,
# "interest_rate": 0.000025
# },
#
timestamp = self.safe_integer(info, 'time')
symbol = None
if market is not None:
symbol = market['symbol']
return {
'symbol': symbol,
'marginMode': None,
'currency': self.safe_currency_code(self.safe_string(info, 'currency')),
'interest': self.safe_number(info, 'interest'),
'interestRate': self.safe_number(info, 'interest_rate'), # hourly interest rate
'amountBorrowed': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': info,
}
async def fetch_borrow_rates(self, params={}):
"""
fetch the borrow interest rates of all currencies
:param dict params: extra parameters specific to the cryptocom api endpoint
:returns dict: a list of `borrow rate structures <https://docs.ccxt.com/en/latest/manual.html#borrow-rate-structure>`
"""
await self.load_markets()
response = await self.spotPrivatePostPrivateMarginGetUserConfig(params)
#
# {
# "id": 1656707947456,
# "method": "private/margin/get-user-config",
# "code": 0,
# "result": {
# "stake_amount": 6,
# "currency_configs": [
# {
# "currency": "AGLD",
# "hourly_rate": 0.00003334,
# "max_borrow_limit": 342.4032393,
# "min_borrow_limit": 30
# },
# ]
# }
# }
#
data = self.safe_value(response, 'result', {})
rates = self.safe_value(data, 'currency_configs', [])
return self.parse_borrow_rates(rates, 'currency')
def parse_borrow_rates(self, info, codeKey):
#
# {
# "currency": "AGLD",
# "hourly_rate": 0.00003334,
# "max_borrow_limit": 342.4032393,
# "min_borrow_limit": 30
# },
#
timestamp = self.milliseconds()
rates = []
for i in range(0, len(info)):
entry = info[i]
rates.append({
'currency': self.safe_currency_code(self.safe_string(entry, 'currency')),
'rate': self.safe_number(entry, 'hourly_rate'),
'period': 3600000, # 1-Hour
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'info': entry,
})
return rates
    def custom_handle_margin_mode_and_params(self, methodName, params={}):
        """
        * @ignore
        marginMode specified by params["marginMode"], self.options["marginMode"], self.options["defaultMarginMode"], params["margin"] = True or self.options["defaultType"] = 'margin'
        :param dict params: extra parameters specific to the exchange api endpoint
        :returns [str|None, dict]: the marginMode in lowercase
        """
        defaultType = self.safe_string(self.options, 'defaultType')
        # capture the legacy params['margin'] flag before stripping it from params
        isMargin = self.safe_value(params, 'margin', False)
        params = self.omit(params, 'margin')
        marginMode = None
        marginMode, params = self.handle_margin_mode_and_params(methodName, params)
        if marginMode is not None:
            # only cross margin is accepted here; any other mode is rejected
            if marginMode != 'cross':
                raise NotSupported(self.id + ' only cross margin is supported')
        else:
            # legacy fallbacks: options['defaultType'] == 'margin' or params['margin'] == True
            if (defaultType == 'margin') or (isMargin is True):
                marginMode = 'cross'
        return [marginMode, params]
    def nonce(self):
        """Return the current timestamp in milliseconds, used as the request id/nonce in sign()."""
        return self.milliseconds()
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """
        Build the signed request for an endpoint.  `api` is a (type, access)
        pair declared per endpoint, e.g. ('spot', 'private').
        """
        type, access = api
        url = self.urls['api'][type] + '/' + path
        query = self.omit(params, self.extract_params(path))
        if access == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            nonce = str(self.nonce())
            requestParams = self.extend({}, params)
            keysorted = self.keysort(requestParams)
            paramsKeys = list(keysorted.keys())
            strSortKey = ''
            # concatenate key+value for every param in key-sorted order,
            # exactly as the exchange expects for the signature payload
            for i in range(0, len(paramsKeys)):
                strSortKey = strSortKey + str(paramsKeys[i]) + str(requestParams[paramsKeys[i]])
            # payload layout: method + id + api_key + sorted-params + nonce
            # (the request id equals the nonce here, hence nonce appearing twice)
            payload = path + nonce + self.apiKey + strSortKey + nonce
            signature = self.hmac(self.encode(payload), self.encode(self.secret))
            paramsKeysLength = len(paramsKeys)
            body = self.json({
                'id': nonce,
                'method': path,
                'params': params,
                'api_key': self.apiKey,
                'sig': signature,
                'nonce': nonce,
            })
            # fix issue https://github.com/ccxt/ccxt/issues/11179
            # php always encodes dictionaries as arrays
            # if an array is empty, php will put it in square brackets
            # python and js will put it in curly brackets
            # the code below checks and replaces those brackets in empty requests
            if paramsKeysLength == 0:
                paramsString = '{}'
                arrayString = '[]'
                body = body.replace(arrayString, paramsString)
            headers = {
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
        """Raise a mapped exception when the exchange reports a non-zero error code."""
        errorCode = self.safe_string(response, 'code')
        if errorCode != '0':
            feedback = self.id + ' ' + body
            # raises a specific exception when errorCode is in the exact-match table ...
            self.throw_exactly_matched_exception(self.exceptions['exact'], errorCode, feedback)
            # ... otherwise falls through to a generic ExchangeError
            raise ExchangeError(self.id + ' ' + body)
| [
"travis@travis-ci.org"
] | travis@travis-ci.org |
daf2fd690c88a4240ea99c0bad37cc113a3714e3 | 1a0e2871897fec3d345653fc3502909d9d2c48c0 | /cellprofiler_core/analysis/request/_debug_complete.py | bfe423f13d1202e2543f6980897e577b1bff1039 | [] | no_license | citypalmtree/core | 91ec8d75a09240c57a64708985a2e13fdeaab23d | 51f7aa318f126d4ec7be59780c8f88d4208b0ab1 | refs/heads/master | 2022-11-15T03:13:01.843990 | 2020-07-08T01:56:39 | 2020-07-08T01:56:39 | 277,964,195 | 0 | 0 | null | 2020-07-08T01:54:03 | 2020-07-08T01:54:02 | null | UTF-8 | Python | false | false | 204 | py | import cellprofiler_core.utilities.zmq.communicable.request._analysis_request
class DebugComplete(
    cellprofiler_core.utilities.zmq.communicable.request._analysis_request.AnalysisRequest
):
    """
    Marker analysis request carrying no extra payload.
    NOTE(review): semantics inferred from the name only (signals that a debug
    session has completed) — confirm against the analysis worker protocol.
    """
    pass
| [
"allen.goodman@icloud.com"
] | allen.goodman@icloud.com |
ec2100ffe81fc6bc9ee3ca2204b9dd7491bff4ad | 7a20dac7b15879b9453150b1a1026e8760bcd817 | /Curso/Challenges/URI/1154Ages.py | 28441b2097ef52e9af04e5429015523bfb5896f7 | [
"MIT"
] | permissive | DavidBitner/Aprendizado-Python | 7afbe94c48c210ddf1ab6ae21109a8475e11bdbc | e1dcf18f9473c697fc2302f34a2d3e025ca6c969 | refs/heads/master | 2023-01-02T13:24:38.987257 | 2020-10-26T19:31:22 | 2020-10-26T19:31:22 | 283,448,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | soma = media = 0
while True:
n = int(input())
if n < 0:
break
media += n
soma += 1
media /= soma
print(f"{media:.2f}")
| [
"david-bitner@hotmail.com"
] | david-bitner@hotmail.com |
4b13cfb3c68251ebdc78db99a7e5eebc94d6f1ec | 53784d3746eccb6d8fca540be9087a12f3713d1c | /res/packages/scripts/scripts/client/gui/prb_control/entities/e_sport/unit/requester.py | 4528c5a9cd0d410cd77307002ee02942ac25909b | [] | no_license | webiumsk/WOT-0.9.17.1-CT | 736666d53cbd0da6745b970e90a8bac6ea80813d | d7c3cf340ae40318933e7205bf9a17c7e53bac52 | refs/heads/master | 2021-01-09T06:00:33.898009 | 2017-02-03T21:40:17 | 2017-02-03T21:40:17 | 80,870,824 | 0 | 0 | null | null | null | null | WINDOWS-1250 | Python | false | false | 9,046 | py | # 2017.02.03 21:48:41 Střední Evropa (běžný čas)
# Embedded file name: scripts/client/gui/prb_control/entities/e_sport/unit/requester.py
import time
import BigWorld
from PlayerEvents import g_playerEvents
from UnitBase import UNIT_ERROR
from debug_utils import LOG_ERROR, LOG_DEBUG
from gui import SystemMessages
from gui.prb_control import prb_getters
from gui.prb_control.events_dispatcher import g_eventDispatcher
from gui.prb_control.formatters import messages
from helpers import time_utils
class UnitAutoSearchHandler(object):
    """
    Unit auto-search requester: handles the player's requests to
    start, accept, decline and stop an automatic unit search.
    NOTE(review): this file is decompiled output; the for/else constructs below
    are decompiler artifacts — the `else` branch of a for-loop runs once after
    the loop finishes normally.
    """
    def __init__(self, entity):
        super(UnitAutoSearchHandler, self).__init__()
        self.__entity = entity             # owning prebattle entity, provides listeners and entity type
        self.__vTypeDescrs = []            # intCDs of the vehicles selected for the search
        self.__isInSearch = False          # True while enqueued in the unit assembler
        self.__hasResult = False           # True once the browser reported a search success
        self.__startSearchTime = -1        # BigWorld.time() when the search was enqueued, -1 when idle
        self.__lastErrorCode = UNIT_ERROR.OK
    def init(self):
        """
        Initialization of the requester: subscribes to browser and player events.
        """
        browser = prb_getters.getClientUnitBrowser()
        if browser:
            browser.onSearchSuccessReceived += self.unitBrowser_onSearchSuccessReceived
            browser.onErrorReceived += self.unitBrowser_onErrorReceived
        else:
            LOG_ERROR('Unit browser is not defined')
        g_playerEvents.onDequeuedUnitAssembler += self.pe_onDequeuedUnitAssembler
        g_playerEvents.onKickedFromUnitAssembler += self.pe_onKickedFromUnitAssembler
        g_playerEvents.onEnqueuedUnitAssembler += self.pe_onEnqueuedUnitAssembler
    def fini(self):
        """
        Finalization of the requester: unsubscribes from events and stops any
        running search before dropping the entity reference.
        """
        browser = prb_getters.getClientUnitBrowser()
        if browser:
            browser.onSearchSuccessReceived -= self.unitBrowser_onSearchSuccessReceived
            browser.onErrorReceived -= self.unitBrowser_onErrorReceived
        g_playerEvents.onDequeuedUnitAssembler -= self.pe_onDequeuedUnitAssembler
        g_playerEvents.onKickedFromUnitAssembler -= self.pe_onKickedFromUnitAssembler
        g_playerEvents.onEnqueuedUnitAssembler -= self.pe_onEnqueuedUnitAssembler
        if self.__isInSearch:
            self.stop()
        self.__entity = None
        return
    def initEvents(self, listener):
        """
        Replays the current search state to a newly attached listener:
        a pending result (if still acceptable) or an in-progress search.
        """
        if self.__hasResult:
            browser = prb_getters.getClientUnitBrowser()
            if browser:
                acceptDelta = self.getAcceptDelta(browser._acceptDeadlineUTC)
                if acceptDelta > 0:
                    LOG_DEBUG('onUnitAutoSearchSuccess', acceptDelta)
                    listener.onUnitAutoSearchSuccess(acceptDelta)
        elif self.__isInSearch:
            g_eventDispatcher.setUnitProgressInCarousel(self.__entity.getEntityType(), True)
            listener.onUnitAutoSearchStarted(self.getTimeLeftInSearch())
    def isInSearch(self):
        """
        Returns True while the auto search is running.
        """
        return self.__isInSearch
    def getTimeLeftInSearch(self):
        """
        Returns the number of seconds elapsed since the search started,
        or -1 when no search has been started.
        """
        if self.__startSearchTime > -1:
            timeLeft = int(BigWorld.time() - self.__startSearchTime)
        else:
            timeLeft = -1
        return timeLeft
    def getAcceptDelta(self, acceptDeadlineUTC):
        """
        Returns the remaining seconds until the search result must be accepted.
        Args:
            acceptDeadlineUTC: UTC time when the approval expires
        """
        if acceptDeadlineUTC:
            return max(0, int(time_utils.makeLocalServerTime(acceptDeadlineUTC) - time.time()))
        return 0
    def start(self, vTypeDescrs = None):
        """
        Starts the auto search with the selected vehicles.
        Args:
            vTypeDescrs: list of selected vehicles intCDs; when None, the
                previously stored selection is reused
        Returns:
            True when the search request was sent, False otherwise.
        """
        if self.__isInSearch:
            LOG_ERROR('Auto search already started.')
            return False
        else:
            browser = prb_getters.getClientUnitBrowser()
            if browser:
                if vTypeDescrs is not None:
                    self.__vTypeDescrs = vTypeDescrs
                self.__lastErrorCode = UNIT_ERROR.OK
                browser.startSearch(vehTypes=self.__vTypeDescrs)
                return True
            LOG_ERROR('Unit browser is not defined')
            return False
        return
    def stop(self):
        """
        Stops the auto search.  When no search is running, forcibly clears
        the local queue state instead and reports True.
        """
        if not self.__isInSearch:
            LOG_DEBUG('Auto search did not start. Exits form search forced.')
            self.__exitFromQueue()
            return True
        browser = prb_getters.getClientUnitBrowser()
        if browser:
            self.__lastErrorCode = UNIT_ERROR.OK
            browser.stopSearch()
        else:
            LOG_ERROR('Unit browser is not defined')
        # NOTE(review): returns False even on a successful stopSearch request;
        # the actual state change arrives later via events
        return False
    def accept(self):
        """
        Accepts the auto search result.  Requires a pending result.
        """
        if not self.__hasResult:
            LOG_ERROR('First, sends request for search.')
            return False
        else:
            browser = prb_getters.getClientUnitBrowser()
            if browser:
                self.__lastErrorCode = UNIT_ERROR.OK
                browser.acceptSearch()
                return True
            LOG_ERROR('Unit browser is not defined')
            return False
    def decline(self):
        """
        Declines the auto search result.  Requires a pending result.
        """
        if not self.__hasResult:
            LOG_ERROR('First, sends request for search.')
            return False
        else:
            browser = prb_getters.getClientUnitBrowser()
            if browser:
                self.__lastErrorCode = UNIT_ERROR.OK
                browser.declineSearch()
                return True
            LOG_ERROR('Unit browser is not defined')
            return False
    def pe_onDequeuedUnitAssembler(self):
        """
        Listener for the unit assembler dequeue event.
        """
        self.__exitFromQueue()
        g_eventDispatcher.updateUI()
    def pe_onKickedFromUnitAssembler(self):
        """
        Listener for the unit assembler kick event; also shows a warning
        system message to the player.
        """
        self.__exitFromQueue()
        g_eventDispatcher.updateUI()
        SystemMessages.pushMessage(messages.getUnitKickedReasonMessage('KICKED_FROM_UNIT_ASSEMBLER'), type=SystemMessages.SM_TYPE.Warning)
    def pe_onEnqueuedUnitAssembler(self):
        """
        Listener for the unit assembler enqueue event: records the search
        start time and notifies all listeners.
        """
        self.__isInSearch = True
        self.__startSearchTime = BigWorld.time()
        g_eventDispatcher.setUnitProgressInCarousel(self.__entity.getEntityType(), True)
        for listener in self.__entity.getListenersIterator():
            listener.onUnitAutoSearchStarted(0)
        else:
            # for/else: runs once after the listener loop completes
            g_eventDispatcher.showUnitWindow(self.__entity.getEntityType())
        g_eventDispatcher.updateUI()
    def unitBrowser_onSearchSuccessReceived(self, unitMgrID, acceptDeadlineUTC):
        """
        Listener for the auto search success event.
        Args:
            unitMgrID: unit manager ID (unused here)
            acceptDeadlineUTC: time when the approval will expire
        """
        self.__hasResult = True
        acceptDelta = self.getAcceptDelta(acceptDeadlineUTC)
        LOG_DEBUG('onUnitAutoSearchSuccess', acceptDelta, acceptDeadlineUTC)
        g_eventDispatcher.setUnitProgressInCarousel(self.__entity.getEntityType(), False)
        for listener in self.__entity.getListenersIterator():
            listener.onUnitAutoSearchSuccess(acceptDelta)
        else:
            # for/else: runs once after the listener loop completes
            g_eventDispatcher.showUnitWindow(self.__entity.getEntityType())
        g_eventDispatcher.updateUI()
    def unitBrowser_onErrorReceived(self, errorCode, errorStr):
        """
        Listener for the auto search error event.
        Args:
            errorCode: error code
            errorStr: error message (unused here)
        """
        self.__isInSearch = False
        self.__lastErrorCode = errorCode
        if errorCode != UNIT_ERROR.OK:
            for listener in self.__entity.getListenersIterator():
                listener.onUnitBrowserErrorReceived(errorCode)
        g_eventDispatcher.updateUI()
    def __exitFromQueue(self):
        """
        Clears all state related to being in the search queue and notifies
        listeners that the search has finished.
        """
        self.__isInSearch = False
        self.__lastErrorCode = UNIT_ERROR.OK
        self.__hasResult = False
        self.__startSearchTime = 0
        prbType = self.__entity.getEntityType()
        g_eventDispatcher.setUnitProgressInCarousel(prbType, False)
        for listener in self.__entity.getListenersIterator():
            listener.onUnitAutoSearchFinished()
        else:
            # for/else: runs once after the listener loop completes
            g_eventDispatcher.showUnitWindow(prbType)
# okay decompyling c:\Users\PC\wotsources\files\originals\res\packages\scripts\scripts\client\gui\prb_control\entities\e_sport\unit\requester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.02.03 21:48:41 Střední Evropa (běžný čas)
| [
"info@webium.sk"
] | info@webium.sk |
ca699a7935f6cc559fb7425359c4f7e78b6d3cb4 | a5a99f646e371b45974a6fb6ccc06b0a674818f2 | /RecoJets/JetProducers/python/TracksForJets_cff.py | 059b08f72f44f40aa58bc9d58c52963f54292b51 | [
"Apache-2.0"
] | permissive | cms-sw/cmssw | 4ecd2c1105d59c66d385551230542c6615b9ab58 | 19c178740257eb48367778593da55dcad08b7a4f | refs/heads/master | 2023-08-23T21:57:42.491143 | 2023-08-22T20:22:40 | 2023-08-22T20:22:40 | 10,969,551 | 1,006 | 3,696 | Apache-2.0 | 2023-09-14T19:14:28 | 2013-06-26T14:09:07 | C++ | UTF-8 | Python | false | false | 264 | py | import FWCore.ParameterSet.Config as cms
from SimGeneral.HepPDTESSource.pythiapdt_cfi import *
# Produce ChargedRefCandidates from vertex-selected tracks, presumably for use
# as inputs to track-based jet clustering (NOTE(review): inferred from the
# producer/file names — confirm against the consuming jet sequences).
trackRefsForJets = cms.EDProducer("ChargedRefCandidateProducer",
    src = cms.InputTag('trackWithVertexRefSelector'),  # tracks passing the vertex-association selection
    particleType = cms.string('pi+')  # mass hypothesis assigned to every track
)
| [
"giulio.eulisse@gmail.com"
] | giulio.eulisse@gmail.com |
966dba2f8f26a6952139c9d1757edd97074c1c7a | 505343f6ace00d22f8753c1a943a5794a619e698 | /katas/Python/2 kyu/Assembler interpreter part II 58e61f3d8ff24f774400002c.py | fdbc8ca03e20526570fb2185924e2f3f83a72eed | [] | no_license | bullet1337/codewars | 7652e50bf768bc47976a9124dd98b93602d4d458 | ba7f13ddd766158b41e036dae5d6b15f7f08761a | refs/heads/master | 2020-03-27T05:04:03.751302 | 2019-04-30T17:45:39 | 2019-04-30T17:45:39 | 145,991,995 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,575 | py | # https://www.codewars.com/kata/58e61f3d8ff24f774400002c
class Frame:
    """Execution context for one call level of the interpreted program."""

    def __init__(self, ip=0):
        # ip: index of the next instruction to execute
        # cmp: sign of (a - b) from the most recent `cmp a, b`
        self.ip, self.cmp = ip, 0
def mov(operands, frame):
    # mov x, y: copy register y or integer constant y into register x
    dst, src = operands
    registers[dst] = registers[src] if src.isalpha() else int(src)
    frame.ip += 1
def inc(operands, frame):
    # inc x: add one to register x
    reg = operands[0]
    registers[reg] = registers[reg] + 1
    frame.ip += 1
def dec(operands, frame):
    # dec x: subtract one from register x
    reg = operands[0]
    registers[reg] = registers[reg] - 1
    frame.ip += 1
def add(operands, frame):
    # add x, y: x := x + (register or integer constant y)
    dst, src = operands
    registers[dst] = registers[dst] + (registers[src] if src.isalpha() else int(src))
    frame.ip += 1
def sub(operands, frame):
    # sub x, y: x := x - (register or integer constant y)
    dst, src = operands
    registers[dst] = registers[dst] - (registers[src] if src.isalpha() else int(src))
    frame.ip += 1
def mul(operands, frame):
    # mul x, y: x := x * (register or integer constant y)
    dst, src = operands
    registers[dst] = registers[dst] * (registers[src] if src.isalpha() else int(src))
    frame.ip += 1
def div(operands, frame):
    # div x, y: x := x // (register or integer constant y), integer division
    dst, src = operands
    registers[dst] = registers[dst] // (registers[src] if src.isalpha() else int(src))
    frame.ip += 1
def jmp(operands, frame):
    # jmp lbl: unconditional jump to the instruction the label points at
    (label,) = operands
    frame.ip = labels_map[label]
def call(operands, frame):
    # call lbl: advance past the call, then push a fresh frame at the label
    target = labels_map[operands[0]]
    frame.ip += 1
    frames.append(Frame(target))
def ret(operands, frame):
    # ret: discard the current frame, resuming the caller after its call
    del frames[-1]
def end(operands, frame):
    # end: mark successful termination and unwind the current frame
    global success
    success = True
    del frames[-1]
def msg(operands, frame):
    # msg ...: build the program output from quoted literals and register values
    global output
    parts = []
    for operand in operands:
        if operand.startswith("'"):
            parts.append(operand[1:-1])  # strip the surrounding quotes
        else:
            parts.append(str(registers[operand]))
    output = ''.join(parts)
    frame.ip += 1
def cmp(operands, frame):
    # cmp x, y: record the sign of x - y for the conditional jumps that follow
    left, right = operands
    lhs = registers[left] if left.isalpha() else int(left)
    rhs = registers[right] if right.isalpha() else int(right)
    frame.cmp = lhs - rhs
    frame.ip += 1
def jne(operands, frame):
    # jne lbl: jump when the last cmp operands were unequal
    frame.ip = labels_map[operands[0]] if frame.cmp != 0 else frame.ip + 1
def je(operands, frame):
    # je lbl: jump when the last cmp operands were equal
    frame.ip = labels_map[operands[0]] if frame.cmp == 0 else frame.ip + 1
def jge(operands, frame):
    # jge lbl: jump when the last cmp found x >= y
    frame.ip = labels_map[operands[0]] if frame.cmp >= 0 else frame.ip + 1
def jg(operands, frame):
    # jg lbl: jump when the last cmp found x > y
    frame.ip = labels_map[operands[0]] if frame.cmp > 0 else frame.ip + 1
def jle(operands, frame):
    # jle lbl: jump when the last cmp found x <= y
    frame.ip = labels_map[operands[0]] if frame.cmp <= 0 else frame.ip + 1
def jl(operands, frame):
    # jl lbl: jump when the last cmp found x < y
    frame.ip = labels_map[operands[0]] if frame.cmp < 0 else frame.ip + 1
def parse(program):
    """
    Tokenize the assembler source into a list of instructions (each a list of
    tokens), recording label -> instruction-index entries in the global
    labels_map as a side effect.  Returns None when the program contains no
    `end` instruction, which the caller treats as an invalid program.
    """
    instructions = []
    instruction = []
    token = ''
    i = 0
    end = False
    while i < len(program):
        c = program[i]
        if c == "'":
            # quoted string literal: consume it as a single token, quotes included
            token += c
            i += 1
            while i < len(program) and program[i] != "'":
                token += program[i]
                i += 1
            token += program[i]
        elif c == ';':
            # comment: flush any pending instruction, then skip to end of line
            if instruction:
                instructions.append(instruction)
                instruction = []
            i += 1
            while i < len(program) and program[i] != '\n':
                i += 1
        elif c == ':':
            # label definition: points at the NEXT instruction slot
            labels_map[token] = len(instructions)
            token = ''
        elif c == '\n':
            # end of line terminates both the current token and the instruction
            if token:
                instruction.append(token)
                end |= token == 'end'
                token = ''
            instructions.append(instruction)
            instruction = []
        elif c == ' ' or c == ',' or c == '\t':
            # token separators within one instruction
            if token:
                end |= token == 'end'
                instruction.append(token)
                token = ''
        else:
            token += c
        i += 1
    # flush whatever remains when the program does not end with a newline
    if token:
        end |= token == 'end'
        instruction.append(token)
        instructions.append(instruction)
    return instructions if end else None
def assembler_interpreter(program):
    """Parse and execute an assembler *program*.

    Returns the value of the last `msg` output if execution reached an
    `end` instruction; otherwise -1 (also when the program contains no
    `end` instruction at all).
    """
    # Interpreter state lives in module-level globals that are shared
    # with the individual instruction handlers.
    global registers, labels_map, frames, success, output
    registers = {}
    labels_map = {}
    frames = [Frame()]  # call stack; frames[-1] is the active frame
    success = False
    output = None
    # Dispatch table: mnemonic -> handler(operands, frame)
    instructions_map = {
        'mov': mov, 'inc': inc, 'dec': dec, 'add': add, 'sub': sub, 'mul': mul, 'div': div, 'jmp': jmp, 'cmp': cmp,
        'jne': jne, 'je': je, 'jge': jge, 'jg': jg, 'jle': jle, 'jl': jl, 'call': call, 'ret': ret, 'msg': msg,
        'end': end
    }
    instructions = parse(program)
    if instructions is None:
        # parse() returns None when the program lacks an 'end' instruction.
        return -1
    # Run until the call stack empties ('end'/'ret') or the instruction
    # pointer of the active frame runs off the end of the program.
    while frames and frames[-1].ip < len(instructions):
        instruction, *operands = instructions[frames[-1].ip]
        instructions_map[instruction](operands, frame=frames[-1])
return output if success else -1 | [
"alichek95@mail.ru"
] | alichek95@mail.ru |
33bea5a81924a0c881a7e98ae59251af0b7efea9 | 4d07dfc5005ffe1d40337f99dea2ce20a5454a4e | /call-management/rule-management/create-company-greeting/code-samples/createCompanyGreeting.py | 69c0da247695ca7fa8e303df8fcf3c657f204ab1 | [] | no_license | ringcentral/ringcentral-api-code-samples | c6160c7cf305ec01709ddf87e830a513e3b2d17e | 92d75734e82809c56ae572b1a0347d5e8c222a0e | refs/heads/master | 2021-12-25T02:09:17.653787 | 2019-08-30T22:06:03 | 2019-08-30T22:06:03 | 204,058,290 | 2 | 4 | null | 2021-11-30T14:37:01 | 2019-08-23T19:28:26 | C# | UTF-8 | Python | false | false | 753 | py | # https://developers.ringcentral.com/my-account.html#/applications
# Find your credentials at the above url, set them as environment variables, or enter them below
# PATH PARAMETERS
accountId = '<ENTER VALUE>'

import os
from ringcentral import SDK

# Authenticate against the RingCentral platform using environment credentials.
rcsdk = SDK(os.environ['clientId'], os.environ['clientSecret'], os.environ['serverURL'])
platform = rcsdk.platform()
platform.login(os.environ['username'], os.environ['extension'], os.environ['password'])

# Build a multipart request carrying the greeting metadata and the audio payload.
builder = rcsdk.create_multipart_builder()
builder.set_body({
    'type': 'Company'
})
# WAV audio is binary data: it must be read in 'rb' mode.  Text mode would
# raise UnicodeDecodeError (or corrupt the payload) on Python 3.  The
# context manager also closes the file handle, which was previously leaked.
with open('mygreeting.wav', 'rb') as greeting_file:
    binary = ('mygreeting.wav', greeting_file.read(), 'audio/wav')
builder.add(binary)
request = builder.request(f'/restapi/v1.0/account/{accountId}/greeting')
resp = platform.send_request(request)
| [
"drew.ligman@gmail.com"
] | drew.ligman@gmail.com |
b7c88331aa45890842ae86b76706f18dc7eec82d | e9757274ddb8484e27590ff0cc3f24550776c6cc | /Solved/0118/0118.py | 27c242f3f3dfd5e5fd672ace23028e7362ab786a | [] | no_license | Jinmin-Goh/LeetCode | 948a9b3e77eb03507aad6f3c78640aa7f00e6ad5 | d6e80b968032b08506c5b185f66d35c6ff1f8bb9 | refs/heads/master | 2020-09-22T10:22:18.443352 | 2020-09-06T06:34:12 | 2020-09-06T06:34:12 | 225,153,497 | 1 | 1 | null | 2020-01-29T15:16:53 | 2019-12-01T11:55:25 | Python | UTF-8 | Python | false | false | 666 | py | # Problem No.: 118
# Solver: Jinmin Goh
# Date: 20200115
# URL: https://leetcode.com/problems/pascals-triangle/
import sys
class Solution:
def generate(self, numRows: int) -> List[List[int]]:
if not numRows:
return []
if numRows == 1:
return [[1]]
if numRows == 2:
return [[1], [1,1]]
ans = [[1], [1,1]]
for i in range(2, numRows + 1):
print(ans, i)
ans.append([1])
for j in range(1, i - 1):
ans[i].append(ans[i - 1][j - 1] + ans[i - 1][j])
ans[i].append(1)
ans.pop(1)
return ans
| [
"eric970901@gmail.com"
] | eric970901@gmail.com |
2bfd1f9141a25054ee6e3c064715759677b1c827 | bed34365a9dab825fd9f4a4ff1b0863f441266ac | /neutron/tests/unit/services/l3_router/test_l3_router_plugin.py | f045e58567b74fdf465206b618b148d783818291 | [
"Apache-2.0"
] | permissive | openstack/neutron | 0913ee3cd69d5bdb9c10aa084d4e1803abee320c | dde31aae392b80341f6440eb38db1583563d7d1f | refs/heads/master | 2023-08-31T13:09:41.831598 | 2023-08-31T11:37:30 | 2023-08-31T11:37:30 | 2,400,289 | 1,174 | 1,325 | Apache-2.0 | 2022-06-29T08:00:05 | 2011-09-16T16:04:08 | Python | UTF-8 | Python | false | false | 1,192 | py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from neutron.services.l3_router import l3_router_plugin as lrp
from neutron.tests import base
class TestL3PluginDvrConditional(base.BaseTestCase):
    """Check that the 'dvr' extension alias tracks the enable_dvr option."""

    def _test_dvr_alias_exposed(self, enabled):
        # The plugin should advertise the 'dvr' alias exactly when DVR
        # support is switched on in the configuration.
        cfg.CONF.set_override('enable_dvr', enabled)
        plugin = lrp.L3RouterPlugin()
        self.assertEqual(enabled,
                         'dvr' in plugin.supported_extension_aliases)

    def test_dvr_alias_exposed_enabled(self):
        self._test_dvr_alias_exposed(enabled=True)

    def test_dvr_alias_exposed_disabled(self):
        self._test_dvr_alias_exposed(enabled=False)
| [
"ihrachys@redhat.com"
] | ihrachys@redhat.com |
73bf36c731ba344577a5b0017978458b51d26d58 | 6a95b330e1beec08b917ff45eccfd6be3fd4629f | /kubernetes/test/test_v1beta1_cluster_role_list.py | 5817779990b99ae2d28c041fd3d6cc6e8c697e19 | [
"Apache-2.0"
] | permissive | TokkoLabs/client-python | f4a83d6540e64861b59e322c951380a670578d7f | f1ad9c6889105d8510472606c98f8d3807f82020 | refs/heads/master | 2023-07-14T01:36:46.152341 | 2017-12-21T21:32:11 | 2017-12-21T21:32:11 | 115,042,671 | 0 | 0 | Apache-2.0 | 2021-08-06T03:29:17 | 2017-12-21T20:05:15 | Python | UTF-8 | Python | false | false | 1,011 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.8.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kubernetes.client
from kubernetes.client.rest import ApiException
from kubernetes.client.models.v1beta1_cluster_role_list import V1beta1ClusterRoleList
class TestV1beta1ClusterRoleList(unittest.TestCase):
    """Unit test stubs for the V1beta1ClusterRoleList model."""

    def setUp(self):
        # No fixtures are required for these generated stubs.
        pass

    def tearDown(self):
        # Nothing to clean up.
        pass

    def testV1beta1ClusterRoleList(self):
        """Smoke-test construction of V1beta1ClusterRoleList."""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1beta1_cluster_role_list.V1beta1ClusterRoleList()
        pass
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| [
"mehdy@google.com"
] | mehdy@google.com |
52f0f1e13a19477a42d2b64fe74e4c98a2fd2bb5 | 247b4e8e425b06a96a835426304629b39ed4021c | /foo/logs.py | 4e95137361ffe91ec3fc42b4c147f928dce627b3 | [
"MIT"
] | permissive | RobertoPrevato/PythonCLI | 63f19a8e07e44731684aef0e7394afd7b63400fb | 4d4af7cd66ab8f9a5bed2a5d01236ef29753401f | refs/heads/master | 2023-04-04T17:45:38.330844 | 2020-07-06T09:02:17 | 2020-07-06T09:02:17 | 271,034,925 | 2 | 0 | MIT | 2021-04-20T20:01:41 | 2020-06-09T15:06:48 | Python | UTF-8 | Python | false | false | 698 | py | import logging
import logging.handlers
from datetime import datetime
from essentials.folders import ensure_folder
logger = None
def get_app_logger():
    """Return the shared application logger, creating it on first use.

    The logger writes to a size-rotated file under logs/<YYYYMMDD>/ and
    also echoes records to stderr via a StreamHandler.
    """
    global logger
    if logger is None:
        logger = logging.getLogger("app")
        logger.setLevel(logging.INFO)
        date_part = datetime.now().strftime("%Y%m%d")
        ensure_folder(f"logs/{date_part}")
        rotating = logging.handlers.RotatingFileHandler(
            f"logs/{date_part}/app.log",
            maxBytes=24 * 1024 * 1024,
            backupCount=5,
        )
        rotating.setLevel(logging.DEBUG)
        logger.addHandler(rotating)
        logger.addHandler(logging.StreamHandler())
    return logger
| [
"roberto.prevato@gmail.com"
] | roberto.prevato@gmail.com |
80ed2e1cb5dabb511c53b4fff6507fc0ad98f2d8 | 66cab93c26cc252f412860778131b208c6f120be | /parts/newproject/pyramid/scaffolds/tests.py | 276f529f9e58739a8628631f8305a38963d21f49 | [] | no_license | marcogarzini/Zodiac | 3332733f6ae8d64924557ff022f44c835aeac0a9 | 06e8ad0c709189dc65a26fb7d6c17a9ee2bc9112 | refs/heads/master | 2016-09-11T03:18:12.805299 | 2014-01-17T12:50:03 | 2014-01-17T12:50:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 76 | py | /home/user1/newproject/eggs/pyramid-1.4-py2.7.egg/pyramid/scaffolds/tests.py | [
"user1@user1-VirtualBox.(none)"
] | user1@user1-VirtualBox.(none) |
279de8adb2b3699b3b64a4025c81b0b05274086a | 4eaa1b9b08914e0a2cc9276363e489ccef19d3a2 | /ch8/greet_users.py | 63aab04ab28079790f3930251a60e913741e89b7 | [] | no_license | melihcanyardi/Python-Crash-Course-2e-Part-I | 69b3b5b3f63cdbd7be6fabd6d4f2ddfd9a3434a3 | 0c9b250f512985c04b2c0397f3afaa8bf3a57f17 | refs/heads/main | 2023-03-12T21:43:14.012537 | 2021-03-03T19:23:41 | 2021-03-03T19:23:41 | 344,236,741 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | def greet_users(names):
"""Print a simple greeting to each user."""
for name in names:
msg = f"Hello, {name.title()}!"
print(msg)
# usernames = ['hannah', 'ty', 'magot']
# greet_users(usernames)
| [
"melihcanyardi@hotmail.com"
] | melihcanyardi@hotmail.com |
aa384a4a3e2205567ada8b3ee0295314af2675c8 | 124df74bce796598d224c4380c60c8e95756f761 | /com.raytheon.viz.gfe/python/pyViz/GFEPainter.py | c5f2d6d8b3807067950be6acb09305d7188c1141 | [] | no_license | Mapoet/AWIPS-Test | 19059bbd401573950995c8cc442ddd45588e6c9f | 43c5a7cc360b3cbec2ae94cb58594fe247253621 | refs/heads/master | 2020-04-17T03:35:57.762513 | 2017-02-06T17:17:58 | 2017-02-06T17:17:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,221 | py | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
import os
from com.raytheon.uf.viz.core import RGBColors
from com.raytheon.uf.viz.core.map import MapDescriptor
from com.raytheon.uf.viz.core.rsc.capabilities import ColorableCapability,\
OutlineCapability, LabelableCapability, MagnificationCapability, ColorMapCapability
from com.raytheon.viz.core import ColorUtil
from com.raytheon.viz.gfe.core import DataManagerOffscreenFactory, GFEMapRenderableDisplay
from com.raytheon.viz.gfe.ifpimage import GfeImageUtil, ImageLegendResource
from com.raytheon.viz.gfe.rsc import GFEResource, GFESystemResource
from com.raytheon.viz.gfe.core.parm import ParmDisplayAttributes_EditorType as EditorType
from com.raytheon.viz.gfe.core.parm import ParmDisplayAttributes_VisMode as VisMode
from com.raytheon.viz.gfe.core.parm import ParmDisplayAttributes_VisualizationType as VisualizationType
from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceID
from java.lang import Double
from java.lang import Integer
from javax.imageio import ImageIO
from java.util import HashSet
from java.io import File
#
# GFE Painter for painting GFE data from scripts
#
#
# SOFTWARE HISTORY
#
# Date Ticket# Engineer Description
# ------------ ---------- ----------- --------------------------
# 04/01/09 njensen Initial Creation.
# 08/20/2012 #1077 randerso Fixed backgroundColor setting
# 09/13/2012 #1147 dgilling Allow map labels to be disabled.
# 11/06/2012 15406 ryu Correction for computing domain from mask
# 09/12/2013 #2033 dgilling Change how logo files are accessed.
#
#
import VizPainter
class GFEPainter(VizPainter.VizPainter):
    """Offscreen painter for rendering GFE grid data to image files.

    Runs under Jython inside the AWIPS CAVE/Java environment (note the
    java.* and com.raytheon.* imports); it is not runnable under CPython.
    """
    def __init__(self, imageWidth=None, imageHeight=None, expandLeft=25.0, expandRight=25.0, expandTop=25.0, expandBottom=25.0, mask=None, wholeDomain=0, bgColor=None):
        """Build an offscreen display sized from the GFE grid location.

        imageWidth/imageHeight -- optional pixel dimensions of the output
        expandLeft/.../expandBottom -- per-side expansion in percent
        mask -- optional edit-area name used to restrict/position the view
        wholeDomain -- non-zero keeps the whole grid domain despite a mask
        bgColor -- optional background color for the display
        """
        # Create empty display and data manager for display
        display = GFEMapRenderableDisplay()
        self.dataMgr = DataManagerOffscreenFactory.getInstance(display)
        self.refId = None
        envelope = None
        gloc = self.dataMgr.getParmManager().compositeGridLocation()
        if mask is not None:
            from com.raytheon.uf.common.dataplugin.gfe.reference import ReferenceData_CoordinateType as CoordinateType
            self.refId = ReferenceID(mask)
            if wholeDomain == 0:
                # Restrict the geometry to the grid-space extent of the mask.
                envelope = self.dataMgr.getRefManager().loadRefSet(self.refId).overallDomain(CoordinateType.GRID)
        # GfeImageUtil expects java.lang.Integer (or null) dimensions.
        if imageWidth is not None:
            imageWidth = Integer(int(imageWidth))
        if imageHeight is not None:
            imageHeight = Integer(int(imageHeight))
        geom = GfeImageUtil.getLocationGeometry(gloc, envelope, imageWidth, imageHeight, expandLeft / 100.0, expandRight / 100.0, expandTop / 100.0, expandBottom / 100.0)
        # Create descriptor for display
        desc = MapDescriptor(geom)
        display.setDescriptor(desc)
        VizPainter.VizPainter.__init__(self, display, backgroundColor=bgColor)
        gfeSystem = GFESystemResource(self.dataMgr)
        self.addVizResource(gfeSystem)
        desc.getResourceList().getProperties(gfeSystem).setSystemResource(True)
        # Most recently added GFE resource (set by addGfeResource).
        self.primaryRsc = None
    def __del__(self):
        VizPainter.VizPainter.__del__(self)
    def setupLegend(self, localTime=False, snapshotTime=False, snapshot='', descriptiveName='SHORT', duration='', start='', end='', override={}, lang=''):
        """Attach a legend resource configured with the given time/name formats.

        override maps parm names to legend color overrides.
        NOTE(review): the mutable default for `override` is safe here only
        because it is never mutated -- just read.
        """
        legend = ImageLegendResource(self.dataMgr)
        legend.setLocalTime(localTime)
        legend.setSnapshotTime(snapshotTime)
        legend.setSnapshotFormat(snapshot)
        legend.setDescriptiveName(descriptiveName)
        legend.setDurationFormat(duration)
        legend.setStartFormat(start)
        legend.setEndFormat(end)
        legend.setLanguage(lang)
        parms = override.keys()
        for parm in parms:
            legend.setColorOverride(parm, override[parm])
        self.addVizResource(legend)
        self.getDescriptor().getResourceList().getProperties(legend).setSystemResource(True)
    def enableColorbar(self):
        """Attach the GFE colorbar as a system resource of the display."""
        from com.raytheon.viz.gfe.rsc.colorbar import GFEColorbarResource
        colorBar = GFEColorbarResource(self.dataMgr)
        self.addVizResource(colorBar)
        self.getDescriptor().getResourceList().getProperties(colorBar).setSystemResource(True)
    def __makeGFEResource(self, parm):
        """Create, register and return a GFEResource for *parm*,
        assigning a fresh base color if the parm has none."""
        parm.getParmState().setPickUpValue(None)
        gfeRsc = GFEResource(parm, self.dataMgr)
        self.addVizResource(gfeRsc)
        if not parm.getDisplayAttributes().getBaseColor():
            from com.raytheon.viz.core import ColorUtil
            parm.getDisplayAttributes().setBaseColor(ColorUtil.getNewColor(self.getDescriptor()))
        return gfeRsc
    def addGfeResource(self, parm, colormap=None, colorMin=None, colorMax=None, smooth=False, color=None, lineWidth=None):
        """Add a GFE resource for *parm* and apply optional display settings
        (colormap, colormap range, smoothing, outline color/width)."""
        gfeRsc = self.__makeGFEResource(parm)
#        jvisType = VisualizationType.valueOf('IMAGE')
#        jset = HashSet()
#        jset.add(jvisType)
#        parm.getDisplayAttributes().setVisualizationType(EDITOR, IMAGE, jset)
#        parm.getDisplayAttributes().setVisMode(IMAGE)
        if self.refId is not None:
            # Limit drawing of this parm to the masked edit area.
            parm.getDisplayAttributes().setDisplayMask(self.refId)
        self.primaryRsc = gfeRsc
        params = gfeRsc.getCapability(ColorMapCapability).getColorMapParameters()
        if colormap is not None:
            from com.raytheon.uf.viz.core.drawables import ColorMapLoader
            params.setColorMap(ColorMapLoader.loadColorMap(colormap))
        if colorMax is not None and colorMin is not None:
            params.setDataMin(colorMin)
            params.setColorMapMin(colorMin)
            params.setDataMax(colorMax)
            params.setColorMapMax(colorMax)
        if smooth:
            from com.raytheon.uf.viz.core.rsc.capabilities import ImagingCapability
            gfeRsc.getCapability(ImagingCapability).setInterpolationState(True)
        if color is None:
            color = ColorUtil.getNewColor(self.getDescriptor())
        else:
            color = RGBColors.getRGBColor(color)
        gfeRsc.getCapability(ColorableCapability).setColor(color)
        if lineWidth is not None:
            gfeRsc.getCapability(OutlineCapability).setOutlineWidth(lineWidth)
    def addMapBackground(self, mapName, color=None, lineWidth=None,
                         linePattern=None, xOffset=None, yOffset=None,
                         labelAttribute=None, fontOffset=None):
        """Load the named map bundle as a background and apply the optional
        outline, label and magnification settings."""
        from com.raytheon.uf.viz.core.maps import MapManager
        rsc = MapManager.getInstance(self.getDescriptor()).loadMapByBundleName(mapName).getResource()
        if color is not None:
            rsc.getCapability(ColorableCapability).setColor(RGBColors.getRGBColor(color))
        if lineWidth is not None:
            rsc.getCapability(OutlineCapability).setOutlineWidth(lineWidth)
        if linePattern is not None:
            rsc.getCapability(OutlineCapability).setLineStyle(linePattern)
        if xOffset is not None:
            rsc.getCapability(LabelableCapability).setxOffset(xOffset)
        if yOffset is not None:
            rsc.getCapability(LabelableCapability).setyOffset(yOffset)
        # labelAttribute may be None, which disables map labels.
        rsc.getCapability(LabelableCapability).setLabelField(labelAttribute)
        if fontOffset is not None:
            # Magnification grows/shrinks by ~26% per font offset step.
            mag = Double(1.26 ** fontOffset)
            rsc.getCapability(MagnificationCapability).setMagnification(mag)
    def getDataManager(self):
        """Return the offscreen DataManager backing this painter."""
        return self.dataMgr
    def outputFiles(self, filename, attachLogo=False, logoText=None):
        """Render the display to *filename*, optionally appending a footer
        strip with the NOAA/NWS logos and centered *logoText*."""
        rendered = self.getTarget().screenshot()
        if attachLogo:
            from java.awt.image import BufferedImage
            noaa = File(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logos/noaalogo2.png'))
            nws = File(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'logos/nwslogo.png'))
            noaaImage = ImageIO.read(noaa)
            nwsImage = ImageIO.read(nws)
            # Final image: screenshot on top, logo strip along the bottom.
            height = rendered.getHeight() + noaaImage.getHeight()
            finalBuf = BufferedImage(rendered.getWidth(), height, BufferedImage.TYPE_INT_ARGB)
            graphics = finalBuf.createGraphics()
            graphics.drawImage(rendered, 0, 0, None)
            graphics.drawImage(noaaImage, 0, rendered.getHeight(), None)
            graphics.fillRect(noaaImage.getWidth(), rendered.getHeight(), rendered.getWidth() - noaaImage.getWidth() - nwsImage.getWidth(), rendered.getHeight())
            if logoText is not None:
                from java.awt import Color
                from com.raytheon.uf.viz.core.font import FontAdapter
                graphics.setColor(Color.BLACK)
                graphics.setFont(FontAdapter.getAWTFont(self.getTarget().getDefaultFont()))
                fm = graphics.getFontMetrics()
                textBounds = fm.getStringBounds(logoText, graphics)
                # Center the text horizontally within the logo strip.
                graphics.drawString(logoText, int((rendered.getWidth() - textBounds.getWidth()) / 2), \
                                    int(rendered.getHeight() + (noaaImage.getHeight() / 2) + textBounds.getHeight() / 2))
            graphics.drawImage(nwsImage, finalBuf.getWidth() - nwsImage.getWidth(), rendered.getHeight(), None)
            finalBuf.flush()
            self.outputImage(finalBuf, filename)
        else:
            self.outputImage(rendered, filename)
| [
"joshua.t.love@saic.com"
] | joshua.t.love@saic.com |
5bb865f631f65936ec4c9d374b4cf780d649646f | 2e083b7dbbce8c823c38a1a5d0863fe2f96903ff | /MyStock/MyStock/urls.py | afa179d67bc33f5f7d9844a6c62bc676b74aeadf | [] | no_license | jayquake/Django_Applications | 4d49023f954c86e0c53e18ec8e67137c9fe016a0 | ee0e12e8af502ca4fa0f6281ead955b1b1e7ebb1 | refs/heads/master | 2023-03-10T07:12:52.192224 | 2021-02-21T10:58:58 | 2021-02-21T10:58:58 | 259,059,961 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 749 | py | """MyStock URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| [
"jayquake@gmail.com"
] | jayquake@gmail.com |
2cdb67f0b58fa12f5e124cf54892ae880d47cfd2 | 23f6dbacd9b98fdfd08a6f358b876d3d371fc8f6 | /rootfs/usr/share/pyshared/aptdaemon/worker.py | b64f81aef08b2ace507785400fbbc5b8dbfde7e1 | [] | no_license | xinligg/trainmonitor | 07ed0fa99e54e2857b49ad3435546d13cc0eb17a | 938a8d8f56dc267fceeb65ef7b867f1cac343923 | refs/heads/master | 2021-09-24T15:52:43.195053 | 2018-10-11T07:12:25 | 2018-10-11T07:12:25 | 116,164,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44,535 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Provides AptWorker which processes transactions."""
# Copyright (C) 2008-2009 Sebastian Heinlein <devel@glatzor.de>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
__author__ = "Sebastian Heinlein <devel@glatzor.de>"
__all__ = ("AptWorker", "DummyWorker")
import logging
import os
import sys
import time
import traceback
import apt
import apt.cache
import apt.debfile
import apt_pkg
import aptsources
import aptsources.distro
from aptsources.sourceslist import SourcesList
from gettext import gettext as _
import gobject
import pkg_resources
import re
from softwareproperties.AptAuth import AptAuth
import subprocess
from enums import *
from errors import *
import lock
from progress import DaemonOpenProgress, \
DaemonInstallProgress, \
DaemonAcquireProgress, \
DaemonDpkgInstallProgress, \
DaemonDpkgRecoverProgress
log = logging.getLogger("AptDaemon.Worker")
class AptWorker(gobject.GObject):
"""Worker which processes transactions from the queue."""
__gsignals__ = {"transaction-done":(gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
(gobject.TYPE_PYOBJECT,))}
    def __init__(self):
        """Initialize a new AptWorker instance."""
        gobject.GObject.__init__(self)
        # Transaction currently being processed (None while idle).
        self.trans = None
        # Timestamp of the most recent worker activity.
        self.last_action_timestamp = time.time()
        # The apt package cache; opened on demand by _open_cache().
        self._cache = None
        # Remember dpkg's status file location (Dir::State::status).
        self._status_orig = apt_pkg.config.find_file("Dir::State::status")
        # Maps plugin entry point names to lists of loaded callables.
        self.plugins = {}
        self._load_plugins()
def _load_plugins(self):
"""Load the plugins from setuptools' entry points."""
plugin_dirs = [os.path.join(os.path.dirname(__file__), "plugins")]
env = pkg_resources.Environment(plugin_dirs)
dists, errors = pkg_resources.working_set.find_plugins(env)
for dist in dists:
pkg_resources.working_set.add(dist)
for name in ["modify_cache_after", "modify_cache_before"]:
for ept in pkg_resources.iter_entry_points("aptdaemon.plugins",
name):
try:
self.plugins.setdefault(name, []).append(ept.load())
except:
log.critical("Failed to load %s plugin: "
"%s" % (name, ept.dist))
else:
log.debug("Loaded %s plugin: %s", name, ept.dist)
    def _call_plugins(self, name, resolver=None):
        """Call all plugins of a given type.

        Keyword arguments:
        name -- the plugin entry point name, e.g. "modify_cache_before"
        resolver -- optional apt.cache.ProblemResolver; if not given a
                    fresh one is created with the already marked cache
                    changes protected from being reverted

        Returns True if at least one plugin is registered for *name*,
        otherwise False.  A failing plugin is logged but does not abort
        the transaction.
        """
        if not resolver:
            # If the resolver of the original task isn't available we create
            # a new one and protect the already marked changes
            resolver = apt.cache.ProblemResolver(self._cache)
            for pkg in self._cache.get_changes():
                resolver.clear(pkg)
                resolver.protect(pkg)
                if pkg.marked_delete:
                    resolver.remove(pkg)
        if not name in self.plugins:
            log.debug("There isn't any registered %s plugin" % name)
            return False
        for plugin in self.plugins[name]:
            log.debug("Calling %s plugin: %s", name, plugin)
            try:
                plugin(resolver, self._cache)
            except Exception, error:
                # A broken plugin must not break the whole transaction.
                log.critical("Failed to call %s plugin:\n%s" % (plugin, error))
        return True
    def run(self, transaction):
        """Process the given transaction in the background.

        Keyword argument:
        transaction -- core.Transcation instance to run

        Raises Exception if another transaction is already running; only
        one transaction may be processed at a time.
        """
        log.info("Processing transaction %s", transaction.tid)
        if self.trans:
            raise Exception("There is already a running transaction")
        self.trans = transaction
        # Defer the actual processing to the GLib main loop.
        gobject.idle_add(self._process_transaction)
    def _emit_transaction_done(self, trans):
        """Emit the transaction-done signal.

        Keyword argument:
        trans -- the finished transaction
        """
        log.debug("Emitting transaction-done: %s", trans.tid)
        # GObject signal declared in __gsignals__ above.
        self.emit("transaction-done", trans)
    def _process_transaction(self):
        """Run the worker.

        Dispatches self.trans to the handler matching its role, maps any
        raised exception to the transaction's exit state and finally
        emits transaction-done and releases the cache lock.  Returns
        False so that gobject.idle_add() does not reschedule this
        callback.
        """
        self.last_action_timestamp = time.time()
        self.trans.status = STATUS_RUNNING
        self.trans.progress = 0
        try:
            self._lock_cache()
            # Prepare the package cache
            if self.trans.role == ROLE_FIX_INCOMPLETE_INSTALL or \
               not self.is_dpkg_journal_clean():
                self.fix_incomplete_install()
            # Process transaction which don't require a cache
            if self.trans.role == ROLE_ADD_VENDOR_KEY_FILE:
                self.add_vendor_key_from_file(**self.trans.kwargs)
            elif self.trans.role == ROLE_ADD_VENDOR_KEY_FROM_KEYSERVER:
                self.add_vendor_key_from_keyserver(**self.trans.kwargs)
            elif self.trans.role == ROLE_REMOVE_VENDOR_KEY:
                self.remove_vendor_key(**self.trans.kwargs)
            elif self.trans.role == ROLE_ADD_REPOSITORY:
                self.add_repository(**self.trans.kwargs)
            elif self.trans.role == ROLE_ENABLE_DISTRO_COMP:
                self.enable_distro_comp(**self.trans.kwargs)
            else:
                self._open_cache()
                # Process transaction which can handle a broken dep cache
                if self.trans.role == ROLE_FIX_BROKEN_DEPENDS:
                    self.fix_broken_depends()
                elif self.trans.role == ROLE_UPDATE_CACHE:
                    self.update_cache(**self.trans.kwargs)
                # Process the transactions which require a consistent cache
                elif self._cache and self._cache.broken_count:
                    broken = [pkg.name for pkg in self._cache if pkg.is_now_broken]
                    raise TransactionFailed(ERROR_CACHE_BROKEN, " ".join(broken))
                elif self.trans.role == ROLE_INSTALL_PACKAGES:
                    self.install_packages(self.trans.packages[0])
                elif self.trans.role == ROLE_INSTALL_FILE:
                    self.install_file(**self.trans.kwargs)
                elif self.trans.role == ROLE_REMOVE_PACKAGES:
                    self.remove_packages(self.trans.packages[2])
                elif self.trans.role == ROLE_UPGRADE_SYSTEM:
                    self.upgrade_system(**self.trans.kwargs)
                elif self.trans.role == ROLE_UPGRADE_PACKAGES:
                    self.upgrade_packages(self.trans.packages[4])
                elif self.trans.role == ROLE_COMMIT_PACKAGES:
                    self.commit_packages(*self.trans.packages)
        except TransactionCancelled:
            self.trans.exit = EXIT_CANCELLED
        except TransactionFailed, excep:
            self.trans.error = excep
            self.trans.exit = EXIT_FAILED
        except (KeyboardInterrupt, SystemExit):
            self.trans.exit = EXIT_CANCELLED
        except Exception, excep:
            # Unexpected errors are wrapped as ERROR_UNKNOWN with the
            # traceback preserved for debugging.
            self.trans.error = TransactionFailed(ERROR_UNKNOWN,
                                                 traceback.format_exc())
            self.trans.exit = EXIT_FAILED
        else:
            self.trans.exit = EXIT_SUCCESS
        finally:
            self.trans.progress = 100
            self.last_action_timestamp = time.time()
            tid = self.trans.tid[:]
            trans = self.trans
            # Clear self.trans before emitting so a new transaction can
            # be accepted by run().
            self.trans = None
            self._emit_transaction_done(trans)
            lock.release()
            log.info("Finished transaction %s", tid)
        return False
    def commit_packages(self, install, reinstall, remove, purge, upgrade):
        """Perform a complex package operation.

        Keyword arguments:
        install - list of package names to install
        reinstall - list of package names to reinstall
        remove - list of package names to remove
        purge - list of package names to purge including configuration files
        upgrade - list of package names to upgrade
        """
        log.info("Committing packages: %s, %s, %s, %s, %s",
                 install, reinstall, remove, purge, upgrade)
        #FIXME python-apt 0.8 introduced a with statement
        # The action group batches the cache updates of all markings below.
        ac = self._cache.actiongroup()
        resolver = apt.cache.ProblemResolver(self._cache)
        self._mark_packages_for_installation(install, resolver)
        self._mark_packages_for_installation(reinstall, resolver,
                                             reinstall=True)
        self._mark_packages_for_removal(remove, resolver)
        self._mark_packages_for_removal(purge, resolver, purge=True)
        self._mark_packages_for_upgrade(upgrade, resolver)
        self._resolve_depends(resolver)
        ac.release()
        self._commit_changes()
def _resolve_depends(self, resolver):
"""Resolve the dependencies using the given ProblemResolver."""
self._call_plugins("modify_cache_before", resolver)
resolver.install_protect()
try:
resolver.resolve()
except SystemError:
broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
raise TransactionFailed(ERROR_DEP_RESOLUTION_FAILED,
" ".join(broken))
if self._call_plugins("modify_cache_after", resolver):
try:
resolver.resolve()
except SystemError:
broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
raise TransactionFailed(ERROR_DEP_RESOLUTION_FAILED,
" ".join(broken))
def install_packages(self, package_names):
"""Install packages.
Keyword argument:
package_names -- list of package name which should be installed
"""
log.debug("Installing packages: %s", package_names)
self.trans.status = STATUS_RESOLVING_DEP
ac = self._cache.actiongroup()
resolver = apt.cache.ProblemResolver(self._cache)
self._mark_packages_for_installation(package_names, resolver)
self._resolve_depends(resolver)
ac.release()
self._commit_changes()
def _check_unauthenticated(self):
"""Check if any of the cache changes get installed from an
unauthenticated repository"""
if self.trans.allow_unauthenticated:
return
unauthenticated = []
for pkg in self._cache:
if (pkg.marked_install or
pkg.marked_downgrade or
pkg.marked_upgrade or
pkg.marked_reinstall):
trusted = False
for origin in pkg.candidate.origins:
trusted |= origin.trusted
if not trusted:
unauthenticated.append(pkg.name)
if unauthenticated:
raise TransactionFailed(ERROR_PACKAGE_UNAUTHENTICATED,
" ".join(sorted(unauthenticated)))
    def _mark_packages_for_installation(self, package_names, resolver,
                                        reinstall=False):
        """Mark packages for installation.

        Keyword arguments:
        package_names -- names of the packages to install or reinstall
        resolver -- apt.cache.ProblemResolver; each marked package is
                    protected so the resolver won't revert the change
        reinstall -- if True the packages must already be installed

        Raises TransactionFailed with ERROR_NO_PACKAGE,
        ERROR_PACKAGE_NOT_INSTALLED or ERROR_PACKAGE_ALREADY_INSTALLED.
        """
        for pkg_name in package_names:
            try:
                pkg = self._cache[pkg_name]
            except KeyError:
                raise TransactionFailed(ERROR_NO_PACKAGE,
                                        "Package %s isn't available" % pkg_name)
            if reinstall:
                if not pkg.is_installed:
                    raise TransactionFailed(ERROR_PACKAGE_NOT_INSTALLED,
                                            "Package %s isn't installed" % \
                                            pkg.name)
            else:
                #FIXME: Turn this into a non-critical message
                if pkg.is_installed:
                    raise TransactionFailed(ERROR_PACKAGE_ALREADY_INSTALLED,
                                            "Package %s is already installed" %\
                                            pkg_name)
            # Positional args: auto_fix, auto_inst, from_user (see the
            # python-apt mark_install documentation); auto_fix is off
            # because the resolver handles the problems afterwards.
            pkg.mark_install(False, True, True)
            resolver.clear(pkg)
            resolver.protect(pkg)
def enable_distro_comp(self, component):
"""Enable given component in the sources list.
Keyword arguments:
component -- a component, e.g. main or universe
"""
old_umask = os.umask(0022)
try:
sourceslist = SourcesList()
distro = aptsources.distro.get_distro()
distro.get_sources(sourceslist)
distro.enable_component(component)
sourceslist.save()
finally:
os.umask(old_umask)
    def add_repository(self, rtype, uri, dist, comps, comment, sourcesfile):
        """Add given repository to the sources list.

        Keyword arguments:
        rtype -- the type of the entry (deb, deb-src)
        uri -- the main repository uri (e.g. http://archive.ubuntu.com/ubuntu)
        dist -- the distribution to use (e.g. karmic, "/")
        comps -- a (possible empty) list of components (main, restricted)
        comment -- an (optional) comment
        sourcesfile -- an (optinal) filename in sources.list.d

        Raises RepositoryInvalidError if the resulting entry is invalid.
        """
        if sourcesfile:
            if not sourcesfile.endswith(".list"):
                sourcesfile += ".list"
            d = apt_pkg.config.find_dir("Dir::Etc::sourceparts")
            sourcesfile = os.path.join(d, os.path.basename(sourcesfile))
        else:
            sourcesfile = None
        # if there is a password in the uri, protect the file from
        # non-admin users
        password_in_uri = re.match("(http|https|ftp)://\S+?:\S+?@\S+", uri)
        if password_in_uri:
            old_umask = os.umask(0027)
        else:
            old_umask = os.umask(0022)
        try:
            sources = SourcesList()
            entry = sources.add(rtype, uri, dist, comps, comment,
                                file=sourcesfile)
            if entry.invalid:
                #FIXME: Introduce new error codes
                raise RepositoryInvalidError()
        # NOTE(review): bare except, but it re-raises immediately after
        # logging, so no error is swallowed here.
        except:
            logging.exception("adding repository")
            raise
        else:
            sources.save()
            # set to sourcesfile root.admin only if there is a password
            if password_in_uri and sourcesfile:
                import grp
                try:
                    os.chown(sourcesfile, 0, grp.getgrnam("admin")[2])
                except Exception, e:
                    logging.warn("os.chmod() failed '%s'" % e)
        finally:
            os.umask(old_umask)
    def add_vendor_key_from_keyserver(self, keyid, keyserver):
        """Add the signing key from the given (keyid, keyserver) to the
        trusted vendors.

        Keyword argument:
        keyid - the keyid of the key (e.g. 0x0EB12F05)
        keyserver - the keyserver (e.g. keyserver.ubuntu.com)

        Raises TransactionFailed (ERROR_KEY_NOT_INSTALLED) if apt-key
        exits with a non-zero status.
        """
        log.info("Adding vendor key from keyserver: %s %s", keyid, keyserver)
        self.trans.status = STATUS_DOWNLOADING
        self.trans.progress = 101
        last_pulse = time.time()
        #FIXME: Use gobject.spawn_async and deferreds in the worker
        # Alternatively we could use python-pyme directly for a better
        # error handling. Or the --status-fd of gpg
        proc = subprocess.Popen(["/usr/bin/apt-key", "adv",
                                 "--keyserver", keyserver,
                                 "--recv", keyid], stderr=subprocess.STDOUT,
                                stdout=subprocess.PIPE, close_fds=True)
        # Poll apt-key while keeping the GLib main loop responsive and
        # pulsing the transaction progress roughly every 0.3 seconds.
        while proc.poll() is None:
            while gobject.main_context_default().pending():
                gobject.main_context_default().iteration()
            time.sleep(0.05)
            if time.time() - last_pulse > 0.3:
                self.trans.progress = 101
                last_pulse = time.time()
        if proc.returncode != 0:
            stdout = unicode(proc.stdout.read(),
                             # that can return "None", in this case, just
                             # assume something
                             sys.stdin.encoding or "UTF-8",
                             errors="replace")
            #TRANSLATORS: The first %s is the key id and the second the server
            raise TransactionFailed(ERROR_KEY_NOT_INSTALLED,
                                    _("Failed to download and install the key "
                                      "%s from %s:\n%s") % (keyid, keyserver,
                                                            stdout))
def add_vendor_key_from_file(self, path):
"""Add the signing key from the given file to the trusted vendors.
Keyword argument:
path -- absolute path to the key file
"""
log.info("Adding vendor key from file: %s", path)
self.trans.progress = 101
self.trans.status = STATUS_COMMITTING
try:
#FIXME: use gobject.spawn_async or reactor.spawn
#FIXME: use --dry-run before?
auth = AptAuth()
auth.add(os.path.expanduser(path))
except Exception, error:
raise TransactionFailed(ERROR_KEY_NOT_INSTALLED,
"Key file %s couldn't be installed: %s" % \
(path, error))
def remove_vendor_key(self, fingerprint):
"""Remove repository key.
Keyword argument:
fingerprint -- fingerprint of the key to remove
"""
log.info("Removing vendor key: %s", fingerprint)
self.trans.progress = 101
self.trans.status = STATUS_COMMITTING
try:
#FIXME: use gobject.spawn_async or reactor.spawn
#FIXME: use --dry-run before?
auth = AptAuth()
auth.rm(fingerprint)
except Exception, error:
raise TransactionFailed(ERROR_KEY_NOT_REMOVED,
"Key with fingerprint %s couldn't be "
"removed: %s" % (fingerprint, error))
    def install_file(self, path):
        """Install local package file.

        Keyword argument:
        path -- absolute path to the package file
        """
        log.info("Installing local package file: %s", path)
        # Check if the dpkg can be installed at all
        self.trans.status = STATUS_RESOLVING_DEP
        deb = apt.debfile.DebPackage(path.encode("UTF-8"), self._cache)
        if not deb.check():
            raise TransactionFailed(ERROR_DEP_RESOLUTION_FAILED,
                                    deb._failure_string)
        # Check for required changes and apply them before
        (install, remove, unauth) = deb.required_changes
        self._call_plugins("modify_cache_after")
        if len(install) > 0 or len(remove) > 0:
            dpkg_range = (64, 99)   # NOTE(review): assigned but never used
            self._commit_changes(fetch_range=(5, 33),
                                 install_range=(34, 63))
        # Install the dpkg file
        if deb.install(DaemonDpkgInstallProgress(self.trans,
                                                 begin=64, end=95)):
            raise TransactionFailed(ERROR_UNKNOWN, deb._failure_string)
    def remove_packages(self, package_names):
        """Remove packages.

        Keyword argument:
        package_names -- list of package names which should be removed
        """
        log.info("Removing packages: '%s'", package_names)
        self.trans.status = STATUS_RESOLVING_DEP
        ac = self._cache.actiongroup()
        resolver = apt.cache.ProblemResolver(self._cache)
        self._mark_packages_for_removal(package_names, resolver)
        self._resolve_depends(resolver)
        ac.release()
        self._commit_changes(fetch_range=(10, 10), install_range=(10, 90))
        #FIXME: should we use a persistant cache? make a check?
        #self._open_cache(prange=(90,99))
        #for p in pkgs:
        #    if self._cache.has_key(p) and self._cache[p].is_installed:
        #        self.ErrorCode(ERROR_UNKNOWN, "%s is still installed" % p)
        #        self.Finished(EXIT_FAILED)
        #        return
    def _mark_packages_for_removal(self, package_names, resolver, purge=False):
        """Mark packages for removal.

        Keyword arguments:
        package_names -- list of names of the packages to remove
        resolver -- apt.cache.ProblemResolver that should protect the removals
        purge -- if True also delete the configuration files
        """
        for pkg_name in package_names:
            try:
                pkg = self._cache[pkg_name]
            except KeyError:
                raise TransactionFailed(ERROR_NO_PACKAGE,
                                        "Package %s isn't available" % pkg_name)
            # A package that only left configuration files behind can still
            # be purged, hence the additional installed_files check.
            if not pkg.is_installed and not pkg.installed_files:
                raise TransactionFailed(ERROR_PACKAGE_NOT_INSTALLED,
                                        "Package %s isn't installed" % pkg_name)
            if pkg.essential == True:
                raise TransactionFailed(ERROR_NOT_REMOVE_ESSENTIAL_PACKAGE,
                                        "Package %s cannot be removed." % \
                                        pkg_name)
            pkg.mark_delete(False, purge)
            resolver.clear(pkg)
            resolver.protect(pkg)
            resolver.remove(pkg)
    def _check_obsoleted_dependencies(self):
        """Mark obsoleted dependencies of to be removed packages for removal."""
        if not self.trans.remove_obsoleted_depends:
            return
        installed_deps = set()
        ac = self._cache.actiongroup()
        # Collect the installed dependency closure of everything that is
        # scheduled for removal ...
        for pkg in self._cache:
            if pkg.marked_delete:
                installed_deps = self._installed_dependencies(pkg.name,
                                                              installed_deps)
        # ... and also remove those that were only installed automatically.
        for dep_name in installed_deps:
            if dep_name in self._cache:
                pkg = self._cache[dep_name]
                if pkg.is_installed and pkg.is_auto_removable:
                    pkg.mark_delete(False)
        ac.release()
    def _installed_dependencies(self, pkg_name, all_deps=None):
        """Recursively return all installed dependencies of a given package.

        Keyword arguments:
        pkg_name -- name of the package whose dependencies are collected
        all_deps -- set of already collected dependency names; also acts as
                    the recursion guard
        """
        #FIXME: Should be part of python-apt, since it makes use of non-public
        #       API. Perhaps by adding a recursive argument to
        #       apt.package.Version.get_dependencies()
        if not all_deps:
            all_deps = set()
        if not pkg_name in self._cache:
            return all_deps
        cur = self._cache[pkg_name]._pkg.current_ver
        if not cur:
            # Package is not installed at all.
            return all_deps
        for t in ("PreDepends", "Depends", "Recommends"):
            try:
                for dep in cur.depends_list[t]:
                    dep_name = dep[0].target_pkg.name
                    if not dep_name in all_deps:
                        all_deps.add(dep_name)
                        all_deps |= self._installed_dependencies(dep_name,
                                                                 all_deps)
            except KeyError:
                # The package has no dependency of this type.
                pass
        return all_deps
    def upgrade_packages(self, package_names):
        """Upgrade packages.

        Keyword argument:
        package_names -- list of package names which should be upgraded
        """
        log.info("Upgrading packages: %s", package_names)
        self.trans.status = STATUS_RESOLVING_DEP
        ac = self._cache.actiongroup()
        resolver = apt.cache.ProblemResolver(self._cache)
        self._mark_packages_for_upgrade(package_names, resolver)
        self._resolve_depends(resolver)
        ac.release()
        self._commit_changes()
    def _mark_packages_for_upgrade(self, package_names, resolver):
        """Mark packages for upgrade.

        Keyword arguments:
        package_names -- list of names of the packages to upgrade
        resolver -- apt.cache.ProblemResolver that should protect the upgrades
        """
        for pkg_name in package_names:
            try:
                pkg = self._cache[pkg_name]
            except KeyError:
                raise TransactionFailed(ERROR_NO_PACKAGE,
                                        "Package %s isn't available" % pkg_name)
            if not pkg.is_installed:
                raise TransactionFailed(ERROR_PACKAGE_NOT_INSTALLED,
                                        "Package %s isn't installed" % pkg_name)
            # Preserve the auto-installed flag across the upgrade.
            pkg.mark_install(False, True, pkg.is_auto_installed)
            resolver.clear(pkg)
            resolver.protect(pkg)
    def update_cache(self, sources_list):
        """Update the cache.

        Keyword argument:
        sources_list -- repository file to update; a bare file name is looked
                        up in sources.list.d, a false value updates everything
        """
        log.info("Updating cache")
        progress = DaemonAcquireProgress(self.trans, begin=10, end=95)
        if sources_list and not sources_list.startswith("/"):
            d = apt_pkg.config.find_dir("Dir::Etc::sourceparts")
            sources_list = os.path.join(d, sources_list)
        try:
            self._cache.update(progress, sources_list=sources_list)
        except apt.cache.FetchFailedException, error:
            raise TransactionFailed(ERROR_REPO_DOWNLOAD_FAILED,
                                    str(error.message))
        except apt.cache.FetchCancelledException:
            raise TransactionCancelled()
    def upgrade_system(self, safe_mode=True):
        """Upgrade the system.

        Keyword argument:
        safe_mode -- if additional software should be installed or removed to
                     satisfy the dependencies of the updates
        """
        log.info("Upgrade system with safe mode: %s" % safe_mode)
        # Check for available updates
        self.trans.status = STATUS_RESOLVING_DEP
        updates = filter(lambda p: p.is_upgradable,
                         self._cache)
        #FIXME: What to do if already uptotdate? Add error code?
        self._cache.upgrade(dist_upgrade=not safe_mode)
        self._call_plugins("modify_cache_after")
        # Check for blocked updates
        outstanding = []
        changes = self._cache.get_changes()
        for pkg in updates:
            if not pkg in changes or not pkg.marked_upgrade:
                outstanding.append(pkg)
        #FIXME: Add error state if system could not be fully updated
        self._commit_changes()
    def fix_incomplete_install(self):
        """Run dpkg --configure -a to recover from a failed installation."""
        log.info("Fixing incomplete installs")
        self.trans.status = STATUS_CLEANING_UP
        progress = DaemonDpkgRecoverProgress(self.trans)
        progress.start_update()
        progress.run()
        progress.finish_update()
        if progress._child_exit != 0:
            # dpkg failed -- surface its captured output as the error detail.
            raise TransactionFailed(ERROR_PACKAGE_MANAGER_FAILED,
                                    progress.output)
def fix_broken_depends(self):
"""Try to fix broken dependencies."""
log.info("Fixing broken depends")
self.trans.status = STATUS_RESOLVING_DEP
try:
self._cache._depcache.fix_broken()
except SystemError:
broken = [pkg.name for pkg in self._cache if pkg.is_inst_broken]
raise TransactionFailed(ERROR_DEP_RESOLUTION_FAILED,
" ".join(broken))
self._commit_changes()
    def _open_cache(self, begin=0, end=5, quiet=False):
        """Open the APT cache.

        Keyword arguments:
        begin -- the begin of the progress range
        end -- the end of the progress range
        quiet -- if True do not report any progress
        """
        self.trans.status = STATUS_LOADING_CACHE
        # Point back to the real status file (simulations may have replaced
        # it) before re-initializing the apt system.
        apt_pkg.config.set("Dir::State::status", self._status_orig)
        apt_pkg.init_system()
        try:
            progress = DaemonOpenProgress(self.trans, begin=begin, end=end,
                                          quiet=quiet)
            if not isinstance(self._cache, apt.cache.Cache):
                self._cache = apt.cache.Cache(progress)
            else:
                self._cache.open(progress)
        except Exception, excep:
            raise TransactionFailed(ERROR_NO_CACHE, excep.message)
    def _lock_cache(self):
        """Lock the APT cache.

        If another package manager holds the lock the transaction is paused
        and the lock is re-tried every three seconds until it becomes free
        or the transaction gets cancelled.
        """
        try:
            lock.acquire()
        except lock.LockFailedError, error:
            logging.error("Failed to lock the cache")
            self.trans.paused = True
            self.trans.status = STATUS_WAITING_LOCK
            if error.process:
                #TRANSLATORS: %s is the name of a package manager
                msg = "Waiting for %s to exit" % error.process
                self.trans.status_details = msg
            # Poll for the lock while keeping the GLib main loop spinning.
            lock_watch = gobject.timeout_add_seconds(3, self._watch_lock)
            while self.trans.paused and not self.trans.cancelled:
                gobject.main_context_default().iteration()
            gobject.source_remove(lock_watch)
            if self.trans.cancelled:
                raise TransactionCancelled()
def _watch_lock(self):
"""Unpause the transaction if the lock can be obtained."""
try:
lock.acquire()
except lock.LockFailedError:
return True
self.trans.paused = False
return False
def is_dpkg_journal_clean(self):
"""Return False if there are traces of incomplete dpkg status
updates."""
status_updates = os.path.join(os.path.dirname(self._status_orig),
"updates/")
for dentry in os.listdir(status_updates):
if dentry.isdigit():
return False
return True
    def _commit_changes(self, fetch_range=(5, 50), install_range=(50, 90)):
        """Commit previously marked changes to the cache.

        Keyword arguments:
        fetch_range -- tuple containing the start and end point of the
                       download progress
        install_range -- tuple containing the start and end point of the
                         install progress
        """
        changes = self._cache.get_changes()
        if not changes:
            return
        # Do not allow to remove essential packages
        for pkg in changes:
            if pkg.marked_delete and (pkg.essential == True or \
               (pkg.installed and \
                pkg.installed.priority == "required") or \
               pkg.name == "aptdaemon"):
                raise TransactionFailed(ERROR_NOT_REMOVE_ESSENTIAL_PACKAGE,
                                        "Package %s cannot be removed." % \
                                        pkg.name)
        self._check_obsoleted_dependencies()
        self._check_unauthenticated()
        if self.trans.cancelled:
            raise TransactionCancelled()
        # From here on the transaction cannot be cancelled safely any more.
        self.trans.cancellable = False
        fetch_progress = DaemonAcquireProgress(self.trans,
                                               begin=fetch_range[0],
                                               end=fetch_range[1])
        inst_progress = DaemonInstallProgress(self.trans,
                                              begin=install_range[0],
                                              end=install_range[1])
        try:
            self._cache.commit(fetch_progress, inst_progress)
        except apt.cache.FetchFailedException, error:
            raise TransactionFailed(ERROR_PACKAGE_DOWNLOAD_FAILED,
                                    str(error.message))
        except apt.cache.FetchCancelledException:
            raise TransactionCancelled()
        except SystemError, excep:
            # Run dpkg --configure -a to recover from a failed transaction
            self.trans.status = STATUS_CLEANING_UP
            progress = DaemonDpkgRecoverProgress(self.trans, begin=90, end=95)
            progress.start_update()
            progress.run()
            progress.finish_update()
            output = inst_progress.output + progress.output
            raise TransactionFailed(ERROR_PACKAGE_MANAGER_FAILED,
                                    "%s: %s" % (excep, output))
    def simulate(self, trans, status_path=None):
        """Return the dependencies which will be installed by the transaction,
        the content of the dpkg status file after the transaction would have
        been applied, the download size and the required disk space.

        Keyword arguments:
        trans -- the transaction which should be simulated
        status_path -- the path to a dpkg status file on which the transaction
                       should be applied
        """
        log.info("Simulating trans: %s" % trans.tid)
        trans.status = STATUS_RESOLVING_DEP
        try:
            # On success this returns directly (after the finally clause);
            # the failure bookkeeping below only runs if the helper raised.
            return self._simulate_helper(trans, status_path)
        except TransactionFailed, excep:
            trans.error = excep
        except Exception, excep:
            trans.error = TransactionFailed(ERROR_UNKNOWN,
                                            traceback.format_exc())
        finally:
            trans.status = STATUS_SETTING_UP
        trans.exit = EXIT_FAILED
        trans.progress = 100
        self.last_action_timestamp = time.time()
        raise trans.error
    def _simulate_helper(self, trans, status_path):
        """Backend of simulate(): apply the transaction's changes to a copy
        of the dpkg status file and return (additional dependencies, new
        status file content, download size, required disk space)."""
        #FIXME: A lot of redundancy
        #FIXME: Add checks for obsolete dependencies and unauthenticated
        def get_base_records(sec, additional=None):
            # Serialize the interesting fields of a package record in dpkg
            # status file syntax.
            records = ["Priority", "Installed-Size", "Architecture",
                       "Version", "Replaces", "Depends", "Conflicts",
                       "Breaks", "Recommends", "Suggests", "Provides",
                       "Pre-Depends", "Essential"]
            if additional:
                records.extend(additional)
            ret = ""
            for record in records:
                try:
                    ret += "%s: %s\n" % (record, sec[record])
                except KeyError:
                    pass
            return ret
        status = ""
        depends = [[], [], [], [], [], [], []]
        skip_pkgs = []
        size = 0
        # NOTE(review): all of these names alias one and the same empty list;
        # this is only harmless because they are rebound (never mutated in
        # place) below.
        installs = reinstalls = removals = purges = upgrades = downgrades = \
            kepts = upgradables = []
        # Only handle transaction which change packages
        #FIXME: Add support for ROLE_FIX_INCOMPLETE_INSTALL,
        #       ROLE_FIX_BROKEN_DEPENDS
        if trans.role not in [ROLE_INSTALL_PACKAGES, ROLE_UPGRADE_PACKAGES,
                              ROLE_UPGRADE_SYSTEM, ROLE_REMOVE_PACKAGES,
                              ROLE_COMMIT_PACKAGES, ROLE_INSTALL_FILE]:
            return depends, status, 0, 0
        # Fast forward the cache
        if not status_path:
            status_path = self._status_orig
        apt_pkg.config.set("Dir::State::status", status_path)
        apt_pkg.init_system()
        #FIXME: open cache in background after startup
        if not self._cache:
            self._cache = apt.cache.Cache()
        else:
            self._cache.open()
        if self._cache.broken_count:
            broken = [pkg.name for pkg in self._cache if pkg.is_now_broken]
            raise TransactionFailed(ERROR_CACHE_BROKEN, " ".join(broken))
        # Mark the changes and apply
        if trans.role == ROLE_UPGRADE_SYSTEM:
            #FIXME: Should be part of python-apt to avoid using private API
            # NOTE(review): this first computation is dead code -- its result
            # is immediately overwritten by the comprehension below.
            upgradables = [self._cache[pkgname] \
                           for pkgname in self._cache._set \
                           if self._cache._depcache.is_upgradable(\
                               self._cache._cache[pkgname])]
            upgradables = [pkg for pkg in self._cache if pkg.is_upgradable]
            self._cache.upgrade(not trans.kwargs["safe_mode"])
            self._call_plugins("modify_cache_after")
        elif trans.role == ROLE_INSTALL_FILE:
            deb = apt.debfile.DebPackage(trans.kwargs["path"].encode("UTF-8"),
                                         self._cache)
            if not deb.check():
                raise TransactionFailed(ERROR_DEP_RESOLUTION_FAILED,
                                        deb._failure_string)
            status += "Package: %s\n" % deb.pkgname
            status += "Status: install ok installed\n"
            status += get_base_records(deb)
            status += "\n"
            skip_pkgs.append(deb.pkgname)
            try:
                size = int(deb["Installed-Size"]) * 1024
            except (KeyError, AttributeError):
                pass
            try:
                pkg = self._cache[deb.pkgname]
            except KeyError:
                trans.packages[PKGS_INSTALL] = [deb.pkgname]
            else:
                if pkg.is_installed:
                    # if we failed to get the size from the deb file do not
                    # try to get the delta
                    if size != 0:
                        size -= pkg.installed.installed_size
                    trans.packages[PKGS_REINSTALL] = [deb.pkgname]
                else:
                    trans.packages[PKGS_INSTALL] = [deb.pkgname]
            # NOTE(review): "removal" (sic, singular) is never used below.
            installs, reinstalls, removal, purges, upgrades = trans.packages
            self._call_plugins("modify_cache_after")
        else:
            ac = self._cache.actiongroup()
            installs, reinstalls, removals, purges, upgrades = trans.packages
            resolver = apt.cache.ProblemResolver(self._cache)
            self._mark_packages_for_installation(installs, resolver)
            self._mark_packages_for_installation(reinstalls, resolver,
                                                 reinstall=True)
            self._mark_packages_for_removal(removals, resolver)
            self._mark_packages_for_removal(purges, resolver, purge=True)
            self._mark_packages_for_upgrade(upgrades, resolver)
            self._resolve_depends(resolver)
            ac.release()
        changes = self._cache.get_changes()
        changes_names = []
        # get the additional dependencies
        for pkg in changes:
            if pkg.marked_upgrade and pkg.is_installed and \
               not pkg.name in upgrades:
                depends[PKGS_UPGRADE].append(pkg.name)
            elif pkg.marked_reinstall and not pkg.name in reinstalls:
                depends[PKGS_REINSTALL].append(pkg.name)
            elif pkg.marked_downgrade and not pkg.name in downgrades:
                depends[PKGS_DOWNGRADE].append(pkg.name)
            elif pkg.marked_install and not pkg.name in installs:
                depends[PKGS_INSTALL].append(pkg.name)
            elif pkg.marked_delete and not pkg.name in removals:
                depends[PKGS_REMOVE].append(pkg.name)
            #FIXME: add support for purges
            changes_names.append(pkg.name)
        # Check for skipped upgrades
        for pkg in upgradables:
            if not pkg in changes or not pkg.marked_upgrade:
                depends[PKGS_KEEP].append(pkg.name)
        # merge the changes into the dpkg status
        for sec in apt_pkg.TagFile(open(status_path)):
            pkg_name = sec["Package"]
            if pkg_name in skip_pkgs:
                continue
            status += "Package: %s\n" % pkg_name
            if pkg_name in changes_names:
                pkg = self._cache[sec["Package"]]
                if pkg.marked_delete:
                    status += "Status: deinstall ok config-files\n"
                    version = pkg.installed
                else:
                    # Install, Upgrade, downgrade and reinstall all use the
                    # candidate version
                    version = pkg.candidate
                    status += "Status: install ok installed\n"
                # Corner-case: a purge of an already removed package won't
                # have an installed version
                if version:
                    status += get_base_records(version.record)
                changes.remove(pkg)
            else:
                status += get_base_records(sec, ["Status"])
            status += "\n"
        # Add changed and not yet known (installed) packages to the status
        for pkg in changes:
            version = pkg.candidate
            status += "Package: %s\n" % pkg.name
            status += "Status: install ok installed\n"
            status += get_base_records(pkg.candidate.record)
            status += "\n"
        return depends, status, self._cache.required_download, \
            size + self._cache.required_space
class DummyWorker(AptWorker):
    """Allows to test the daemon without making any changes to the system.

    A GLib timeout advances the fake transaction by one percent every
    200 ms and walks it through scripted status changes, pauses and
    prompts instead of calling out to APT.
    """

    def run(self, transaction):
        """Process the given transaction in the background.

        Keyword argument:
        transaction -- core.Transcation instance to run
        """
        log.info("Processing transaction %s", transaction.tid)
        if self.trans:
            raise Exception("There is already a running transaction")
        self.trans = transaction
        self.last_action_timestamp = time.time()
        self.trans.status = STATUS_RUNNING
        self.trans.progress = 0
        self.trans.cancellable = True
        gobject.timeout_add(200, self._process_transaction, transaction)

    def _process_transaction(self, trans):
        """Advance the simulated transaction by one step.

        Returns True while the timeout should stay installed and False
        once the transaction has finished.
        """
        if trans.cancelled:
            trans.exit = EXIT_CANCELLED
        elif trans.progress == 100:
            trans.exit = EXIT_SUCCESS
        elif trans.role == ROLE_UPDATE_CACHE:
            trans.exit = EXIT_FAILED
        elif trans.role == ROLE_UPGRADE_PACKAGES:
            trans.exit = EXIT_SUCCESS
        elif trans.role == ROLE_UPGRADE_SYSTEM:
            # BUGFIX: was the undefined name EXIT_CANCELLE (NameError).
            trans.exit = EXIT_CANCELLED
        else:
            if trans.role == ROLE_INSTALL_PACKAGES:
                if trans.progress == 1:
                    trans.status = STATUS_RESOLVING_DEP
                elif trans.progress == 5:
                    trans.status = STATUS_DOWNLOADING
                elif trans.progress == 50:
                    trans.status = STATUS_COMMITTING
                    trans.status_details = "Heyas!"
                elif trans.progress == 55:
                    # Simulate a configuration file conflict prompt.
                    trans.paused = True
                    trans.status = STATUS_WAITING_CONFIG_FILE_PROMPT
                    trans.config_file_conflict = "/etc/fstab", "/etc/mtab"
                    while trans.paused:
                        gobject.main_context_default().iteration()
                    trans.config_file_conflict_resolution = None
                    trans.config_file_conflict = None
                    trans.status = STATUS_COMMITTING
                elif trans.progress == 60:
                    # Simulate a "please insert medium" prompt.
                    trans.required_medium = ("Debian Lenny 5.0 CD 1",
                                             "USB CD-ROM")
                    trans.paused = True
                    trans.status = STATUS_WAITING_MEDIUM
                    while trans.paused:
                        gobject.main_context_default().iteration()
                    trans.status = STATUS_DOWNLOADING
                elif trans.progress == 70:
                    trans.status_details = "Servus!"
                elif trans.progress == 90:
                    # BUGFIX: the attribute was misspelled "status_deatils",
                    # so the detail text was never actually cleared.
                    trans.status_details = ""
                    trans.status = STATUS_CLEANING_UP
            elif trans.role == ROLE_REMOVE_PACKAGES:
                if trans.progress == 1:
                    trans.status = STATUS_RESOLVING_DEP
                elif trans.progress == 5:
                    trans.status = STATUS_COMMITTING
                    trans.status_details = "Heyas!"
                elif trans.progress == 50:
                    trans.status_details = "Hola!"
                elif trans.progress == 70:
                    trans.status_details = "Servus!"
                elif trans.progress == 90:
                    # BUGFIX: see above -- "status_deatils" typo.
                    trans.status_details = ""
                    trans.status = STATUS_CLEANING_UP
            trans.progress += 1
            return True
        trans.status = STATUS_FINISHED
        self.last_action_timestamp = time.time()
        tid = self.trans.tid[:]
        trans = self.trans
        self.trans = None
        self._emit_transaction_done(trans)
        log.info("Finished transaction %s", tid)
        return False

    def simulate(self, trans, status_path=None):
        """Return empty simulation results without touching the system."""
        depends = [[], [], [], [], [], [], []]
        return depends, "", 0, 0
# vim:ts=4:sw=4:et
| [
"root@xinli.xinli"
] | root@xinli.xinli |
9c33821a24871a6e6eabc82aca793dca44554b7d | 38fff7bdefd8d62a740d51329b50d0e1e49258bb | /infra/cifuzz/cifuzz_end_to_end_test.py | 30e28beda330c621ac3a0f5b9d1ee2dc11a5e463 | [
"Apache-2.0"
] | permissive | google/oss-fuzz | 026384c2ada61ef68b147548e830f60730c5e738 | f0275421f84b8f80ee767fb9230134ac97cb687b | refs/heads/master | 2023-08-31T23:30:28.157702 | 2023-08-31T21:49:30 | 2023-08-31T21:49:30 | 63,809,205 | 9,438 | 2,315 | Apache-2.0 | 2023-09-14T20:32:19 | 2016-07-20T19:39:50 | Shell | UTF-8 | Python | false | false | 1,841 | py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End-to-End tests for CIFuzz."""
import os
import unittest
import run_cifuzz
import test_helpers
# Directory containing this file; used to locate the test fixtures.
CIFUZZ_DIR = os.path.dirname(os.path.abspath(__file__))
EXTERNAL_PROJECT_PATH = os.path.join(CIFUZZ_DIR, 'test_data',
                                     'external-project')


# This test will fail if not run as root because the fuzzer build process
# creates binaries that only root can write to.
# Use a separate env var to keep this separate from integration tests which
# don't have this annoying property.
@unittest.skipIf(not os.getenv('END_TO_END_TESTS'),
                 'END_TO_END_TESTS=1 not set')
class EndToEndTest(unittest.TestCase):
  """End-to-End tests for CIFuzz."""

  def setUp(self):
    # Start every test from a pristine (patched) environment.
    test_helpers.patch_environ(self, runner=True)

  def test_simple(self):
    """Simple end-to-end test using run_cifuzz.main()."""
    os.environ['REPOSITORY'] = 'external-project'
    os.environ['PROJECT_SRC_PATH'] = EXTERNAL_PROJECT_PATH
    os.environ['FILESTORE'] = 'no_filestore'
    os.environ['NO_CLUSTERFUZZ_DEPLOYMENT'] = 'True'
    with test_helpers.docker_temp_dir() as temp_dir:
      os.environ['WORKSPACE'] = temp_dir
      # TODO(metzman): Verify the crash, affected fuzzers, and other things.
      # NOTE(review): presumably main() == 1 means a crash was found; see TODO.
      self.assertEqual(run_cifuzz.main(), 1)
| [
"noreply@github.com"
] | google.noreply@github.com |
d00d266ba80663b9ebfeb3cc61a78e1314118b06 | 2491df3f643539e6055bb0b2a4b659474c57491f | /nonRepeating.py | 448672a1d5248162813e6d1da3a085a5576289a6 | [] | no_license | ghilbing/Ejemplos | 85efc91346028b8a3d26d7680d9286b26234c771 | 339a45ef48c9a61002a01f7c823cc42d34fab409 | refs/heads/master | 2021-05-13T13:58:33.010157 | 2018-02-26T20:44:44 | 2018-02-26T20:44:44 | 116,724,506 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 282 | py | def nonRepeating(A):
dictionary = {}
for c in A:
if c in dictionary:
dictionary[c] += 1
else:
dictionary[c] = 1
for c in A:
if dictionary[c] == 1:
return c
return None
# Demo: "d" is the first character of A that occurs only once.
A = "aabbdbc"
print nonRepeating(A)
"ghilbing@gmail.com"
] | ghilbing@gmail.com |
ed6e2fd9aa9f97143e4c73ed7f12ea853cca8b45 | e2f0806ca1cdd887ea40d050a19fa2710427bd38 | /도전 문제/04주차_그리디/1339_단어 수학/banghyungjin_1339.py | bace9ef7dcea98724223d5ffa32bf5cf39bdf06e | [] | no_license | JY-Dev/AlgorithmStudy-1 | 001f94d80097c850c79eeb2bc86971a01aa5bd5d | 2ad1df0fd65c72a6f6d1feeba09f889000ff8c15 | refs/heads/main | 2023-08-21T18:38:18.235994 | 2021-09-28T07:07:11 | 2021-09-28T07:07:11 | 406,208,087 | 1 | 0 | null | 2021-09-14T03:14:32 | 2021-09-14T03:14:31 | null | UTF-8 | Python | false | false | 1,904 | py | import sys
num_of_letters = int(sys.stdin.readline().split()[0])  # number of words
letters = {}  # maps each letter to its accumulated positional value
answer = 0  # final result

for i in range(num_of_letters):  # for each word
    input_letter = sys.stdin.readline().split()[0]  # read the word
    for letter in range(len(input_letter)):  # split it into single letters
        if not(input_letter[letter] in letters):  # letter not seen before
            # insert it: key = the letter, value = the decimal weight of
            # its position within the word
            letters[input_letter[letter]] = 10 ** (len(input_letter) - letter - 1)
        else:  # letter already known
            # add this occurrence's decimal weight to its value
            letters[input_letter[letter]] += 10 ** (len(input_letter) - letter - 1)

# sort the letters by accumulated weight, heaviest first
letters = sorted(letters.items(), reverse=True, key=lambda x: (x[1]))

for i in range(len(letters)):  # walk the sorted letters
    # greedily substitute the digits 9, 8, ... for the heaviest letters
    answer += letters[i][1] * (9 - i)

print(answer)  # print the result
| [
"noreply@github.com"
] | JY-Dev.noreply@github.com |
f36e3d60041a50a234a84aba86475d750f8bf046 | 8b7778d3c65f3688105e43718152da2c734ffa26 | /2.Travel_Request/data/Get_TestData/Get_D1_AM_Data.py | f229dca9ebff929a4e5484ebfc330be6b5e81771 | [] | no_license | zzworkaccount/OpenSourceLibrary | ab49b3f431c0474723dfad966ca09e29b07527eb | 0f99f881eb8a1f4ddebbc5e7676289d01e6ffe19 | refs/heads/main | 2023-01-29T05:02:56.341004 | 2020-12-03T12:05:59 | 2020-12-03T12:05:59 | 315,920,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | # 从Save_TestData拿到测试数据
from tools.util import Utility
class Get_SM_TestData:
    """Read login test data for the D1 AM module from the Excel sheets
    configured in conf/Excel_conf/D_AM.conf."""

    # Read the login test data for querying an actor from Excel.
    @classmethod
    def get_login_excel_data_query_actor(cls, row=0):
        login_info = Utility.get_json\
            (Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[0]
        login_data = Utility.get_excel(login_info,row)
        return login_data

    # Read the login test data for deleting an actor from Excel.
    @classmethod
    def get_login_excel_data_delete_actor(cls, row=0):
        login_info = Utility.get_json\
            (Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[1]
        login_data = Utility.get_excel(login_info,row)
        return login_data

    # Read the login test data for adding an actor from Excel.
    @classmethod
    def get_login_excel_data_add_actor(cls, row=0):
        login_info = Utility.get_json \
            (Utility.get_root_path() + '\\conf\\Excel_conf\\D_AM.conf')[2]
        login_data = Utility.get_excel(login_info, row)
        return login_data
if __name__ == '__main__':
    # Manual smoke test: print the "add actor" data from row 1.
    print(Get_SM_TestData.get_login_excel_data_add_actor(1))
| [
"1434895836@qq.com"
] | 1434895836@qq.com |
26bd976b6ddc99461c454b2ab282d2464995263f | 9d6817b67f7993b3a43319894ebd508b1fa92f9f | /configs/SMP-RunIISummer20UL16wmLHEGEN-00020_1_cfg.py | 3bf496216d6131b3bcfe0e6aec8c453ba7f1200c | [] | no_license | kdlong/WMassNanoGen | b7c5c12df52862d7dd9d9554d7654b9e5d701167 | d1e0c6db75f671eb593cf907307189cd95aa31f6 | refs/heads/master | 2023-06-27T07:21:53.971633 | 2023-06-19T13:32:41 | 2023-06-19T13:32:41 | 235,908,488 | 2 | 7 | null | 2023-06-12T09:10:10 | 2020-01-23T23:43:11 | Python | UTF-8 | Python | false | false | 11,485 | py | # Auto generated configuration file
# using:
# Revision: 1.19
# Source: /local/reps/CMSSW/CMSSW/Configuration/Applications/python/ConfigBuilder.py,v
# with command line options: Configuration/GenProduction/python/SMP-RunIISummer20UL16wmLHEGEN-00020-fragment.py --fileout file:SMP-RunIISummer20UL16wmLHEGEN-00020.root --mc --eventcontent NANOAODGEN --datatier NANOAODSIM --conditions 106X_mcRun2_asymptotic_v13 --beamspot Realistic25ns13TeV2016Collision --step LHE,GEN,NANOGEN --nThreads 1 --geometry DB:Extended --era Run2_2016 --python_filename SMP-RunIISummer20UL16wmLHEGEN-00020_1_cfg.py --no_exec --customise Configuration/DataProcessing/Utils.addMonitoring -n 70
import FWCore.ParameterSet.Config as cms
from Configuration.Eras.Era_Run2_2016_cff import Run2_2016
process = cms.Process('NANOGEN',Run2_2016)
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('SimGeneral.MixingModule.mixNoPU_cfi')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.Generator_cff')
process.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeV2016Collision_cfi')
process.load('GeneratorInterface.Core.genFilterSummary_cff')
process.load('PhysicsTools.NanoAOD.nanogen_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(70)
)
# Input source
process.source = cms.Source("EmptySource")
process.options = cms.untracked.PSet(
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
annotation = cms.untracked.string('Configuration/GenProduction/python/SMP-RunIISummer20UL16wmLHEGEN-00020-fragment.py nevts:70'),
name = cms.untracked.string('Applications'),
version = cms.untracked.string('$Revision: 1.19 $')
)
# Output definition
process.NANOAODGENoutput = cms.OutputModule("NanoAODOutputModule",
SelectEvents = cms.untracked.PSet(
SelectEvents = cms.vstring('generation_step')
),
compressionAlgorithm = cms.untracked.string('LZMA'),
compressionLevel = cms.untracked.int32(9),
dataset = cms.untracked.PSet(
dataTier = cms.untracked.string('NANOAODSIM'),
filterName = cms.untracked.string('')
),
fileName = cms.untracked.string('file:SMP-RunIISummer20UL16wmLHEGEN-00020.root'),
outputCommands = process.NANOAODGENEventContent.outputCommands
)
# Additional output definition
# Other statements
process.genstepfilter.triggerConditions=cms.vstring("generation_step")
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, '106X_mcRun2_asymptotic_v13', '')
process.generator = cms.EDFilter("Pythia8HadronizerFilter",
ExternalDecays = cms.PSet(
Photospp = cms.untracked.PSet(
forceBremForDecay = cms.PSet(
Wm = cms.vint32(0, -24),
Wp = cms.vint32(0, 24),
Z = cms.vint32(0, 23),
parameterSets = cms.vstring(
'Z',
'Wp',
'Wm'
)
),
parameterSets = cms.vstring(
'setExponentiation',
'setInfraredCutOff',
'setMeCorrectionWtForW',
'setMeCorrectionWtForZ',
'setMomentumConservationThreshold',
'setPairEmission',
'setPhotonEmission',
'setStopAtCriticalError',
'suppressAll',
'forceBremForDecay'
),
setExponentiation = cms.bool(True),
setInfraredCutOff = cms.double(0.00011),
setMeCorrectionWtForW = cms.bool(True),
setMeCorrectionWtForZ = cms.bool(True),
setMomentumConservationThreshold = cms.double(0.1),
setPairEmission = cms.bool(True),
setPhotonEmission = cms.bool(True),
setStopAtCriticalError = cms.bool(False),
suppressAll = cms.bool(True)
),
parameterSets = cms.vstring('Photospp')
),
PythiaParameters = cms.PSet(
parameterSets = cms.vstring(
'pythia8CommonSettings',
'pythia8CP5Settings',
'pythia8PSweightsSettings',
'processParameters'
),
processParameters = cms.vstring(
'SpaceShower:pTmaxMatch = 1',
'TimeShower:pTmaxMatch = 1',
'ParticleDecays:allowPhotonRadiation = on',
'TimeShower:QEDshowerByL = off',
'BeamRemnants:hardKTOnlyLHE = on',
'BeamRemnants:primordialKThard = 2.225001',
'SpaceShower:dipoleRecoil = 1'
),
pythia8CP5Settings = cms.vstring(
'Tune:pp 14',
'Tune:ee 7',
'MultipartonInteractions:ecmPow=0.03344',
'MultipartonInteractions:bProfile=2',
'MultipartonInteractions:pT0Ref=1.41',
'MultipartonInteractions:coreRadius=0.7634',
'MultipartonInteractions:coreFraction=0.63',
'ColourReconnection:range=5.176',
'SigmaTotal:zeroAXB=off',
'SpaceShower:alphaSorder=2',
'SpaceShower:alphaSvalue=0.118',
'SigmaProcess:alphaSvalue=0.118',
'SigmaProcess:alphaSorder=2',
'MultipartonInteractions:alphaSvalue=0.118',
'MultipartonInteractions:alphaSorder=2',
'TimeShower:alphaSorder=2',
'TimeShower:alphaSvalue=0.118',
'SigmaTotal:mode = 0',
'SigmaTotal:sigmaEl = 21.89',
'SigmaTotal:sigmaTot = 100.309',
'PDF:pSet=LHAPDF6:NNPDF31_nnlo_as_0118'
),
pythia8CommonSettings = cms.vstring(
'Tune:preferLHAPDF = 2',
'Main:timesAllowErrors = 10000',
'Check:epTolErr = 0.01',
'Beams:setProductionScalesFromLHEF = off',
'SLHA:keepSM = on',
'SLHA:minMassSM = 1000.',
'ParticleDecays:limitTau0 = on',
'ParticleDecays:tau0Max = 10',
'ParticleDecays:allowPhotonRadiation = on'
),
pythia8PSweightsSettings = cms.vstring(
'UncertaintyBands:doVariations = on',
'UncertaintyBands:List = {isrRedHi isr:muRfac=0.707,fsrRedHi fsr:muRfac=0.707,isrRedLo isr:muRfac=1.414,fsrRedLo fsr:muRfac=1.414,isrDefHi isr:muRfac=0.5,fsrDefHi fsr:muRfac=0.5,isrDefLo isr:muRfac=2.0,fsrDefLo fsr:muRfac=2.0,isrConHi isr:muRfac=0.25,fsrConHi fsr:muRfac=0.25,isrConLo isr:muRfac=4.0,fsrConLo fsr:muRfac=4.0,fsr_G2GG_muR_dn fsr:G2GG:muRfac=0.5,fsr_G2GG_muR_up fsr:G2GG:muRfac=2.0,fsr_G2QQ_muR_dn fsr:G2QQ:muRfac=0.5,fsr_G2QQ_muR_up fsr:G2QQ:muRfac=2.0,fsr_Q2QG_muR_dn fsr:Q2QG:muRfac=0.5,fsr_Q2QG_muR_up fsr:Q2QG:muRfac=2.0,fsr_X2XG_muR_dn fsr:X2XG:muRfac=0.5,fsr_X2XG_muR_up fsr:X2XG:muRfac=2.0,fsr_G2GG_cNS_dn fsr:G2GG:cNS=-2.0,fsr_G2GG_cNS_up fsr:G2GG:cNS=2.0,fsr_G2QQ_cNS_dn fsr:G2QQ:cNS=-2.0,fsr_G2QQ_cNS_up fsr:G2QQ:cNS=2.0,fsr_Q2QG_cNS_dn fsr:Q2QG:cNS=-2.0,fsr_Q2QG_cNS_up fsr:Q2QG:cNS=2.0,fsr_X2XG_cNS_dn fsr:X2XG:cNS=-2.0,fsr_X2XG_cNS_up fsr:X2XG:cNS=2.0,isr_G2GG_muR_dn isr:G2GG:muRfac=0.5,isr_G2GG_muR_up isr:G2GG:muRfac=2.0,isr_G2QQ_muR_dn isr:G2QQ:muRfac=0.5,isr_G2QQ_muR_up isr:G2QQ:muRfac=2.0,isr_Q2QG_muR_dn isr:Q2QG:muRfac=0.5,isr_Q2QG_muR_up isr:Q2QG:muRfac=2.0,isr_X2XG_muR_dn isr:X2XG:muRfac=0.5,isr_X2XG_muR_up isr:X2XG:muRfac=2.0,isr_G2GG_cNS_dn isr:G2GG:cNS=-2.0,isr_G2GG_cNS_up isr:G2GG:cNS=2.0,isr_G2QQ_cNS_dn isr:G2QQ:cNS=-2.0,isr_G2QQ_cNS_up isr:G2QQ:cNS=2.0,isr_Q2QG_cNS_dn isr:Q2QG:cNS=-2.0,isr_Q2QG_cNS_up isr:Q2QG:cNS=2.0,isr_X2XG_cNS_dn isr:X2XG:cNS=-2.0,isr_X2XG_cNS_up isr:X2XG:cNS=2.0}',
'UncertaintyBands:nFlavQ = 4',
'UncertaintyBands:MPIshowers = on',
'UncertaintyBands:overSampleFSR = 10.0',
'UncertaintyBands:overSampleISR = 10.0',
'UncertaintyBands:FSRpTmin2Fac = 20',
'UncertaintyBands:ISRpTmin2Fac = 1'
),
pythia8PowhegEmissionVetoSettings = cms.vstring(
'POWHEG:veto = 1',
'POWHEG:pTdef = 1',
'POWHEG:emitted = 0',
'POWHEG:pTemt = 0',
'POWHEG:pThard = 0',
'POWHEG:vetoCount = 100',
'SpaceShower:pTmaxMatch = 2',
'TimeShower:pTmaxMatch = 2'
)
),
comEnergy = cms.double(13000.0),
filterEfficiency = cms.untracked.double(1.0),
maxEventsToPrint = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
pythiaPylistVerbosity = cms.untracked.int32(1)
)
# External LHE production: runs the POWHEG MiNNLO Zj gridpack from cvmfs.
process.externalLHEProducer = cms.EDProducer("ExternalLHEProducer",
    args = cms.vstring('/cvmfs/cms.cern.ch/phys_generator/gridpacks/slc6_amd64_gcc700/13TeV/powheg/Vj_NNLOPS/Zj_slc6_amd64_gcc700_CMSSW_10_2_23_ZJToMuMu-suggested-nnpdf31-ncalls-doublefsr-q139-powheg-MiNNLO31-svn3756-ew-rwl5-j200-st2fix-ana-hoppetweights-ymax20-newgrids.tgz'),
    generateConcurrently = cms.untracked.bool(True),
    nEvents = cms.untracked.uint32(70),
    numberOfParameters = cms.uint32(1),
    outputFile = cms.string('cmsgrid_final.lhe'),
    scriptName = cms.FileInPath('GeneratorInterface/LHEInterface/data/run_generic_tarball_cvmfs.sh')
)
process.ProductionFilterSequence = cms.Sequence(process.generator)

# Path and EndPath definitions
process.lhe_step = cms.Path(process.externalLHEProducer)
process.generation_step = cms.Path(process.pgen)
process.nanoAOD_step = cms.Path(process.nanogenSequence)
process.genfiltersummary_step = cms.EndPath(process.genFilterSummary)
process.endjob_step = cms.EndPath(process.endOfProcess)
process.NANOAODGENoutput_step = cms.EndPath(process.NANOAODGENoutput)

# Schedule definition: LHE production first, then generation/filtering/NanoGEN output.
process.schedule = cms.Schedule(process.lhe_step,process.generation_step,process.genfiltersummary_step,process.nanoAOD_step,process.endjob_step,process.NANOAODGENoutput_step)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)

# filter all path with the production filter sequence
# (the LHE step must NOT be filtered, hence the skip below)
for path in process.paths:
    if path in ['lhe_step']: continue
    getattr(process,path).insert(0, process.ProductionFilterSequence)

# customisation of the process.
# Automatic addition of the customisation function from PhysicsTools.NanoAOD.nanogen_cff
from PhysicsTools.NanoAOD.nanogen_cff import customizeNanoGEN
#call to customisation function customizeNanoGEN imported from PhysicsTools.NanoAOD.nanogen_cff
process = customizeNanoGEN(process)
# Automatic addition of the customisation function from Configuration.DataProcessing.Utils
from Configuration.DataProcessing.Utils import addMonitoring
#call to customisation function addMonitoring imported from Configuration.DataProcessing.Utils
process = addMonitoring(process)
# End of customisation functions
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| [
"kdlong@hep.wisc.edu"
] | kdlong@hep.wisc.edu |
aa03a2a0ee9d005770a80b0f38d0e4aaf1b36800 | 450448e0ddb786fd13cfe9f6df5aa47573769fdc | /tripleaxisproject/gui/bspline3.py | 2013aaaf1ff207a20edbdcfe3f17b93f76a3155a | [] | no_license | williamratcliff/tripleaxisproject | 70bbd9ab5f7f1d2f30ced18b0887e51a1e3551e8 | 8649730ccc03e7d172ad41db776e2df9b463f3d6 | refs/heads/master | 2021-01-19T20:18:25.875294 | 2018-09-12T20:43:46 | 2018-09-12T20:43:46 | 32,125,247 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,791 | py | import numpy as N
def max(a, b):
    """Element-wise maximum of two broadcastable arrays.

    Note: intentionally shadows the builtin ``max`` within this module.
    """
    below = a < b
    # Where a < b (index 1) pick b, otherwise (index 0) keep a.
    return below.choose(a, b)
def min(a, b):
    """Element-wise minimum of two broadcastable arrays.

    Note: intentionally shadows the builtin ``min`` within this module.
    """
    above = a > b
    # Where a > b (index 1) pick b, otherwise (index 0) keep a.
    return above.choose(a, b)
def lookup(a, b):
    """Return insertion indices of the values b in the sorted array a."""
    return N.searchsorted(a, b)
def cat(*pieces):
    """Concatenate any number of array-like pieces into one array."""
    return N.concatenate(pieces)
# f, f', f'', f''' = bspline3(knot, control, t, nderiv=0)
# Evaluate the B-spline specified by the given knot sequence and
# control values at the parametric points t. The knot sequence
# should be four elements longer than the control sequence.
# Returns up to p(t), p'(t), p''(t), p'''(t) depending on nderiv.
# bspline3(knot, control, t, clamp=True)
# Clamps the spline to the value of the final control point beyond
# the ends of the knot sequence. Default is 'zero' for clamping
# the spline to zero.
def bspline3(knot, control, t, clamp=False, nderiv=0):
    """Evaluate the cubic B-spline given by knot/control at parametric points t.

    Parameters
    ----------
    knot : ndarray
        Knot sequence; must be four elements longer than control.
    control : sequence
        Control values.
    t : ndarray
        Parametric evaluation points.
    clamp : bool
        If True, clamp the spline to the first/last control value beyond
        the ends of the knot sequence; if False (default) the spline goes
        to zero outside the knots.
    nderiv : int
        Number of derivatives to return (0..3).

    Returns
    -------
    f, or (f, f'), (f, f', f''), (f, f', f'', f''') depending on nderiv.
    """
    order = len(knot) - len(control)
    if order != 4:
        # Fixed: the original used the Python-2-only ``raise ValueError, msg`` form.
        raise ValueError("must have two extra knots at each end")
    # Pad the control sequence so every knot interval sees four controls.
    if clamp:
        # Spline is clamped to the initial/final control values.
        control = N.concatenate(([control[0]] * (order - 1), control, [control[-1]]))
    else:
        # Traditional approach: spline goes to zero at +/- infinity.
        control = N.concatenate(([0] * (order - 1), control, [0]))

    # Deal with values outside the range.
    valid = (t > knot[0]) & (t <= knot[-1])
    tv = t[valid]
    f = N.zeros(t.shape)
    df = N.zeros(t.shape)
    d2f = N.zeros(t.shape)
    d3f = N.zeros(t.shape)
    f[t <= knot[0]] = control[0]
    f[t >= knot[-1]] = control[-1]

    # Find B-spline parameters for the individual segments.
    end = len(knot) - 1
    segment = knot.searchsorted(tv) - 1
    tm2 = knot[N.maximum(segment - 2, 0)]
    tm1 = knot[N.maximum(segment - 1, 0)]
    tm0 = knot[N.maximum(segment - 0, 0)]
    tp1 = knot[N.minimum(segment + 1, end)]
    tp2 = knot[N.minimum(segment + 2, end)]
    tp3 = knot[N.minimum(segment + 3, end)]

    P4 = control[N.minimum(segment + 3, end)]
    P3 = control[N.minimum(segment + 2, end)]
    P2 = control[N.minimum(segment + 1, end)]
    P1 = control[N.minimum(segment + 0, end)]

    # Compute second and third derivatives.
    if nderiv > 1:
        # First derivative is available almost for free;
        # second or higher derivatives require extra computation.
        Q4 = (P4 - P3) * 3 / (tp3 - tm0)
        Q3 = (P3 - P2) * 3 / (tp2 - tm1)
        Q2 = (P2 - P1) * 3 / (tp1 - tm2)
        R4 = (Q4 - Q3) * 2 / (tp2 - tm0)
        R3 = (Q3 - Q2) * 2 / (tp1 - tm1)
        S4 = (R4 - R3) * 1 / (tp1 - tm0)
        R4 = ((tv - tm0) * R4 + (tp1 - tv) * R3) / (tp1 - tm0)
        d2f[valid] = R4
        d3f[valid] = S4

    # Compute function value and first derivative via the de Boor recursion.
    P4 = ((tv - tm0) * P4 + (tp3 - tv) * P3) / (tp3 - tm0)
    P3 = ((tv - tm1) * P3 + (tp2 - tv) * P2) / (tp2 - tm1)
    P2 = ((tv - tm2) * P2 + (tp1 - tv) * P1) / (tp1 - tm2)
    P4 = ((tv - tm0) * P4 + (tp2 - tv) * P3) / (tp2 - tm0)
    P3 = ((tv - tm1) * P3 + (tp1 - tv) * P2) / (tp1 - tm1)
    fastdf = (P4 - P3) * 3 / (tp1 - tm0)
    P4 = ((tv - tm0) * P4 + (tp1 - tv) * P3) / (tp1 - tm0)
    df[valid] = fastdf
    f[valid] = P4

    if nderiv == 0:
        return f
    elif nderiv == 1:
        return f, df
    elif nderiv == 2:
        return f, df, d2f
    else:
        return f, df, d2f, d3f
# Assertions left over from original octave code --- I'm not ready
# to write a generic assert yet in Python
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[0 0 0 0 0 0],2.2),0,10*eps);
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[1 1 1 1 1 1],2.2),1,10*eps);
#!assert(bspline3([0 0 0 0 1 4 5 5 5 5],[1:6],2),761/240,10*eps);
#!assert(bspline3([0 0 0 0 1 4 5 5 5 5],[1:6],[2,2]),[761/240,761/240],10*eps);
#!assert(bspline3([0 0 0 1 1 3 4 6 6 6],[1:6],3.2),4.2976,10*eps);
import numpy as nx
class BSpline3:
    """Manage control points for a parametric B-spline.

    Holds parallel x/y control arrays plus a knot sequence whose end knots
    are doubled, and supports list-style editing of the control points.
    """
    # TODO: this class doesn't give much control over knots.
    def __init__(self, x, y, clamp=True):
        # Knot sequence 0..n-1 with the first and last knots repeated twice,
        # giving the multiplicity needed at the ends of a cubic spline.
        n = len(x)
        self.knot = nx.concatenate([[0.]*2, range(n), [n-1]*2])
        self.x = x
        self.y = y
        self.clamp = clamp
    def __len__(self):
        """Count the knots"""
        return len(self.x)
    def __getitem__(self, i):
        """Get control point for a knot"""
        return self.x[i], self.y[i]
    def __setitem__(self, i, pair):
        """Set control point for a knot"""
        self.x[i],self.y[i] = pair
    def __delitem__(self, i):
        """Delete a knot"""
        if i < 0 or i >= len(self.x): raise IndexError
        self.x = nx.delete(self.x,i)
        self.y = nx.delete(self.y,i)
        # Remove the interior knot matching control point i (+2 skips the pad).
        self.knot = nx.delete(self.knot,i+2)
        if i == 0:
            # Re-duplicate the new first knot at the front.
            self.knot[0:2] = self.knot[2]
        elif i == len(self.x)-2:
            # NOTE(review): len(self.x) was already shortened by the delete
            # above, so this triggers for the second-to-last remaining point,
            # not the deleted last point — confirm the intended condition.
            # NOTE(review): the slice [-2:-1] assigns only one element,
            # unlike the two-element [0:2] fix-up in the i == 0 branch.
            self.knot[-2:-1] = self.knot[-3]
    def __call__(self, t):
        """Evaluate the B-spline at points t, returning (x(t), y(t))."""
        fx = bspline3(self.knot,self.x,t,clamp=self.clamp)
        fy = bspline3(self.knot,self.y,t,clamp=self.clamp)
        return fx,fy
    def append(self,x,y):
        """Add a knot to the end"""
        self.x = nx.concatenate([self.x,[x]])
        self.y = nx.concatenate([self.y,[y]])
        # New final knot value, then re-duplicate it so the end keeps
        # the required multiplicity.
        k = self.knot[-1]+1
        self.knot = nx.concatenate([self.knot,[k]])
        self.knot[-3:-1] = k
    def sample(self,n=400):
        """Sample the B-spline at n equidistant points in t"""
        # knot[2]..knot[-3] is the parametric range covered by real controls.
        t = nx.linspace(self.knot[2],self.knot[-3],n)
        return self.__call__(t)
def demo():
    """Plot a clamped cubic B-spline together with its control polygon."""
    import pylab
    samples = N.linspace(-1, 7, 40)
    knot = N.array([0, 1, 1, 3, 4, 6], 'f')
    control = N.array([1, 2, 3, 2, 1, 2], 'f')
    # Pad the knot sequence with two extra knots at each end so that
    # len(knotseq) - len(control) == 4, as bspline3 requires.
    knotseq = cat([knot[0] - 1, knot[0]], knot, [knot[-1], knot[-1] + 1])
    values = bspline3(knotseq, control, samples, clamp=True)
    pylab.plot(samples, values, '-', knot, control, 'x')
    pylab.show()
if __name__ == "__main__": demo()
| [
"william.ratcliff@e28a235e-f944-0410-a937-4d0c1e564b32"
] | william.ratcliff@e28a235e-f944-0410-a937-4d0c1e564b32 |
87c991b0181b8c4ceac044906c173fd51e8341be | b76615ff745c6d66803506251c3d4109faf50802 | /pyobjc-framework-CoreText/PyObjCTest/test_coretext.py | 1cbd69a8ce38bd6cf94b9f0fff3c13bac2b8271a | [
"MIT"
] | permissive | danchr/pyobjc-git | 6ef17e472f54251e283a0801ce29e9eff9c20ac0 | 62b787fddeb381184043c7ff136f1c480755ab69 | refs/heads/master | 2021-01-04T12:24:31.581750 | 2020-02-02T20:43:02 | 2020-02-02T20:43:02 | 240,537,392 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,112 | py | from PyObjCTools.TestSupport import *
from CoreText import *
class TestCoreText(TestCase):
    """Smoke tests for the CoreText constants and functions bridged by PyObjC."""
    def testConstants(self):
        # Each kCTVersionNumber* value is fixed by Apple's CoreText headers
        # and encodes the framework version shipped with that OS release.
        self.assertEqual(kCTVersionNumber10_5, 0x00020000)
        self.assertEqual(kCTVersionNumber10_5_2, 0x00020001)
        self.assertEqual(kCTVersionNumber10_5_3, 0x00020002)
        self.assertEqual(kCTVersionNumber10_5_5, 0x00020003)
        self.assertEqual(kCTVersionNumber10_6, 0x00030000)
        self.assertEqual(kCTVersionNumber10_7, 0x00040000)
        self.assertEqual(kCTVersionNumber10_8, 0x00050000)
        self.assertEqual(kCTVersionNumber10_9, 0x00060000)
        self.assertEqual(kCTVersionNumber10_10, 0x00070000)
        self.assertEqual(kCTVersionNumber10_11, 0x00080000)
        self.assertEqual(kCTVersionNumber10_12, 0x00090000)
        self.assertEqual(kCTVersionNumber10_13, 0x000A0000)
        self.assertEqual(kCTVersionNumber10_14, 0x000B0000)
        self.assertEqual(kCTVersionNumber10_15, 0x000C0000)
    def testFunctions(self):
        # The bridged function should return an integer version number.
        # NOTE: the `long` reference implies this file targets Python 2 as well.
        v = CTGetCoreTextVersion()
        self.assertIsInstance(v, (int, long))
if __name__ == "__main__":
main()
| [
"ronaldoussoren@mac.com"
] | ronaldoussoren@mac.com |
a2c1ecc98cc0627fd78f90919a8545ff4de92e84 | 8426ae07c324370d87a0f009f1aae872a3efade1 | /tools/viewing/view_histogram.py | 47d3851ffbfb80add5f8d6c95cb0fc2809ba6f54 | [] | no_license | TomChow01/docrec-tifs18 | 2d4e95592732d9b42c023c2be912ba52cb1054f5 | 08e8099a9f5c95f8d513083acc321bebabb41245 | refs/heads/master | 2022-02-12T05:58:13.276509 | 2019-06-24T11:48:33 | 2019-06-24T11:48:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 820 | py | import sys
sys.path.append('../')
import matplotlib.pyplot as plt
import cv2
import numpy as np
from scipy import stats

# Usage: python view_histogram.py <image-path>
img_filename = sys.argv[1]

# One figure with two panels: histogram (left) and its CDF (right).
fig = plt.figure(figsize=(12, 12), dpi=300)
ax1 = fig.add_subplot(121)
ax2 = fig.add_subplot(122)

# Open as grayscale
gray = cv2.imread(img_filename, cv2.IMREAD_GRAYSCALE)

# Open as rgb
# NOTE(review): `rgb` is loaded but never used below.
rgb = cv2.imread(img_filename, cv2.IMREAD_COLOR)

# Empirical probability of each gray level.
counts = np.bincount(gray.flatten())
probs = counts / float(counts.sum())

# Stats of the gray-level distribution.
entropy = stats.entropy(probs)
skew = stats.skew(probs)
kurtosis = stats.kurtosis(probs)
# Mean darkness relative to pure white (255); a simple "ink" contrast measure.
weber_contrast = (255 - gray).sum() / (255.0 * gray.size)

print('Shannon entropy % .5f' % entropy)
print('Skew % .5f' % skew)
print('Kurtosis % .5f' % kurtosis)
print('Weber contrast % .5f' % weber_contrast)

ax1.plot(probs)
ax2.plot(probs.cumsum())
plt.show()
"paixao@gmail.com"
] | paixao@gmail.com |
5991535deca4ef0b5cf2d84263c3b15c13c62e3c | 609d037275a6b7c6aeae194c6ac0fe0a5ffc72e5 | /train.py | 882a10809c965c4a008846a208676957130e4b58 | [] | no_license | RobertSamoilescu/Robotics | e23da73073a819a158c37701d7aede48e7b8946e | 58a7fb61fd2f21bf0af5f73c5ffb2efe42b22a31 | refs/heads/master | 2020-05-24T20:51:52.256121 | 2019-05-22T17:58:01 | 2019-05-30T20:56:23 | 187,463,274 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,515 | py | import torch
import model
import sys
from logger import Logger
from model import *
# define constants
batch_size = 64
num_workers = 7

# define loaders (augmentation only on the training split)
train_dataset = UPBDataset("train", augmentation=True)
train_dataloader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers)
validation_dataset = UPBDataset("validation", augmentation=False)
validation_dataloader = DataLoader(validation_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers)

# initialize logger (tensorboard-style summaries written under ./logs)
logger = Logger('./logs')

# initialize model on GPU, in training mode
net = model.VGGBased(2).cuda()
net = net.train()
print(net)

# criterion & optimizer
criterion = torch.nn.MSELoss()
optimizer = torch.optim.RMSprop(net.parameters(), lr=1e-4)

# best validation loss so far; updated inside train() when a checkpoint is saved
best_loss = sys.maxsize
def train(epoch):
    """Run one training epoch over train_dataloader.

    Uses the module-level net / optimizer / criterion / logger globals,
    periodically logs metrics, and triggers validation, checkpointing the
    model whenever the validation loss improves.
    """
    global best_loss
    # exponential moving average of the batch loss (None until the first batch)
    running_loss = None
    for i, data in enumerate(train_dataloader, 0):
        # get inputs
        X, Y_gt = data['img'], data['steer_coord']
        # send to gpu
        X = X.cuda()
        Y_gt = Y_gt.cuda()
        # zero the parameters gradient
        optimizer.zero_grad()
        # forward, backward & optimize
        Y = net(X)
        loss = criterion(Y, Y_gt)
        loss.backward()
        optimizer.step()
        # update running loss
        running_loss = loss.item() if running_loss is None else 0.9 * running_loss + 0.1 * loss.item()
        # tensor board plots & print (~50 times per epoch)
        if i % max(1, (len(train_dataloader) // 50)) == 0:
            # display
            print(' * [%d, %5d] MSE loss training: %.6f, Euclidian distance training: %.6f' % (epoch, i, running_loss, np.sqrt(running_loss)))
            # tensorboard plots; step is a global iteration counter
            step = epoch * len(train_dataloader) + i
            logger.scalar_summary('MSE loss training', loss.item(), step)
            logger.scalar_summary('Euclidian distance training', torch.sqrt(loss).item(), step)
            # parameter and gradient histograms
            for tag, value in net.named_parameters():
                tag = tag.replace('.', '/')
                logger.histo_summary(tag, value.data.cpu().numpy(), step)
                logger.histo_summary(tag + '/grad', value.grad.data.cpu().numpy(), step)
        # run validation ~20 times per epoch
        if i % max(1, (len(train_dataloader) // 20)) == 0:
            # NOTE(review): `step` is only assigned in the logging branch above;
            # if this branch fires on an iteration where that one did not, the
            # value from a previous iteration is reused — confirm intended.
            # NOTE(review): evaluate() receives the global step although its
            # parameter is named `epoch` — verify the intended x-axis.
            eval_loss = evaluate(step)
            if eval_loss < best_loss:
                best_loss = eval_loss
                # saves the full module object, not just a state_dict
                torch.save(net, './checkpoints/best_model')
                print("Model saved")
def evaluate(epoch):
    """Compute the mean MSE over the validation set, log it, and return it.

    Called from train() with the global step passed as `epoch`. Switches the
    network to eval mode for the pass and back to train mode before returning.
    """
    total_loss = 0
    net.eval()
    for i, data in enumerate(validation_dataloader, 0):
        # get inputs
        X, Y_gt = data['img'], data['steer_coord']
        # send to gpu
        X = X.cuda()
        Y_gt = Y_gt.cuda()
        # forward only; no gradients needed during validation
        with torch.no_grad():
            Y = net(X)
            loss = criterion(Y, Y_gt)
            # weight the batch loss by its size so the final mean is exact
            total_loss += loss.item() * X.shape[0]
    mean_loss = total_loss / len(validation_dataset)
    # logger ("MES" typo lives in the runtime string and is left untouched here)
    print("\t * [%d] MES loss validation: %.6f, Euclidian distance validation: %.6f"
          % (epoch, mean_loss, np.sqrt(mean_loss)))
    logger.scalar_summary("MSE loss validation", mean_loss, epoch)
    logger.scalar_summary("Euclidian distance validation", np.sqrt(mean_loss), epoch)
    net.train()
    return mean_loss
def main():
    """Run the training loop for epochs 1 through 99999."""
    epoch = 1
    while epoch < 100000:
        train(epoch)
        epoch += 1
if __name__ == "__main__":
main()
| [
"robert.samoilescu@gmail.com"
] | robert.samoilescu@gmail.com |
9b930daeaf526a170c1f5dd3a676f6432129f540 | 714803060d2d667f4d331b605281304469c82f75 | /bcbio/variation/population.py | b32afd052af3d39124fdb13f34a733a45d5c7bba | [
"MIT"
] | permissive | nirajkhe/bcbio-nextgen | 60b09a14830833737b406072c9b8ecbe49a16a8f | 84a77ddab0eaa2614bc4ba42f5d66c8e557f7ea5 | refs/heads/master | 2021-01-15T12:31:35.674209 | 2014-12-13T11:18:19 | 2014-12-13T11:18:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,982 | py | """Provide infrastructure to allow exploration of variations within populations.
Uses the gemini framework (https://github.com/arq5x/gemini) to build SQLite
database of variations for query and evaluation.
"""
import collections
from distutils.version import LooseVersion
import os
import subprocess
import toolz as tz
from bcbio import install, utils
from bcbio.distributed.transaction import file_transaction
from bcbio.pipeline import config_utils
from bcbio.provenance import do, programs
from bcbio.variation import vcfutils
def prep_gemini_db(fnames, call_info, samples):
    """Prepare a gemini database from VCF inputs prepared with snpEff.

    fnames -- VCF files to load (possibly repeated across a batch)
    call_info -- (name, caller, is_batch) identifying the output database
    samples -- sample dictionaries; the first provides dirs/config
    Returns [[(name, caller), {"db": path-or-None, "vcf": path-or-None}]].
    """
    data = samples[0]
    out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
    name, caller, is_batch = call_info
    gemini_db = os.path.join(out_dir, "%s-%s.db" % (name, caller))
    gemini_vcf = get_multisample_vcf(fnames, name, caller, data)
    # Cheap pre-check (no gemini executable probe) before doing real work.
    use_gemini_quick = (do_db_build(samples, check_gemini=False) and
                        any(vcfutils.vcf_has_variants(f) for f in fnames))
    if not utils.file_exists(gemini_db) and use_gemini_quick:
        # Full check, including that a working gemini is installed.
        use_gemini = do_db_build(samples) and any(vcfutils.vcf_has_variants(f) for f in fnames)
        if use_gemini:
            with file_transaction(data, gemini_db) as tx_gemini_db:
                gemini = config_utils.get_program("gemini", data["config"])
                if "program_versions" in data["config"].get("resources", {}):
                    gemini_ver = programs.get_version("gemini", config=data["config"])
                else:
                    gemini_ver = None
                # Recent versions of gemini allow loading only passing variants
                load_opts = ""
                if not gemini_ver or LooseVersion(gemini_ver) > LooseVersion("0.6.2.1"):
                    load_opts += " --passonly"
                # For small test files, skip gene table loading which takes a long time
                if gemini_ver and LooseVersion(gemini_ver) > LooseVersion("0.6.4"):
                    if _is_small_vcf(gemini_vcf):
                        load_opts += " --skip-gene-tables"
                    if "/test_automated_output/" in gemini_vcf:
                        load_opts += " --test-mode"
                # Skip CADD if its data file is not installed
                if gemini_ver and LooseVersion(gemini_ver) >= LooseVersion("0.7.0"):
                    gemini_dir = install.get_gemini_dir()
                    for skip_cmd, check_file in [("--skip-cadd", "whole_genome_SNVs.tsv.compressed.gz")]:
                        if not os.path.exists(os.path.join(gemini_dir, check_file)):
                            load_opts += " %s" % skip_cmd
                # skip gerp-bp which slows down loading
                load_opts += " --skip-gerp-bp "
                num_cores = data["config"]["algorithm"].get("num_cores", 1)
                # Effects annotation tool determines the -t flag gemini expects.
                eanns = ("snpEff" if tz.get_in(("config", "algorithm", "effects"), data, "snpeff") == "snpeff"
                         else "VEP")
                cmd = "{gemini} load {load_opts} -v {gemini_vcf} -t {eanns} --cores {num_cores} {tx_gemini_db}"
                cmd = cmd.format(**locals())
                do.run(cmd, "Create gemini database for %s %s" % (name, caller), data)
    return [[(name, caller), {"db": gemini_db if utils.file_exists(gemini_db) else None,
                              "vcf": gemini_vcf if is_batch else None}]]
def _is_small_vcf(vcf_file):
    """Check for small VCFs which we want to analyze quicker.

    Returns True when the file holds at most 250 variant records
    (header lines starting with '#' are ignored), stopping the scan
    as soon as the threshold is exceeded.
    """
    limit = 250
    seen = 0
    with utils.open_gzipsafe(vcf_file) as handle:
        for line in handle:
            if line.startswith("#"):
                continue
            seen += 1
            if seen > limit:
                return False
    return True
def get_multisample_vcf(fnames, name, caller, data):
    """Retrieve a multiple sample VCF file in a standard location.

    Handles inputs with multiple repeated input files from batches:
    de-duplicates fnames, then either reuses a pre-merged batch VCF,
    merges the per-sample files, or symlinks a single input.
    """
    # Preserve order while dropping repeats (batches list the same file
    # once per member sample).
    unique_fnames = []
    for f in fnames:
        if f not in unique_fnames:
            unique_fnames.append(f)
    out_dir = utils.safe_makedir(os.path.join(data["dirs"]["work"], "gemini"))
    if len(unique_fnames) > 1:
        gemini_vcf = os.path.join(out_dir, "%s-%s.vcf.gz" % (name, caller))
        # If a jointly-called batch VCF already exists for this caller,
        # link it instead of re-merging.
        vrn_file_batch = None
        for variant in data["variants"]:
            if variant["variantcaller"] == caller and variant.get("vrn_file_batch"):
                vrn_file_batch = variant["vrn_file_batch"]
        if vrn_file_batch:
            utils.symlink_plus(vrn_file_batch, gemini_vcf)
            return gemini_vcf
        else:
            return vcfutils.merge_variant_files(unique_fnames, gemini_vcf, data["sam_ref"],
                                                data["config"])
    else:
        # Single input: keep its original extension (.vcf or .vcf.gz).
        gemini_vcf = os.path.join(out_dir, "%s-%s%s" % (name, caller, utils.splitext_plus(unique_fnames[0])[1]))
        utils.symlink_plus(unique_fnames[0], gemini_vcf)
        return gemini_vcf
def _has_gemini(config):
    """Return True when a runnable gemini executable is configured.

    Probes `gemini -h`; exit codes 0 and 1 both count as working since
    older versions exit non-zero on the help screen.
    """
    try:
        gemini = config_utils.get_program("gemini", config)
    except config_utils.CmdNotFound:
        return False
    try:
        proc = subprocess.Popen([gemini, "-h"], stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT)
        proc.wait()
        proc.stdout.close()
    except OSError:
        # Executable missing or not runnable.
        return False
    return proc.returncode in [0, 1]
def do_db_build(samples, check_gemini=True, need_bam=True, gresources=None):
    """Confirm we should build a gemini database.

    Requires human samples sharing a single genome build, gemini not listed
    in tools_off, and (optionally) a working gemini executable.
    """
    builds = set()
    for sample in samples:
        if not need_bam or sample.get("align_bam"):
            builds.add(sample["genome_build"])
        if "gemini" in utils.get_in(sample, ("config", "algorithm", "tools_off"), []):
            # Explicitly disabled for this run.
            return False
    if len(builds) != 1:
        # Mixed (or no) genome builds: cannot build one shared database.
        return False
    resources = gresources if gresources else samples[0]["genome_resources"]
    # Short-circuit keeps the subprocess probe from running for non-human data.
    return (tz.get_in(["aliases", "human"], resources, False)
            and (not check_gemini or _has_gemini(samples[0]["config"])))
def get_gemini_files(data):
    """Enumerate available gemini data files in a standard installation.

    Returns an empty dict when the gemini package is not importable.
    """
    try:
        from gemini import annotations, config
    except ImportError:
        return {}
    annotation_dir = config.read_gemini_config()["annotation_dir"]
    annotation_files = annotations.get_anno_files().values()
    return {"base": annotation_dir,
            "files": annotation_files}
def _group_by_batches(samples, check_fn):
    """Group data items into batches, providing details to retrieve results.

    Returns a 4-tuple:
      batch_groups -- {(batch, caller): [(vrn_file, data), ...]} for batched items
      singles      -- [(name, caller, data, vrn_file)] for unbatched items
      out_retrieve -- [(batch-or-name, data)] in result-retrieval order
      extras       -- data items failing check_fn (e.g. no variant calls)
    """
    batch_groups = collections.defaultdict(list)
    singles = []
    out_retrieve = []
    extras = []
    # samples arrive as single-item lists from the parallel framework.
    for data in [x[0] for x in samples]:
        if check_fn(data):
            batch = tz.get_in(["metadata", "batch"], data)
            name = str(data["name"][-1])
            if batch:
                out_retrieve.append((str(batch), data))
            else:
                out_retrieve.append((name, data))
            for vrn in data["variants"]:
                # population=False marks callers excluded from gemini databases.
                if vrn.get("population", True):
                    if batch:
                        batch_groups[(str(batch), vrn["variantcaller"])].append((vrn["vrn_file"], data))
                    else:
                        singles.append((name, vrn["variantcaller"], data, vrn["vrn_file"]))
        else:
            extras.append(data)
    return batch_groups, singles, out_retrieve, extras
def _has_variant_calls(data):
return data.get("align_bam") and data.get("vrn_file") and vcfutils.vcf_has_variants(data["vrn_file"])
def prep_db_parallel(samples, parallel_fn):
    """Prepares gemini databases in parallel, handling jointly called populations.

    Builds the work list of (files, call_info, samples) units, runs
    prep_gemini_db via parallel_fn, then re-attaches the resulting database
    info onto each sample's variant entries.
    """
    batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
    to_process = []
    has_batches = False
    # NOTE: .iteritems() is Python 2 only.
    for (name, caller), info in batch_groups.iteritems():
        fnames = [x[0] for x in info]
        to_process.append([fnames, (str(name), caller, True), [x[1] for x in info]])
        has_batches = True
    for name, caller, data, fname in singles:
        to_process.append([[fname], (str(name), caller, False), [data]])
    # Skip the parallel step entirely when no database would be built anyway.
    if len(samples) > 0 and not do_db_build([x[0] for x in samples], check_gemini=False) and not has_batches:
        return samples
    output = parallel_fn("prep_gemini_db", to_process)
    # Index results by (batch-or-name, caller) for the re-attachment pass.
    out_fetch = {}
    for batch_id, out_file in output:
        out_fetch[tuple(batch_id)] = out_file
    out = []
    for batch_name, data in out_retrieve:
        out_variants = []
        for vrn in data["variants"]:
            # pop() removes the marker flag; replace it with the actual result.
            use_population = vrn.pop("population", True)
            if use_population:
                vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
            out_variants.append(vrn)
        data["variants"] = out_variants
        out.append([data])
    for x in extras:
        out.append([x])
    return out
| [
"chapmanb@50mail.com"
] | chapmanb@50mail.com |
52eb6c8fd544416acfe3ad485d74995d1f24f22e | 4040b4103295141d979e004e34426aefed01f5d6 | /student_list.py | 687cee506be61e27919b8792662e6e68166ea757 | [] | no_license | mehulchopradev/bryan-python | f2fe55d8a6043333127bef7af9f359aae008b74f | 7ef405b40855c6fe7efac981c6b9d7b6aadc3237 | refs/heads/master | 2020-08-23T07:23:21.109175 | 2019-10-30T13:04:05 | 2019-10-30T13:04:05 | 216,570,101 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 684 | py | from com.abc.college.student import Student
# Map roll number -> Student so lookup by roll is a single O(1) dict access
# (the earlier commented-out list-scan implementation has been removed).
smap = {
    10: Student('mehul', 'm', 10),
    5: Student('bryan', 'm', 5),
    23: Student('jane', 'f', 23)
}

roll = int(input('Enter roll to search : '))

# Single lookup instead of a membership test followed by indexing.
student = smap.get(roll)
if student is not None:
    print(student.get_details())
else:
    print('Student not found')
"Mehul.Chopra@avalara.com"
] | Mehul.Chopra@avalara.com |
9321b829a43b3d9c0238e474739ef74a0282542a | 27bfa36b30e86dc749e3ff0d405d702721484eaa | /venv/bin/easy_install | 00e86fb213ca698413a89417260af1a2831660d9 | [] | no_license | mr-kaveh/miniblog | 00e9cb13067e2a02471d2ca8a92c1606f5307eef | b9bc6a652c16db6b487af895b1f4a7e4e62fb964 | refs/heads/master | 2020-04-13T16:02:59.025841 | 2019-01-11T17:06:29 | 2019-01-11T17:06:29 | 163,310,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 445 | #!/home/hossein/myScripts/megaTutorial/blog/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated setuptools console-script wrapper: resolves and invokes the
# 'easy_install' entry point pinned to setuptools 39.1.0.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip any '-script.py(w)' / '.exe' suffix so argv[0] is the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"mr.hdavoodi@gmail.com"
] | mr.hdavoodi@gmail.com | |
5ba7c9187bdea4e96a93da519265ab3849441bb1 | b289cc5016d394ff3137e85cf924d9c361c4d223 | /test/venv/Scripts/pip3-script.py | 9d042772e108f6506edb75a7a1635d77dcb5ab42 | [] | no_license | thundernova/spider | 0b409fccbe21998bb4179794dc46c887e2203608 | ebf5198e5bffa4c1c6282a88245fdf41ec37b86c | refs/heads/master | 2022-12-28T16:28:48.416976 | 2019-09-26T09:43:08 | 2019-09-26T09:43:08 | null | 0 | 0 | null | null | null | null | WINDOWS-1252 | Python | false | false | 411 | py | #!E:\ÅÀ³æ\ÅÀ³æÏîÄ¿\test\venv\Scripts\python.exe -x
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip3'
# Auto-generated setuptools console-script wrapper: resolves and invokes the
# 'pip3' entry point pinned to pip 10.0.1.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point

if __name__ == '__main__':
    # Strip any '-script.py(w)' / '.exe' suffix so argv[0] is the bare command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip3')()
    )
| [
"315909056@qq.com"
] | 315909056@qq.com |
e7f1d3c40f50cd034136f6d0db6199b787754ea5 | facb8b9155a569b09ba66aefc22564a5bf9cd319 | /wp2/merra_scripts/03_model_fitting/merraRF882/868-tideGauge.py | ba649b09866db77f7861a146ed63d959be6d0543 | [] | no_license | moinabyssinia/modeling-global-storm-surges | 13e69faa8f45a1244a964c5de4e2a5a6c95b2128 | 6e385b2a5f0867df8ceabd155e17ba876779c1bd | refs/heads/master | 2023-06-09T00:40:39.319465 | 2021-06-25T21:00:44 | 2021-06-25T21:00:44 | 229,080,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,456 | py | # -*- coding: utf-8 -*-
"""
Created on Mon May 4 15:51:30 2020
This program is designed to validate a Random Forest
model by using the KFOLD method
@author: Michael Tadesse
"""
#import packages
import os
import glob
import numpy as np
import pandas as pd
from sklearn import metrics
from scipy import stats
import seaborn as sns
import matplotlib.pyplot as plt
from datetime import datetime
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.preprocessing import StandardScaler
def validateRF():
    """
    run KFOLD method for regression

    Validates a Random Forest surge model for one tide gauge (index 868):
    loads lagged MERRA predictors, standardizes them, merges with daily-max
    surge observations, reduces with PCA (95% variance), then evaluates a
    Random Forest with 10-fold CV, appending correlation/RMSE to a CSV.
    """
    #defining directories
    dir_in = "/lustre/fs0/home/mtadesse/merraAllLagged"
    dir_out = "/lustre/fs0/home/mtadesse/merraRFValidation"
    surge_path = "/lustre/fs0/home/mtadesse/05_dmax_surge_georef"

    #cd to the lagged predictors directory
    os.chdir(dir_in)

    # gauge index range handled by this job instance (a single gauge here)
    x = 868
    y = 869

    #empty dataframe for model validation
    df = pd.DataFrame(columns = ['tg', 'lon', 'lat', 'num_year', \
                                 'num_95pcs','corrn', 'rmse'])

    #looping through
    for tg in range(x, y):
        os.chdir(dir_in)

        #filter only .csv files
        tgNames = []
        for file in glob.glob("*.csv"):
            tgNames.append(file)

        tg_name = sorted(tgNames)[tg]
        print(tg_name)

        ##########################################
        #check if this tg is already taken care of
        ##########################################
        os.chdir(dir_out)
        if os.path.isfile(tg_name):
            print("this tide gauge is already taken care of")
            # NOTE(review): `return` exits the whole function, skipping any
            # remaining gauges; only safe because the range covers one gauge.
            return "file already analyzed!"

        os.chdir(dir_in)

        #load predictor
        pred = pd.read_csv(tg_name)
        pred.drop('Unnamed: 0', axis = 1, inplace = True)

        #add squared and cubed wind terms (as in WPI model)
        pickTerms = lambda x: x.startswith('wnd')
        wndTerms = pred.columns[list(map(pickTerms, pred.columns))]
        wnd_sqr = pred[wndTerms]**2
        wnd_cbd = pred[wndTerms]**3
        pred = pd.concat([pred, wnd_sqr, wnd_cbd], axis = 1)

        #standardize predictor data (all columns except 'date')
        dat = pred.iloc[:,1:]
        scaler = StandardScaler()
        print(scaler.fit(dat))
        dat_standardized = pd.DataFrame(scaler.transform(dat), \
                                        columns = dat.columns)
        pred_standardized = pd.concat([pred['date'], dat_standardized], axis = 1)

        #load surge data
        os.chdir(surge_path)
        surge = pd.read_csv(tg_name)
        surge.drop('Unnamed: 0', axis = 1, inplace = True)

        #remove duplicated surge rows
        surge.drop(surge[surge['ymd'].duplicated()].index, axis = 0, inplace = True)
        surge.reset_index(inplace = True)
        surge.drop('index', axis = 1, inplace = True)

        #adjust surge time format to match that of pred
        time_str = lambda x: str(datetime.strptime(x, '%Y-%m-%d'))
        surge_time = pd.DataFrame(list(map(time_str, surge['ymd'])), columns = ['date'])
        time_stamp = lambda x: (datetime.strptime(x, '%Y-%m-%d %H:%M:%S'))
        surge_new = pd.concat([surge_time, surge[['surge', 'lon', 'lat']]], axis = 1)

        #merge predictors and surge to find common time frame
        pred_surge = pd.merge(pred_standardized, surge_new.iloc[:,:2], on='date', how='right')
        pred_surge.sort_values(by = 'date', inplace = True)

        #find rows that have nans and remove them
        row_nan = pred_surge[pred_surge.isna().any(axis =1)]
        pred_surge.drop(row_nan.index, axis = 0, inplace = True)
        pred_surge.reset_index(inplace = True)
        pred_surge.drop('index', axis = 1, inplace = True)

        #in case pred and surge don't overlap
        if pred_surge.shape[0] == 0:
            print('-'*80)
            # NOTE(review): adjacent quotes ('don''t') concatenate to "dont";
            # runtime string left untouched here.
            print('Predictors and Surge don''t overlap')
            print('-'*80)
            continue

        # convert date strings back to datetime objects for the span calculation
        pred_surge['date'] = pd.DataFrame(list(map(time_stamp, \
                                                   pred_surge['date'])), \
                                          columns = ['date'])

        #prepare data for training/testing
        X = pred_surge.iloc[:,1:-1]
        y = pd.DataFrame(pred_surge['surge'])
        y = y.reset_index()
        y.drop(['index'], axis = 1, inplace = True)

        #apply PCA keeping 95% of the variance
        pca = PCA(.95)
        pca.fit(X)
        X_pca = pca.transform(X)

        #apply 10 fold cross validation
        # NOTE(review): recent scikit-learn raises ValueError for
        # random_state without shuffle=True — confirm target version.
        kf = KFold(n_splits=10, random_state=29)

        metric_corr = []; metric_rmse = []; #combo = pd.DataFrame(columns = ['pred', 'obs'])
        # X and X_pca have identical row counts, so split indices transfer.
        for train_index, test_index in kf.split(X):
            X_train, X_test = X_pca[train_index], X_pca[test_index]
            y_train, y_test = y['surge'][train_index], y['surge'][test_index]

            #train regression model
            rf= RandomForestRegressor(n_estimators = 50, random_state = 101, \
                                      min_samples_leaf = 1)
            rf.fit(X_train, y_train)

            #predictions
            predictions = rf.predict(X_test)

            #evaluation matrix - check p value
            # folds with statistically insignificant correlation are skipped
            if stats.pearsonr(y_test, predictions)[1] >= 0.05:
                print("insignificant correlation!")
                continue
            else:
                print(stats.pearsonr(y_test, predictions))
                metric_corr.append(stats.pearsonr(y_test, predictions)[0])
                print(np.sqrt(metrics.mean_squared_error(y_test, predictions)))
                print()
                metric_rmse.append(np.sqrt(metrics.mean_squared_error(y_test, predictions)))

        #number of years used to train/test model
        num_years = (pred_surge['date'][pred_surge.shape[0]-1] -\
                     pred_surge['date'][0]).days/365
        longitude = surge['lon'][0]
        latitude = surge['lat'][0]
        num_pc = X_pca.shape[1] #number of principal components
        corr = np.mean(metric_corr)
        rmse = np.mean(metric_rmse)

        print('num_year = ', num_years, ' num_pc = ', num_pc ,'avg_corr = ',np.mean(metric_corr), ' - avg_rmse (m) = ', \
              np.mean(metric_rmse), '\n')

        #original size and pca size of matrix added
        new_df = pd.DataFrame([tg_name, longitude, latitude, num_years, num_pc, corr, rmse]).T
        new_df.columns = ['tg', 'lon', 'lat', 'num_year', \
                          'num_95pcs','corrn', 'rmse']
        df = pd.concat([df, new_df], axis = 0)

        #save df as csv - in case of interruption
        os.chdir(dir_out)
        df.to_csv(tg_name)
validateRF()
| [
"michaelg.tadesse@gmail.com"
] | michaelg.tadesse@gmail.com |
da510a13d601fed9b2e8679f23a3ef9b8f484811 | 428853ca880fb863ef708022ccac1258c25fc850 | /vart/network/nn_base.py | 0902104c3683868705c0917fbf3164f205fc2d1f | [
"Apache-2.0"
] | permissive | NicoRenaud/vArt | 2ddb47cd3ee9dd725096c40e69b153db2f78eb03 | f6d50f9499c2fca7e07bf8a3b68d81f70649fb10 | refs/heads/master | 2020-07-24T08:04:18.602264 | 2019-09-12T16:27:32 | 2019-09-12T16:27:32 | 207,857,937 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,090 | py | import autograd.numpy as np
from autograd import elementwise_grad as egrad
from autograd import hessian, jacobian
from functools import partial
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import grad, Variable
from time import time
class WaveFunction(nn.Module):
    """Base class for neural-network wave functions psi(r).

    Subclasses implement the wave-function value (:meth:`forward`) and the
    potential-energy terms; this base class supplies the kinetic-energy
    operator (autograd or finite difference) plus the local energy, total
    energy and variance used in variational Monte Carlo.

    Args:
        nelec (int): number of electrons.
        ndim (int): number of spatial dimensions per electron.
        kinetic (str): 'auto' to get the Laplacian from autograd,
            'fd' for a centered finite-difference approximation.
    """

    def __init__(self, nelec, ndim, kinetic='auto'):
        super(WaveFunction, self).__init__()
        self.ndim = ndim
        self.nelec = nelec
        # total number of coordinates per walker (flattened electron positions)
        self.ndim_tot = self.nelec * self.ndim
        self.kinetic = kinetic

    def forward(self, x):
        """Compute the value of the wave function for a batch of
        electronic configurations.

        Args:
            x: positions of the electrons, shape (nwalker, ndim_tot).

        Returns:
            values of psi, shape (nwalker, 1).
        """
        raise NotImplementedError()

    def electronic_potential(self, pos):
        """Compute the electron-electron potential Vee at the sampling points.

        Args:
            pos: positions of the electrons.

        Returns:
            values of Vee.
        """
        raise NotImplementedError()

    def nuclear_potential(self, pos):
        """Compute the electron-nucleus potential Ven at the sampling points.

        Args:
            pos: positions of the electrons.

        Returns:
            values of Ven.
        """
        raise NotImplementedError()

    def nuclear_repulsion(self):
        """Compute the (position-independent) nucleus-nucleus repulsion Vnn."""
        raise NotImplementedError()

    def kinetic_energy(self, pos, out=None):
        """Dispatch to the kinetic-energy implementation chosen at init.

        Args:
            pos: positions of the electrons.
            out: precomputed values of the wf at pos (used by 'auto' only).

        Returns:
            values of -0.5 * nabla^2 psi, shape (nwalker, 1).

        Raises:
            ValueError: if ``self.kinetic`` is neither 'auto' nor 'fd'.
        """
        if self.kinetic == 'auto':
            return self.kinetic_energy_autograd(pos, out)
        elif self.kinetic == 'fd':
            return self.kinetic_energy_finite_difference(pos)
        else:
            raise ValueError('kinetic %s not recognized' % self.kinetic)

    def kinetic_energy_autograd(self, pos, out=None):
        """Compute -0.5 * nabla^2 psi via automatic differentiation.

        The Laplacian is assembled one coordinate at a time: first the
        Jacobian d psi / d pos, then the diagonal of the Hessian.

        Args:
            pos: positions of the electrons (must carry requires_grad).
            out: precomputed values of the wf at pos (recomputed if None).

        Returns:
            values of -0.5 * nabla^2 psi, shape (nwalker, 1).
        """
        if out is None:
            out = self.forward(pos)

        # compute the jacobian d psi / d pos
        z = Variable(torch.ones(out.shape))
        jacob = grad(out, pos, grad_outputs=z,
                     create_graph=True, only_inputs=False)[0]

        # accumulate the diagonal elements of the Hessian, one dim at a time
        z = Variable(torch.ones(jacob.shape[0]))
        hess = torch.zeros(jacob.shape[0])
        for idim in range(jacob.shape[1]):
            tmp = grad(jacob[:, idim], pos,
                       grad_outputs=z,
                       create_graph=True,
                       allow_unused=True,
                       only_inputs=False)[0]
            hess += tmp[:, idim]

        return -0.5 * hess.view(-1, 1)

    def kinetic_energy_finite_difference(self, pos, eps=1E-3):
        """Compute -0.5 * nabla^2 psi with a centered finite difference.

        For each coordinate x:
            d2 psi / dx2  ~  (psi(x+eps) - 2 psi(x) + psi(x-eps)) / eps**2

        Args:
            pos: positions of the electrons, shape (nwalker, ndim_tot).
            eps: epsilon for the numerical derivative.

        Returns:
            values of -0.5 * nabla^2 psi, shape (nwalker, 1).
        """
        nwalk = pos.shape[0]
        ndim = pos.shape[1]
        out = torch.zeros(nwalk, 1)

        # psi(pos) is loop-invariant: evaluate it once instead of once per dim
        f0 = -2 * self.forward(pos.clone())

        for icol in range(ndim):
            pos_tmp = pos.clone()
            pos_tmp[:, icol] += eps
            feps = f0 + self.forward(pos_tmp)

            pos_tmp = pos.clone()
            pos_tmp[:, icol] -= eps
            feps = feps + self.forward(pos_tmp)

            out += feps / (eps**2)

        return -0.5 * out.view(-1, 1)

    def local_energy_save(self, pos):
        """Local energy of the sampling points (recomputes psi twice;
        kept for backward compatibility -- prefer :meth:`local_energy`)."""
        return self.kinetic_energy(pos)/self.forward(pos) \
            + self.nuclear_potential(pos) \
            + self.electronic_potential(pos) \
            + self.nuclear_repulsion()

    def local_energy(self, pos):
        """Local energy E_L = (K psi)/psi + Ven + Vee + Vnn at the sampling points."""
        # reuse the wf values in the kinetic term instead of recomputing them
        wf = self.forward(pos)
        ke = self.kinetic_energy(pos, out=wf)
        return ke/wf \
            + self.nuclear_potential(pos) \
            + self.electronic_potential(pos) \
            + self.nuclear_repulsion()

    def energy(self, pos):
        """Total energy: mean of the local energies over the sampling points."""
        return torch.mean(self.local_energy(pos))

    def variance(self, pos):
        """Variance of the local energy at the sampling points."""
        return torch.var(self.local_energy(pos))

    def pdf(self, pos):
        """Unnormalized density |psi|^2 of the wave function, flattened."""
        return (self.forward(pos)**2).reshape(-1)
| [
"nicolas.gm.renaud@gmail.com"
] | nicolas.gm.renaud@gmail.com |
7022095a59a50e7551cff88e0bfad6084ddddf93 | ff4fe07752b61aa6404f85a8b4752e21e8a5bac8 | /challenge-215/roger-bell-west/python/ch-2.py | 0dca0440e596590236e0101c7b6d153af66bb44f | [] | no_license | choroba/perlweeklychallenge-club | 7c7127b3380664ca829158f2b6161c2f0153dfd9 | 2b2c6ec6ece04737ba9a572109d5e7072fdaa14a | refs/heads/master | 2023-08-10T08:11:40.142292 | 2023-08-06T20:44:13 | 2023-08-06T20:44:13 | 189,776,839 | 0 | 1 | null | 2019-06-01T20:56:32 | 2019-06-01T20:56:32 | null | UTF-8 | Python | false | false | 640 | py | #! /usr/bin/python3
def numberplacement(a0, ct):
    """Return True if ct extra 1s can be placed into the 0/1 list a0 so
    that no two 1s (pre-existing or new) end up adjacent.

    A maximal run of k zeros bounded by 1s can absorb (k - 1) // 2 new 1s.
    Padding the list with sentinel 0s makes the edge runs (and the
    all-zero list) behave exactly like interior runs, so one formula
    covers every case.

    The original version padded with 1s and credited k // 2 per run,
    which overcounts even-length interior runs (e.g. [1,0,0,0,0,1] with
    ct=2) and undercounts odd-length all-zero lists (e.g. [0] with ct=1).
    """
    a = [0] + a0 + [0]
    capacity = 0
    run = 0
    for v in a:
        if v == 0:
            run += 1
        else:
            # close the current zero-run and credit its capacity
            if run:
                capacity += (run - 1) // 2
            run = 0
    # the trailing run (the padded list may end in zeros)
    if run:
        capacity += (run - 1) // 2
    return ct <= capacity
import unittest
class TestNumberplacement(unittest.TestCase):
    """Check numberplacement against the three task examples."""

    def test_ex1(self):
        # one extra 1 fits in the middle of the zero run
        self.assertTrue(numberplacement([1, 0, 0, 0, 1], 1), 'example 1')

    def test_ex2(self):
        # a second 1 would have to sit next to another
        self.assertFalse(numberplacement([1, 0, 0, 0, 1], 2), 'example 2')

    def test_ex3(self):
        # seven zeros between the 1s leave room for three more
        self.assertTrue(
            numberplacement([1, 0, 0, 0, 0, 0, 0, 0, 1], 3), 'example 3')
unittest.main()
| [
"roger@firedrake.org"
] | roger@firedrake.org |
289e20471c5101745fd5c8ae3a1b183a640d13f0 | 6bf4e54f8ae95582b73bb969ba44069c64e87651 | /kdhi/main_site/migrations/0064_auto_20200604_1726.py | 274bb623cea8b66901484390548f0a57fc0e0532 | [] | no_license | speedycowenator/kdhi_migration | 4bc983c4656a2a87cb056461bfb4219e38da1a85 | 422b2e3f142a30c81f428fb8eaa813e4a71d56fc | refs/heads/master | 2022-11-14T13:27:51.520697 | 2020-07-02T19:31:12 | 2020-07-02T19:31:12 | 246,138,874 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 519 | py | # Generated by Django 2.2.5 on 2020-06-04 21:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Position.position_status to a 20-char CharField with the
    choices Active / Unclear / Removed / Likely (default 'Active').

    NOTE(review): auto-generated by `makemigrations` (see header) --
    change models.py and regenerate rather than hand-editing this file.
    """

    dependencies = [
        ('main_site', '0063_auto_20200601_2331'),
    ]

    operations = [
        migrations.AlterField(
            model_name='position',
            name='position_status',
            # each 2-tuple is (stored value, human-readable label)
            field=models.CharField(choices=[('Active', 'Active'), ('Unclear', 'Unclear'), ('Removed', 'Removed'), ('Likely', 'Likely')], default='Active', max_length=20),
        ),
    ]
| [
"54556114+speedycowenator@users.noreply.github.com"
] | 54556114+speedycowenator@users.noreply.github.com |
10dcfc0a2776192fbdf3dfb574af35850dc1b8d4 | 0a2cc497665f2a14460577f129405f6e4f793791 | /sdk/resources/azure-mgmt-resource/azure/mgmt/resource/policy/v2017_06_01_preview/aio/operations/_policy_assignments_operations.py | a7decebc5e6714ca400d5980c671cf92c2ee43ff | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | hivyas/azure-sdk-for-python | 112158aa9e1dd6e30cf6b3dde19f5db6ea2a577b | 8b3258fa45f5dc25236c22ad950e48aa4e1c181c | refs/heads/master | 2023-06-17T12:01:26.392186 | 2021-05-18T19:56:01 | 2021-05-18T19:56:01 | 313,761,277 | 1 | 1 | MIT | 2020-12-02T17:48:22 | 2020-11-17T22:42:00 | Python | UTF-8 | Python | false | false | 33,220 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
# Signature of the optional `cls` callback every operation pops from kwargs:
# it receives (pipeline_response, deserialized_result, response_headers) and
# its return value replaces the deserialized result.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PolicyAssignmentsOperations:
"""PolicyAssignmentsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.resource.policy.v2017_06_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        # Wired up by the generated service client; per the class docstring,
        # not meant to be constructed directly by users.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def delete(
        self,
        scope: str,
        policy_assignment_name: str,
        **kwargs
    ) -> Optional["_models.PolicyAssignment"]:
        """Deletes a policy assignment.
        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to delete.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment or None
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated method (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PolicyAssignment"]]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        # Construct URL
        url = self.delete.metadata['url'] # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 200 and 204 are the accepted success codes; anything else is
        # mapped onto an ARM error and raised.
        if response.status_code not in [200, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = None
        # only a 200 response carries a PolicyAssignment body
        if response.status_code == 200:
            deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    delete.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
    async def create(
        self,
        scope: str,
        policy_assignment_name: str,
        parameters: "_models.PolicyAssignment",
        **kwargs
    ) -> "_models.PolicyAssignment":
        """Creates a policy assignment.
        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group.
        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment.
        :type policy_assignment_name: str
        :param parameters: Parameters for the policy assignment.
        :type parameters: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated method (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self.create.metadata['url'] # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # serialize the assignment into the PUT body
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PolicyAssignment')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 201 Created is the only accepted success code for this PUT
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
    async def get(
        self,
        scope: str,
        policy_assignment_name: str,
        **kwargs
    ) -> "_models.PolicyAssignment":
        """Gets a policy assignment.
        :param scope: The scope of the policy assignment.
        :type scope: str
        :param policy_assignment_name: The name of the policy assignment to get.
        :type policy_assignment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated method (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        # Construct URL
        url = self.get.metadata['url'] # type: ignore
        path_format_arguments = {
            'scope': self._serialize.url("scope", scope, 'str', skip_quote=True),
            'policyAssignmentName': self._serialize.url("policy_assignment_name", policy_assignment_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # only 200 is accepted; everything else becomes an ARM error
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    get.metadata = {'url': '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policyAssignmentName}'} # type: ignore
    def list_for_resource_group(
        self,
        resource_group_name: str,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.PolicyAssignmentListResult"]:
        """Gets policy assignments for the resource group.
        :param resource_group_name: The name of the resource group that contains policy assignments.
        :type resource_group_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated pager (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # First page: build the templated URL with query parameters.
            # Later pages: reuse the server-supplied next_link verbatim.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_for_resource_group.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str', skip_quote=True)
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # yield (link to next page, items of this page)
            deserialized = self._deserialize('PolicyAssignmentListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_for_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
    def list_for_resource(
        self,
        resource_group_name: str,
        resource_provider_namespace: str,
        parent_resource_path: str,
        resource_type: str,
        resource_name: str,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.PolicyAssignmentListResult"]:
        """Gets policy assignments for a resource.
        :param resource_group_name: The name of the resource group containing the resource. The name is
        case insensitive.
        :type resource_group_name: str
        :param resource_provider_namespace: The namespace of the resource provider.
        :type resource_provider_namespace: str
        :param parent_resource_path: The parent resource path.
        :type parent_resource_path: str
        :param resource_type: The resource type.
        :type resource_type: str
        :param resource_name: The name of the resource with policy assignments.
        :type resource_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated pager (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # First page: build the templated URL; later pages reuse next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list_for_resource.metadata['url'] # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._\(\)]+$'),
                    'resourceProviderNamespace': self._serialize.url("resource_provider_namespace", resource_provider_namespace, 'str'),
                    'parentResourcePath': self._serialize.url("parent_resource_path", parent_resource_path, 'str', skip_quote=True),
                    'resourceType': self._serialize.url("resource_type", resource_type, 'str', skip_quote=True),
                    'resourceName': self._serialize.url("resource_name", resource_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # yield (link to next page, items of this page)
            deserialized = self._deserialize('PolicyAssignmentListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_for_resource.metadata = {'url': '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{parentResourcePath}/{resourceType}/{resourceName}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
    def list(
        self,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.PolicyAssignmentListResult"]:
        """Gets all the policy assignments for a subscription.
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PolicyAssignmentListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignmentListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated pager (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignmentListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # First page: build the templated URL; later pages reuse next_link.
            # Construct headers
            header_parameters = {} # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # Construct URL
                url = self.list.metadata['url'] # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {} # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {} # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # yield (link to next page, items of this page)
            deserialized = self._deserialize('PolicyAssignmentListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Authorization/policyAssignments'} # type: ignore
    async def delete_by_id(
        self,
        policy_assignment_id: str,
        **kwargs
    ) -> "_models.PolicyAssignment":
        """Deletes a policy assignment by ID.
        When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
        subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
        resource groups, and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.
        :param policy_assignment_id: The ID of the policy assignment to delete. Use the format
        '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
        :type policy_assignment_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated method (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        accept = "application/json, text/json"

        # Construct URL
        url = self.delete_by_id.metadata['url'] # type: ignore
        path_format_arguments = {
            'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # note: unlike delete(), only 200 is accepted here (no 204)
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    delete_by_id.metadata = {'url': '/{policyAssignmentId}'} # type: ignore
    async def create_by_id(
        self,
        policy_assignment_id: str,
        parameters: "_models.PolicyAssignment",
        **kwargs
    ) -> "_models.PolicyAssignment":
        """Creates a policy assignment by ID.
        Policy assignments are inherited by child resources. For example, when you apply a policy to a
        resource group that policy is assigned to all resources in the group. When providing a scope
        for the assignment, use '/subscriptions/{subscription-id}/' for subscriptions,
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for resource groups,
        and
        '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
        for resources.
        :param policy_assignment_id: The ID of the policy assignment to create. Use the format
        '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
        :type policy_assignment_id: str
        :param parameters: Parameters for policy assignment.
        :type parameters: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PolicyAssignment, or the result of cls(response)
        :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        # NOTE(review): AutoRest-generated method (see file header) -- edits
        # will be lost on regeneration; regenerate rather than hand-editing.
        cls = kwargs.pop('cls', None) # type: ClsType["_models.PolicyAssignment"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2017-06-01-preview"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"

        # Construct URL
        url = self.create_by_id.metadata['url'] # type: ignore
        path_format_arguments = {
            'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {} # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {} # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        # serialize the assignment into the PUT body
        body_content_kwargs = {} # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PolicyAssignment')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 201 Created is the only accepted success code for this PUT
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

        deserialized = self._deserialize('PolicyAssignment', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized
    create_by_id.metadata = {'url': '/{policyAssignmentId}'} # type: ignore
async def get_by_id(
    self,
    policy_assignment_id: str,
    **kwargs
) -> "_models.PolicyAssignment":
    """Gets a policy assignment by ID.

    When providing a scope for the assignment, use '/subscriptions/{subscription-id}/' for
    subscriptions, '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}' for
    resource groups, and
    '/subscriptions/{subscription-id}/resourceGroups/{resource-group-name}/providers/{resource-provider-namespace}/{resource-type}/{resource-name}'
    for resources.

    :param policy_assignment_id: The ID of the policy assignment to get. Use the format
     '/{scope}/providers/Microsoft.Authorization/policyAssignments/{policy-assignment-name}'.
    :type policy_assignment_id: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :return: PolicyAssignment, or the result of cls(response)
    :rtype: ~azure.mgmt.resource.policy.v2017_06_01_preview.models.PolicyAssignment
    :raises: ~azure.core.exceptions.HttpResponseError
    """
    # Optional response hook: when supplied it receives the raw pipeline response.
    cls = kwargs.pop('cls', None)  # type: ClsType["_models.PolicyAssignment"]
    # Map HTTP status codes to the exception types callers expect; callers can
    # extend or override the mapping via the 'error_map' kwarg.
    error_map = {
        401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
    }
    error_map.update(kwargs.pop('error_map', {}))
    api_version = "2017-06-01-preview"
    accept = "application/json, text/json"

    # Construct URL
    url = self.get_by_id.metadata['url']  # type: ignore
    # skip_quote=True: the assignment id is itself a path ('/{scope}/providers/...')
    # and must not be percent-encoded when substituted into the URL template.
    path_format_arguments = {
        'policyAssignmentId': self._serialize.url("policy_assignment_id", policy_assignment_id, 'str', skip_quote=True),
    }
    url = self._client.format_url(url, **path_format_arguments)

    # Construct parameters
    query_parameters = {}  # type: Dict[str, Any]
    query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

    # Construct headers
    header_parameters = {}  # type: Dict[str, Any]
    header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

    request = self._client.get(url, query_parameters, header_parameters)
    pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
    response = pipeline_response.http_response

    if response.status_code not in [200]:
        map_error(status_code=response.status_code, response=response, error_map=error_map)
        # Best-effort parse of the ARM error body for a richer exception message.
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
        raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

    deserialized = self._deserialize('PolicyAssignment', pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})

    return deserialized
get_by_id.metadata = {'url': '/{policyAssignmentId}'}  # type: ignore
| [
"noreply@github.com"
] | hivyas.noreply@github.com |
3f8d86dba81d3a794f9f8cabc515358a27dd1aaf | 623701d50b98b678707771f06cd81d72a819ee80 | /sigfox_api_client/models/cellular_connectivity_base.py | 70d27e23944e67671f46f1c5722246c8ca433470 | [] | no_license | besterjaco/sigfoxapi | 1a63c6687bd26aa8a76248a8ea59532c786f6c78 | 03d86819365de95c0da32de1b1de0b7fd20a3c3d | refs/heads/master | 2020-09-21T17:05:47.908180 | 2019-11-27T11:20:51 | 2019-11-27T11:20:51 | 224,859,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,019 | py | # coding: utf-8
"""
Sigfox API
# API overview Sigfox API is used to integrate with the Sigfox platform. The API uses the HTTP protocol, following the REST principles (POST, GET, DELETE, PUT requests). The API endpoints accept and return data in the JSON format, with the corresponding \"application/json\" content type header. The Sigfox API access differs for every API User based on their profile. If you already have a Sigfox account, you can retrieve the API Documentation customized for your API User directly in json or yaml format. The “how to” procedure is detailed in the [API Documentation](https://support.sigfox.com/docs/api-documentation) article. The PUT request is the only request used to edit an existing entity. You don't need to specify each value. If a property is not present in the request, it won't be processed and updated. To remove an optional property, it must be filled in the request with the explicit value NULL. If a property has no value, it won't appear in the result of a GET request. # Authentication and security Sigfox API is only accessible using HTTPS, and all API endpoints require authentication credentials (API user login and password). An API User is associated to a group with given profiles. You can view and manage your API User in the [Sigfox Portal](https://backend.sigfox.com/auth/login). If you need an API User, follow the [API credential creation](https://support.sigfox.com/docs/api-credential-creation) procedure. Your API User must remain private. Should the API credentials be compromised, new ones can be generated at any moment, invalidating the previous ones. CORS and JSONP are intentionally unsupported. CORS and JSONP JavaScript techniques tends to expose your credentials to your users. If you really need to call Sigfox API from JavaScript in the browser, you must set a reverse proxy on your website. Be careful not to use proxy for all requests to Sigfox OSS but to only select the relevant ones. 
<!-- ReDoc-Inject: <security-definitions> --> # Usage limits All Sigfox API endpoints are using the same underlying technology that powers the core Sigfox Platform. For Cloud efficiency and security reasons, Sigfox is moving a step forward on API rate limiting, by setting upper bounds for some API endpoints. Please note that a new HTTP response will be returned in case of rate exceeded : “HTTP 429: too many requests”. For more information check [API Rate limiting](https://support.sigfox.com/docs/api-rate-limiting) policy. Sigfox reserves the right to modify these limits without notice. # Versioning Sigfox API supports versioning of its endpoints through a version suffix in the endpoint URL. This suffix has the following format: \"vX\", where X is the version number. For example: v2/device. All requests must include the version suffix in the endpoint URL. Any new backwards-incompatible change will be released in a new version. Read the [API versioning management](https://storage.sbg1.cloud.ovh.net/v1/AUTH_669d7dfced0b44518cb186841d7cbd75/prod_docs/55746591-API_Versioning_management.pdf) to learn more about it. # Paging Some API requests will return a list of data. If the list is longer than the set limit, the items will be retrieved via multiple requests. The paging section in the response will specify a URL for the next request. Keep in mind rate limiting policy to manage your requests. You can use the limit parameter to limit the number of items to be returned, between 1 and 100 (default). The offset parameter is used to specify a number of items to skip. # Errors Sigfox API uses conventional HTTP response codes to indicate the success or failure of an API request. Codes in the 2xx range indicate success. Codes in the 4xx range indicate an error that failed given the information provided (e.g. a required parameter missing, a resource was not found, etc.). Often the response will also include a message explaining the error. 
Codes in the 5xx range indicate an error with servers. For more information please refer to the [Response code article](https://support.sigfox.com/docs/api-response-code-references). # noqa: E501
OpenAPI spec version: 2.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class CellularConnectivityBase(object):
    """Swagger model describing a base station's cellular connectivity
    configuration.

    Originally produced by swagger-codegen; behaviour is kept identical to
    the generated class.
    """

    # attribute name -> declared swagger type
    swagger_types = {
        'name': 'str',
        'apn': 'str',
        'username': 'str',
        'password': 'str',
        'pin': 'str',
        'roaming': 'bool'
    }

    # attribute name -> JSON key in the API definition
    attribute_map = {
        'name': 'name',
        'apn': 'apn',
        'username': 'username',
        'password': 'password',
        'pin': 'pin',
        'roaming': 'roaming'
    }

    def __init__(self, name=None, apn=None, username=None, password=None, pin=None, roaming=None):
        """Build a CellularConnectivityBase; `name` and `apn` are required."""
        self._name = None
        self._apn = None
        self._username = None
        self._password = None
        self._pin = None
        self._roaming = None
        self.discriminator = None
        # Required fields always go through their validating setters.
        self.name = name
        self.apn = apn
        # Optional fields are only assigned when a value was supplied.
        for attr, value in (('username', username), ('password', password),
                            ('pin', pin), ('roaming', roaming)):
            if value is not None:
                setattr(self, attr, value)

    @property
    def name(self):
        """str: the name of the configuration (required)."""
        return self._name

    @name.setter
    def name(self, name):
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")
        self._name = name

    @property
    def apn(self):
        """str: APN used to connect the base station (required)."""
        return self._apn

    @apn.setter
    def apn(self, apn):
        if apn is None:
            raise ValueError("Invalid value for `apn`, must not be `None`")
        self._apn = apn

    @property
    def username(self):
        """str: username for the cellular connection (optional, unsettable)."""
        return self._username

    @username.setter
    def username(self, username):
        self._username = username

    @property
    def password(self):
        """str: password for the cellular connection (optional, unsettable)."""
        return self._password

    @password.setter
    def password(self, password):
        self._password = password

    @property
    def pin(self):
        """str: SIM PIN for the cellular connection (optional, unsettable)."""
        return self._pin

    @pin.setter
    def pin(self, pin):
        self._pin = pin

    @property
    def roaming(self):
        """bool: whether the connectivity is registered on a roaming network."""
        return self._roaming

    @roaming.setter
    def roaming(self, roaming):
        self._roaming = roaming

    def to_dict(self):
        """Return the model's properties as a plain dict."""
        def _convert(value):
            # Recursively serialise nested models, lists and dicts.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _convert(getattr(self, attr)) for attr in self.swagger_types}
        if issubclass(CellularConnectivityBase, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the pretty-printed string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two models are equal when all their attributes are equal."""
        return isinstance(other, CellularConnectivityBase) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| [
"JacoB3@discovery.co.za"
] | JacoB3@discovery.co.za |
1d2be9dea9766f873a9dbdadadf1fdabc7f5fa98 | fecca37427d8f6d3b2e818c16d0cb4d4d26a0092 | /job_helper.py | 72ac9509ef3542d67c0968affd81648238ea45e7 | [
"MIT"
] | permissive | TheoPis/cutmix-semisup-seg | 88993d3582b59c588bc8470f3a679879330ddb88 | d5f7f58a202ec16d0276eec5a1160fd14c1d4b26 | refs/heads/master | 2022-11-13T10:16:19.764384 | 2020-07-10T08:46:07 | 2020-07-10T08:46:07 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,414 | py | import os
import inspect
import sys, re
# Regexes that extract the numeric job index from names in the results dir.
# Fix: use raw strings — '\d' in a plain string literal is an invalid escape
# sequence (SyntaxWarning in modern CPython).
LOG_PREFIX = re.compile(r'log_(\d+)')        # e.g. log_0012_desc.txt -> '0012'
JOB_DIR_PREFIX = re.compile(r'(\d+)')        # e.g. 0012_desc -> '0012'
class LogAlreadyExistsError (Exception):
    """Raised when a job's log file or output directory already exists,
    i.e. the same job name/description has been submitted before."""
    pass
class Logger(object):
    """Tee for a text stream: everything written is appended to the file at
    `path` and forwarded to the wrapped `stream` (e.g. sys.stdout)."""

    def __init__(self, path, stream):
        self.path = path
        self.stream = stream

    def write(self, x):
        # Append to the log file first (opened per write, so nothing is
        # held open between calls), then mirror to the live stream.
        with open(self.path, 'a+') as log_file:
            log_file.write(x)
        self.stream.write(x)

    def flush(self):
        # Only the wrapped stream buffers; the log file is closed per write.
        self.stream.flush()
class SubmitConfig (object):
    """Per-job output configuration.

    Computes the log file path and output directory under
    results/<job_name>/ and can tee the process stdout/stderr into the log.
    A job_desc of 'none' disables logging and output-dir creation entirely.
    """
    def __init__(self, job_name, job_desc, enumerate_job_names):
        res_dir = os.path.join('results', job_name)
        if not os.path.exists(res_dir):
            os.makedirs(res_dir, exist_ok=True)
        if job_desc == 'none':
            # Sentinel value: no log file, no job output directory.
            log_path = None
            job_out_dir = None
        else:
            if enumerate_job_names:
                # Scan existing log files and job dirs for the highest
                # numeric prefix and pick the next free index.
                job_index = 0
                res_dir_contents = os.listdir(res_dir)
                for name in res_dir_contents:
                    m = LOG_PREFIX.match(name)
                    if m is not None:
                        job_index = max(job_index, int(m.group(1)) + 1)
                    m = JOB_DIR_PREFIX.match(name)
                    if m is not None:
                        job_index = max(job_index, int(m.group(1)) + 1)
                log_path = os.path.join(res_dir, 'log_{:04d}_{}.txt'.format(job_index, job_desc))
                job_out_dir = os.path.join(res_dir, '{:04d}_{}'.format(job_index, job_desc))
            else:
                log_path = os.path.join(res_dir, 'log_{}.txt'.format(job_desc))
                job_out_dir = os.path.join(res_dir, job_desc)
            # Refuse to clobber a previous run with the same name/description.
            if os.path.exists(log_path) or os.path.exists(job_out_dir):
                raise LogAlreadyExistsError
        self.log_path = log_path
        self.job_out_dir = job_out_dir
        # Run-dir created on the fly
        self.__run_dir = None
        if self.log_path is not None:
            # Tee objects for stdout/stderr; installed by connect_streams().
            self.__stdout = Logger(self.log_path, sys.stdout)
            self.__stderr = Logger(self.log_path, sys.stderr)

    @property
    def run_dir(self):
        # Lazily create the output directory on first access.
        if self.__run_dir is None:
            # Make the run dir to receive output
            self.__run_dir = self.job_out_dir
            os.makedirs(self.__run_dir, exist_ok=True)
        return self.__run_dir

    def connect_streams(self):
        # Redirect the process-wide stdout/stderr through the tee loggers.
        if self.log_path is not None:
            sys.stdout = self.__stdout
            sys.stderr = self.__stderr

    def disconnect_streams(self):
        # Restore the original streams captured at construction time.
        if self.log_path is not None:
            sys.stdout = self.__stdout.stream
            sys.stderr = self.__stderr.stream
# dnnlib not available; we're not running at nVidia Helsinki; run locally
def job(job_name, enumerate_job_names=True):
    """
    Decorator to turn a function into a locally-runnable job submitter.

    Usage:

    >>> @job('wait_some_time')
    ... def wait_some_time(submit_config: SubmitConfig, iteration_count):
    ...     fn = os.path.join(submit_config.run_dir, "output.txt")
    ...     with open(fn, 'w') as f:
    ...         f.write("Works!")

    To submit a job:

    >>> wait_some_time.submit(job_desc='description_to_identify_specific_job', iteration_count=50)

    Fixes versus the original: removed the unused `valid_targets` set and the
    docstring entries for parameters (`module_name`, `docker_image`,
    `num_gpus`) that this local-only implementation never had.

    :param job_name: Default name given to submitted jobs; can be overridden
        per submission via the `job_name` kwarg of `.submit()`.
    :param enumerate_job_names: If True, log files and output directories are
        prefixed with an auto-incremented, zero-padded job index.
    """
    def decorate(job_fn):
        def run_job(**kwargs):
            # Allow the caller to override the job name at submit time.
            specific_job_name = kwargs.pop('job_name', None)
            if specific_job_name == '':
                specific_job_name = None
            if specific_job_name is None:
                specific_job_name = job_name
            # quota_group only makes sense on the dnnlib/cluster path.
            quota_group = kwargs.pop('quota_group', None)
            if quota_group is not None and quota_group != '':
                raise ValueError('quota_group not supported when dnnlib is not available')
            job_desc_arg = kwargs.pop('job_desc', None)
            if job_desc_arg is None or job_desc_arg == '':
                job_desc_arg = specific_job_name
            try:
                submit_config = SubmitConfig(specific_job_name, job_desc_arg, enumerate_job_names)
            except LogAlreadyExistsError:
                # Idempotence: a job with the same name/desc already ran.
                print('Job {}:{} already executed; skipping'.format(specific_job_name, job_desc_arg))
            else:
                print('[NO dnnlib] logging to {}'.format(submit_config.log_path))
                submit_config.connect_streams()
                try:
                    job_fn(submit_config, **kwargs)
                finally:
                    # Always restore stdout/stderr, even if the job raised.
                    submit_config.disconnect_streams()
        # The decorated function is returned unchanged, with `.submit` attached.
        job_fn.submit = run_job
        return job_fn
    return decorate
| [
"brittix1023@gmail.com"
] | brittix1023@gmail.com |
634ad5ca3726b53d84a17fb2426236c7c123d9f2 | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/1/bWv.py | c3d10fc535b8dd767ef024308a9e13b87d4d864b | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
if len(lineRemaining) > 2:
#data to print
lineRemaining = lineRemaining[1:-1]
print ' '.join(lineRemaining)
else:
print
def main(fileName):
with open(fileName) as f:
for line in f:
data = line.split()
if data[0] == 'bWV':
printFunction(data[1:])
else:
print 'ERROR'
return
if __name__ == '__main__':
    # CLI entry point: bWv.py <program-file>
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
32c5685349516c253ce1c6823ff31d31d95fb77e | c9f1cc3a6715917d658a6e525b7c2d35b0380f9f | /Non_canonical_introns/SIM2/random_print.py | 6c29297bbbdef8a3c5f1bced21b3825b26a980b3 | [] | no_license | geparada/my_src | 4f84887130b985e84aad3d0d35e85911087d9b4f | 8d64f7ef51e1f74303ca88beb0ee964f546d8301 | refs/heads/master | 2021-01-17T01:50:50.414690 | 2017-03-14T10:01:50 | 2017-03-14T10:01:50 | 20,638,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 278 | py | import sys
import csv
import random
def ran_printer(bigfile, P):
    """Print each tab-separated row of `bigfile` (space-joined) with
    independent probability P (0.0 - 1.0).

    Fix: the input file is now closed deterministically via a context
    manager instead of being leaked by csv.reader(open(...)).
    """
    with open(bigfile) as handle:
        reader = csv.reader(handle, dialect='excel-tab')
        for row in reader:
            if random.random() <= P:
                # Single-argument print(...) behaves identically on py2/py3.
                print(" ".join(row))
if __name__ == '__main__':
    # CLI: random_print.py <tab-separated-file> <probability>
    ran_printer(sys.argv[1], float(sys.argv[2]) )
| [
"geparada@omics.(none)"
] | geparada@omics.(none) |
cc0b891a842f0d95cbaa4fc90d61413db3782199 | 85c426913d63773c4802a4a3c354df909030654b | /python/FA3/Integration/Copy of Copy of PyFood me and rohita/Copy of Copy of PyFood me and rohita/validations/Validate.py | 02ad4496540e48daae0060d8a176a66e271c35b7 | [] | no_license | SensehacK/playgrounds | 17bf2a3133db6c0cafe185c4cc2c7b59862980aa | 3decd550cdb6034db8b497051acaaec8221073aa | refs/heads/master | 2023-05-11T20:05:31.680168 | 2023-04-30T00:01:58 | 2023-04-30T00:01:58 | 159,632,542 | 1 | 0 | null | 2023-03-05T11:34:34 | 2018-11-29T08:27:53 | Python | UTF-8 | Python | false | false | 5,566 | py | '''
Created on Mar 15, 2017
@author: kautilya.save
'''
from database import ViewDB,searchdb
from exceptions import CustomException2
def validate_search_category(city, area):
    """Guest search: all restaurants for (city, area); raises
    Invalidcityareaname when the pair matches nothing."""
    rows = searchdb.search_as_a_guest(city, area)
    if len(rows) == 0:
        raise CustomException2.Invalidcityareaname()
    return rows
def validate_search_as_rating(city, area, rating_lower, rating_upper):
    """Restaurants in (city, area) within a rating range; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_rating(city, area, rating_lower, rating_upper)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_likes(city, area):
    """Restaurants in (city, area) filtered by likes; raises Invalidfilter
    when the query matches nothing.

    Bug fix: this was the only validator in the module that did not return
    its result list, so callers always received None even on success.
    """
    rows = searchdb.search_as_likes(city, area)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_dislikes(city, area):
    """Restaurants in (city, area) filtered by dislikes; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_dislikes(city, area)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_type(city, area, var1):
    """Restaurants in (city, area) of a given type; raises Invalidfilter
    when the query matches nothing."""
    rows = searchdb.search_as_type(city, area, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_rating_dislikes(city, area, rating_lower, rating_upper):
    """Rating-range search combined with dislikes; raises Invalidfilter
    when the query matches nothing."""
    rows = searchdb.search_as_rating_dislikes(city, area, rating_lower, rating_upper)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_rating_likes(city, area, rating_lower, rating_upper):
    """Rating-range search combined with likes; raises Invalidfilter when
    the query matches nothing."""
    rows = searchdb.search_as_rating_likes(city, area, rating_lower, rating_upper)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_rating_type(city, area, rating_lower, rating_upper, var):
    """Rating-range search combined with restaurant type; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_rating_type(city, area, rating_lower, rating_upper, var)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_dislike_like(city, area):
    """Search combining dislikes and likes; raises Invalidfilter when the
    query matches nothing."""
    rows = searchdb.search_as_dislike_like(city, area)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_dislike_type(city, area, var1):
    """Search combining dislikes and restaurant type; raises Invalidfilter
    when the query matches nothing."""
    rows = searchdb.search_as_dislike_type(city, area, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_like_type(city, area, var1):
    """Search combining likes and restaurant type; raises Invalidfilter
    when the query matches nothing."""
    rows = searchdb.search_as_like_type(city, area, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_rating_dislike_like(city, area, rating_lower, rating_upper):
    """Rating-range search combining dislikes and likes; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_rating_dislike_like(city, area, rating_lower, rating_upper)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_rating_dislike_type(city, area, rating_lower, rating_upper, var1):
    """Rating-range search combining dislikes and restaurant type; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_rating_dislike_type(city, area, rating_lower, rating_upper, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_dislike_like_type(city, area, var1):
    """Search combining dislikes, likes and restaurant type; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_dislike_like_type(city, area, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_like_type_rating(city, area, rating_lower, rating_upper, var1):
    """Search combining likes, restaurant type and a rating range; raises
    Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_like_type_rating(city, area, rating_lower, rating_upper, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_search_as_all(city, area, rating_lower, rating_upper, var1):
    """Search applying every filter at once (rating range, likes/dislikes,
    type); raises Invalidfilter when the query matches nothing."""
    rows = searchdb.search_as_all(city, area, rating_lower, rating_upper, var1)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_hotel_name(city, area, restaurant_name):
    """Look up a restaurant by name within (city, area); raises
    Invalidfilter when nothing matches."""
    rows = searchdb.hotel_name(city, area, restaurant_name)
    if len(rows) == 0:
        raise CustomException2.Invalidfilter()
    return rows
def validate_view_category(restaurant_type):
    """Return the menu categories of a restaurant type; raises
    InvalidCategoryException when the type is unknown/empty."""
    categories = ViewDB.get_restaurant_categories(restaurant_type)
    if len(categories) == 0:
        raise CustomException2.InvalidCategoryException()
    return categories
def validate_view_category_items(category):
    """Return the food items of a menu category; raises
    InvalidCatItemsException when the category has no items."""
    items = ViewDB.get_categories_fooditems(category)
    if len(items) == 0:
        # NOTE(review): the exception class is raised uninstantiated here
        # (no parentheses), matching the original; Python instantiates it
        # automatically — consider adding () for consistency with siblings.
        raise CustomException2.InvalidCatItemsException
    return items
| [
"kautilyasave@gmail.com"
] | kautilyasave@gmail.com |
58cff43dd9b00a860369424cd66fd9750167eee5 | 3712a929d1124f514ea7af1ac0d4a1de03bb6773 | /开班笔记/python数据分析机器学习部分/机器学习/day06/tf.py | c6bbc842ae1c6bfc7f9cff4b546a67aee8307fc8 | [] | no_license | jiyabing/learning | abd82aa3fd37310b4a98b11ea802c5b0e37b7ad9 | 6059006b0f86aee9a74cfc116d2284eb44173f41 | refs/heads/master | 2020-04-02T20:47:33.025331 | 2018-10-26T05:46:10 | 2018-10-26T05:46:10 | 154,779,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 579 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import nltk.tokenize as tk
import sklearn.feature_extraction.text as ft
import sklearn.preprocessing as sp
# Tiny TF (term-frequency) demo: sentence-tokenise a document, build a
# bag-of-words matrix, then L1-normalise each row into term frequencies.
doc = 'The brown dog is running. ' \
    'The black dog is in the black room. ' \
    'Running in the room is forbidden.'
print(doc)
# Split the document into sentences (NLTK sentence tokenizer).
sentences = tk.sent_tokenize(doc)
for i, sentence in enumerate(sentences):
    print(i + 1, sentence)
# Bag-of-words: one row per sentence, one column per vocabulary word.
cv = ft.CountVectorizer()
bow = cv.fit_transform(sentences).toarray()
print(bow)
words = cv.get_feature_names()
print(words)
# 'l1' norm makes each row sum to 1 -> per-sentence term frequencies.
tf = sp.normalize(bow, norm='l1')
print(tf)
| [
"yabing_ji@163.com"
] | yabing_ji@163.com |
b093c8113f7bbff2923760d0f934c28f35a0c438 | 244e751aa882c6df1abb04db8a4de70a0e804ece | /Lesson 8/01 - Visualization 1.py | b8d7326fd3af2964c0f679a09c10a1aa3ab5f3de | [] | no_license | Mostafa-At-GitHub/Intro-to-Data-Science--Udacity | 0c0656234a26edee84b430745302f330b8857885 | 6519e60eb23df6568d64f47cfe7d8600acb8e933 | refs/heads/master | 2021-09-01T00:40:13.821220 | 2017-12-23T20:07:47 | 2017-12-23T20:07:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,793 | py | import pandas as pd
from ggplot import *
def plot_weather_data(turnstile_weather):
'''
You are passed in a dataframe called turnstile_weather.
Use turnstile_weather along with ggplot to make a data visualization
focused on the MTA and weather data we used in assignment #3.
You should feel free to implement something that we discussed in class
(e.g., scatterplots, line plots, or histograms) or attempt to implement
something more advanced if you'd like.
Here are some suggestions for things to investigate and illustrate:
* Ridership by time of day or day of week
* How ridership varies based on Subway station (UNIT)
* Which stations have more exits or entries at different times of day
(You can use UNIT as a proxy for subway station.)
If you'd like to learn more about ggplot and its capabilities, take
a look at the documentation at:
https://pypi.python.org/pypi/ggplot/
You can check out:
https://s3.amazonaws.com/content.udacity-data.com/courses/ud359/turnstile_data_master_with_weather.csv
To see all the columns and data points included in the turnstile_weather
dataframe. However, due to the limitation of our Amazon EC2 server, we are giving you a random
subset, about 1/3 of the actual data in the turnstile_weather dataframe.
'''
temp_df = turnstile_weather[['rain','ENTRIESn_hourly', 'EXITSn_hourly']]
df = temp_df.groupby("rain", as_index=False).sum()
df["rain"][0] = "No Rain"
df["rain"][1] = "Rain"
print df
plot = ggplot(df, aes(x="rain", weight="ENTRIESn_hourly"))
plot += ggtitle("Density per Station")
plot += geom_bar(stat="identity")
plot += xlab('Station')
plot += ylab("Denisty per day")
return plot
| [
"mohamedanwarvic@gmail.com"
] | mohamedanwarvic@gmail.com |
51bafdf4e50b66a4ee9225e493ae81aad6aa8505 | c70ac4c4f159bf9d0d06870a8975143885b067de | /it_courses/wsgi.py | 7849650c3d17ba2337b969d120fbc654eb8a8652 | [] | no_license | assigdev/it_courses | b494856e43d02d4ba78018635c3b9fefb48c8522 | a2e80fdac0e0e8d43c0b32b7b00cf3ea3c8f7c26 | refs/heads/master | 2020-03-06T18:24:38.075774 | 2018-05-07T21:33:48 | 2018-05-07T21:33:48 | 127,006,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 397 | py | """
WSGI config for it_courses project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "it_courses.settings")
# WSGI entry point picked up by application servers (gunicorn/uwsgi/mod_wsgi).
application = get_wsgi_application()
| [
"assigdev@gmail.com"
] | assigdev@gmail.com |
2e829c6947f7da5b59fd8d9fd9451aeeaf9e4329 | 917a27a988bd9617115368f549e26b8a265bf7f9 | /fb1/migrations/0023_auto_20200805_1246.py | 72f2afc5826d7dc5e6d2b996922193d2a812a4d3 | [] | no_license | anand0101/FbPost | 0a33ee710120d4c4b753aa87b04a9166605653fe | c649a44070572b286412369d4975cca82c55cdd8 | refs/heads/master | 2022-11-28T08:01:29.967598 | 2020-08-06T12:21:28 | 2020-08-06T12:21:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | # Generated by Django 3.0.6 on 2020-08-05 07:16
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations; edit with care — applied
    # migrations must stay consistent with the recorded migration history.

    dependencies = [
        ('fb1', '0022_auto_20200804_0616'),
    ]

    operations = [
        migrations.CreateModel(
            name='Newsdata',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('imagelink', models.CharField(blank=True, max_length=422)),
                ('heading', models.CharField(blank=True, max_length=442)),
                ('body', models.TextField(blank=True)),
                ('date', models.DateTimeField(blank=True)),
            ],
        ),
        # NOTE(review): the defaults below are timestamps frozen at
        # makemigrations time — the models probably intended auto_now_add or
        # timezone.now; confirm against the model definitions before reuse.
        migrations.AlterField(
            model_name='imagepost',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 5, 12, 46, 30, 706606)),
        ),
        migrations.AlterField(
            model_name='postcomment',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 5, 12, 46, 30, 712607)),
        ),
        migrations.AlterField(
            model_name='videopost',
            name='date',
            field=models.DateTimeField(default=datetime.datetime(2020, 8, 5, 12, 46, 30, 709606)),
        ),
    ]
| [
"ansarihasnain3598@gmail.com"
] | ansarihasnain3598@gmail.com |
e29d72cdd89552c4900a47d83ec088569c38a8d6 | 0f5f6ff75cef3e81419216ba0191bb69a26c9068 | /aws/debugging.py | b4f37180a4ccbd3532904ea44281823bfc7115e0 | [] | no_license | daddyawesome/CodingP | 1c7bbb2edd30333f7cb1b78ec6a0428854c4fa2b | 66ab4e452c23f3f770d6ad1e32f604c65e1dcbd3 | refs/heads/master | 2022-10-13T18:36:23.068195 | 2022-10-04T07:01:58 | 2022-10-04T07:01:58 | 220,047,911 | 0 | 0 | null | 2020-07-07T20:49:07 | 2019-11-06T17:01:44 | Python | UTF-8 | Python | false | false | 430 | py | # Ask the user for a value and confirm the supplied value is greater than 0
def checkvalue(valuetocheck):
assert (type(valuetocheck) is int), "You must enter a number."
assert (valuetocheck > 0), "Value entered must be greater than 0"
if valuetocheck > 4:
print("Value is greater than 4")
else:
print("Value is lesser than 4")
var = int(input("Enter a number greater than 0: "))
checkvalue(var) | [
"sablay296@gmail.com"
] | sablay296@gmail.com |
f2740e448fe9f797da84617de0a145d889873a4b | 197ad5eecd8d5fb46e75dff67bab3be96dd961b0 | /graphene_mongoengine/fields.py | 3865af262e84bcb967d8544f664515fb44cf19c2 | [] | no_license | tomasgarzon/graphene-mongoengine | c18a5b51e411e905a8890bdc542898673d0280a7 | ecc2116739f56d065c07024c3082958f490307f5 | refs/heads/master | 2021-01-20T09:20:10.272424 | 2017-05-04T08:25:44 | 2017-05-04T08:25:44 | 90,237,184 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,272 | py | from functools import partial
from django.db.models.query import QuerySet
from graphene.types import Field, List
from graphene.relay import ConnectionField, PageInfo
from graphql_relay.connection.arrayconnection import connection_from_list_slice
from graphene_django.utils import maybe_queryset
from .utils import get_type_for_document
class MongoEngineListField(Field):
def __init__(self, _type, *args, **kwargs):
super(MongoEngineListField, self).__init__(List(_type), *args, **kwargs)
@property
def model(self):
return self.type.of_type._meta.node._meta.document
@staticmethod
def list_resolver(resolver, root, args, context, info):
return maybe_queryset(resolver(root, args, context, info))
def get_resolver(self, parent_resolver):
return partial(self.list_resolver, parent_resolver)
class MongoEngineConnectionField(ConnectionField):
def __init__(self, *args, **kwargs):
self.on = kwargs.pop('on', False)
super(MongoEngineConnectionField, self).__init__(*args, **kwargs)
@property
def model(self):
return self.type._meta.node._meta.document
def get_manager(self):
if self.on:
return getattr(self.model, self.on)
else:
return None
@staticmethod
def connection_resolver(resolver, connection, default_manager, root, args, context, info):
iterable = resolver(root, args, context, info)
if iterable is None:
iterable = default_manager
iterable = maybe_queryset(iterable)
if isinstance(iterable, QuerySet):
_len = iterable.count()
else:
_len = len(iterable)
connection = connection_from_list_slice(
iterable,
args,
slice_start=0,
list_length=_len,
list_slice_length=_len,
connection_type=connection,
edge_type=connection.Edge,
pageinfo_type=PageInfo,
)
connection.iterable = iterable
connection.length = _len
return connection
def get_resolver(self, parent_resolver):
return partial(self.connection_resolver, parent_resolver, self.type, self.get_manager())
class MongoEngineDocumentField(Field):
def __init__(self, document, *args, **kwargs):
self.document = document
super(MongoEngineDocumentField, self).__init__(*args, **kwargs)
def internal_type(self, schema):
_type = self.get_object_type(schema)
if not _type and self.parent._meta.only_fields:
raise Exception(
"Collection %r is not accessible by the schema. "
"You can either register the type manually "
"using @schema.register. "
"Or disable the field in %s" % (
self.document,
self.parent,
)
)
return schema.T(_type)
def get_object_type(self, schema):
return get_type_for_document(schema, self.document)
@property
def List(self):
return List(self, *self.args, **self.kwargs)
def get_connection_field(*args, **kwargs):
return MongoEngineConnectionField(*args, **kwargs)
| [
"tomasgarzonhervas@gmail.com"
] | tomasgarzonhervas@gmail.com |
fb81536c36d1bcf3197847f734633a588c8236d2 | 84db91ca8e14687251eca5d4ffe1a50fde89e4d9 | /parsbot/chat/migrations/0001_initial.py | 449f59a6375bd31f545fba965f0a53fd9338819d | [
"MIT"
] | permissive | aodarc/project009 | 426fba89f7978dec5605eadf5b72932f8866201d | 3fcd795cd936223442cf09a0a3494fc0fb54ceb3 | refs/heads/master | 2020-03-28T14:33:40.728781 | 2018-09-24T20:12:15 | 2018-09-24T20:12:15 | 148,499,017 | 0 | 1 | MIT | 2018-09-24T15:52:58 | 2018-09-12T15:08:02 | Python | UTF-8 | Python | false | false | 1,583 | py | # Generated by Django 2.1.1 on 2018-09-24 17:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(auto_created=True)),
('modified_at', models.DateTimeField(auto_now_add=True)),
('title', models.CharField(max_length=255, verbose_name='Products')),
('url', models.URLField(verbose_name='Product URL')),
('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductHistory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('create_at', models.DateTimeField(auto_created=True)),
('price', models.DecimalField(decimal_places=2, max_digits=19)),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='history', to='chat.Product')),
],
),
]
| [
"odarchenko@ex.ua"
] | odarchenko@ex.ua |
72a8fb5dde9f4d106f9351a271aa41bde1edf893 | 97062249c6eb04069c6fb01e71d06bc334c828e1 | /apps/sqoop/src/sqoop/api/submission.py | 44009836ab097b7541ac0a0f966c8dd8f1f024fe | [
"Apache-2.0"
] | permissive | Albertsss/hue | 1c8b31c64cc420a029f5b5b80712fb3d0c6cbd6e | 454d320dd09b6f7946f3cc05bc97c3e2ca6cd485 | refs/heads/master | 2021-07-08T17:21:13.237871 | 2018-05-30T06:03:21 | 2018-05-30T06:03:21 | 135,386,450 | 0 | 1 | Apache-2.0 | 2020-07-25T13:36:58 | 2018-05-30T04:06:18 | Python | UTF-8 | Python | false | false | 2,077 | py | #!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import socket
from django.utils.translation import ugettext as _
from sqoop import client, conf
from decorators import get_submission_or_exception
from desktop.lib.django_util import JsonResponse
from desktop.lib.exceptions import StructuredException
from desktop.lib.rest.http_client import RestException
from exception import handle_rest_exception
from utils import list_to_dict
from django.views.decorators.cache import never_cache
__all__ = ['get_submissions', 'submissions']
LOG = logging.getLogger(__name__)
@never_cache
def get_submissions(request):
response = {
'status': 0,
'errors': None,
'submissions': []
}
status = request.GET.get('status', 'submissions').split(',')
try:
c = client.SqoopClient(conf.SERVER_URL.get(), request.user.username, request.LANGUAGE_CODE)
submissions = c.get_submissions()
response['submissions'] = list_to_dict(submissions)
except RestException, e:
response.update(handle_rest_exception(e, _('Could not get submissions.')))
return JsonResponse(response)
@never_cache
def submissions(request):
if request.method == 'GET':
return get_submissions(request)
else:
raise StructuredException(code="INVALID_METHOD", message=_('GET request required.'), error_code=405)
| [
"540227148@qq.com"
] | 540227148@qq.com |
162235d8d5abbef1ca2ce0a7af548072e397fc91 | 4fee81e2fa833911ea2f65ee975cf7cc1724b6a2 | /venv/Lib/site-packages/pyrogram/raw/functions/channels/toggle_signatures.py | eef32ab59dc28ad7b0d53afa60d8b335f7bcd8e5 | [] | no_license | devID767/Farm_Duel_Bot | 453628d2bcaba27dab01338d9368e6727781901e | aaab3c16ec6626f5196f24f741bfeecc63492116 | refs/heads/master | 2023-08-14T22:06:06.705300 | 2021-10-08T13:50:15 | 2021-10-08T13:50:15 | 360,924,958 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,435 | py | # Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
from io import BytesIO
from pyrogram.raw.core.primitives import Int, Long, Int128, Int256, Bool, Bytes, String, Double, Vector
from pyrogram.raw.core import TLObject
from pyrogram import raw
from typing import List, Union, Any
# # # # # # # # # # # # # # # # # # # # # # # #
# !!! WARNING !!! #
# This is a generated file! #
# All changes made in this file will be lost! #
# # # # # # # # # # # # # # # # # # # # # # # #
class ToggleSignatures(TLObject): # type: ignore
"""Telegram API method.
Details:
- Layer: ``126``
- ID: ``0x1f69b606``
Parameters:
channel: :obj:`InputChannel <pyrogram.raw.base.InputChannel>`
enabled: ``bool``
Returns:
:obj:`Updates <pyrogram.raw.base.Updates>`
"""
__slots__: List[str] = ["channel", "enabled"]
ID = 0x1f69b606
QUALNAME = "functions.channels.ToggleSignatures"
def __init__(self, *, channel: "raw.base.InputChannel", enabled: bool) -> None:
self.channel = channel # InputChannel
self.enabled = enabled # Bool
@staticmethod
def read(data: BytesIO, *args: Any) -> "ToggleSignatures":
# No flags
channel = TLObject.read(data)
enabled = Bool.read(data)
return ToggleSignatures(channel=channel, enabled=enabled)
def write(self) -> bytes:
data = BytesIO()
data.write(Int(self.ID, False))
# No flags
data.write(self.channel.write())
data.write(Bool(self.enabled))
return data.getvalue()
| [
"dsarnavskiy77@gmail.com"
] | dsarnavskiy77@gmail.com |
43e61b6c59561aa8e5347f9f382de8b3bafcb311 | c46becf6497484e4f0a904ad0104a3e971982481 | /upbit.py | 9cd7ccc8e666e7e4d0f154d05560fef830082c31 | [] | no_license | damoa-recommend/time-series-ARIMA | 4cfa8213cda0bb4843c583aad94185f27540372a | cb11f7bc0e98c96d0cc2bf532ac46da8ef586240 | refs/heads/master | 2023-03-26T20:38:18.728032 | 2021-03-28T06:12:58 | 2021-03-28T06:12:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,575 | py | from websocket import WebSocketApp
import json, ssl, asyncio
from datetime import datetime
from model import add_data, fit, forecast
try:
import thread
except ImportError:
import _thread as thread
import time
def on_message(ws, message):
msg = json.loads(message.decode('utf-8'))
KRW_RATE = 1129
price = msg["trade_price"] / KRW_RATE
ts = datetime.fromtimestamp(int(msg["trade_timestamp"]) / 1000)
add_data({
"ts": ts,
"price": price,
"index": int(msg["trade_timestamp"])
})
fit()
forecast_price = forecast()
print('[%s] 실제가격: %10.2f, 예측가격: %10.2f, 예측가격 대비 실제가격: %10.2f'%(ts, price, forecast_price, (forecast_price-price) * KRW_RATE))
def on_error(ws, error):
print(error)
def on_close(ws):
print("close")
def on_open(ws):
def run(*args):
# https://docs.upbit.com/docs/upbit-quotation-websocket 문서참고
# ticker: 현재가, trade: 채결내역, orderbook: 호가
originData = [
{ "ticket": "UNIQUE_TICKET" },
# { "type": "orderbook", "codes": ["KRW-MTL"], "isOnlyRealtime": True },
{ "type": "ticker", "codes": ["KRW-BTC"] },
# { "type": "trade", "codes": ["KRW-MTL"] }
]
ws.send(json.dumps(originData))
thread.start_new_thread(run, ())
if __name__ == "__main__":
fit()
ws = WebSocketApp(
"wss://api.upbit.com/websocket/v1",
on_message = on_message,
on_error = on_error,
on_close = on_close,
)
ws.on_open = on_open
ws.run_forever(sslopt={"cert_reqs": ssl.CERT_NONE}) | [
"pjt3591oo@gmail.com"
] | pjt3591oo@gmail.com |
9868dafa6ae43df16ef95315c834cbd07783c1f1 | 0a1f8957a798006deaa53d10d09f733fab1e6b05 | /bin/Python27/Lib/site-packages/sympy/mpmath/function_docs.py | d366172b8daea5a20531b2c580cee1a18f7e52e8 | [
"LicenseRef-scancode-other-permissive"
] | permissive | metamorph-inc/meta-core | a89504ccb1ed2f97cc6e792ba52e3a6df349efef | bc7a05e04c7901f477fe553c59e478a837116d92 | refs/heads/master | 2023-03-07T02:52:57.262506 | 2023-03-01T18:49:49 | 2023-03-01T18:49:49 | 40,361,476 | 25 | 15 | NOASSERTION | 2023-01-13T16:54:30 | 2015-08-07T13:21:24 | Python | UTF-8 | Python | false | false | 276,840 | py | """
Extended docstrings for functions.py
"""
pi = r"""
`\pi`, roughly equal to 3.141592654, represents the area of the unit
circle, the half-period of trigonometric functions, and many other
things in mathematics.
Mpmath can evaluate `\pi` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +pi
3.1415926535897932384626433832795028841971693993751
This shows digits 99991-100000 of `\pi`::
>>> mp.dps = 100000
>>> str(pi)[-10:]
'5549362464'
**Possible issues**
:data:`pi` always rounds to the nearest floating-point
number when used. This means that exact mathematical identities
involving `\pi` will generally not be preserved in floating-point
arithmetic. In particular, multiples of :data:`pi` (except for
the trivial case ``0*pi``) are *not* the exact roots of
:func:`~mpmath.sin`, but differ roughly by the current epsilon::
>>> mp.dps = 15
>>> sin(pi)
1.22464679914735e-16
One solution is to use the :func:`~mpmath.sinpi` function instead::
>>> sinpi(1)
0.0
See the documentation of trigonometric functions for additional
details.
"""
degree = r"""
Represents one degree of angle, `1^{\circ} = \pi/180`, or
about 0.01745329. This constant may be evaluated to arbitrary
precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +degree
0.017453292519943295769236907684886127134428718885417
The :data:`degree` object is convenient for conversion
to radians::
>>> sin(30 * degree)
0.5
>>> asin(0.5) / degree
30.0
"""
e = r"""
The transcendental number `e` = 2.718281828... is the base of the
natural logarithm (:func:`~mpmath.ln`) and of the exponential function
(:func:`~mpmath.exp`).
Mpmath can be evaluate `e` to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +e
2.7182818284590452353602874713526624977572470937
This shows digits 99991-100000 of `e`::
>>> mp.dps = 100000
>>> str(e)[-10:]
'2100427165'
**Possible issues**
:data:`e` always rounds to the nearest floating-point number
when used, and mathematical identities involving `e` may not
hold in floating-point arithmetic. For example, ``ln(e)``
might not evaluate exactly to 1.
In particular, don't use ``e**x`` to compute the exponential
function. Use ``exp(x)`` instead; this is both faster and more
accurate.
"""
phi = r"""
Represents the golden ratio `\phi = (1+\sqrt 5)/2`,
approximately equal to 1.6180339887. To high precision,
its value is::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +phi
1.6180339887498948482045868343656381177203091798058
Formulas for the golden ratio include the following::
>>> (1+sqrt(5))/2
1.6180339887498948482045868343656381177203091798058
>>> findroot(lambda x: x**2-x-1, 1)
1.6180339887498948482045868343656381177203091798058
>>> limit(lambda n: fib(n+1)/fib(n), inf)
1.6180339887498948482045868343656381177203091798058
"""
euler = r"""
Euler's constant or the Euler-Mascheroni constant `\gamma`
= 0.57721566... is a number of central importance to
number theory and special functions. It is defined as the limit
.. math ::
\gamma = \lim_{n\to\infty} H_n - \log n
where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic
number (see :func:`~mpmath.harmonic`).
Evaluation of `\gamma` is supported at arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +euler
0.57721566490153286060651209008240243104215933593992
We can also compute `\gamma` directly from the definition,
although this is less efficient::
>>> limit(lambda n: harmonic(n)-log(n), inf)
0.57721566490153286060651209008240243104215933593992
This shows digits 9991-10000 of `\gamma`::
>>> mp.dps = 10000
>>> str(euler)[-10:]
'4679858165'
Integrals, series, and representations for `\gamma` in terms of
special functions include the following (there are many others)::
>>> mp.dps = 25
>>> -quad(lambda x: exp(-x)*log(x), [0,inf])
0.5772156649015328606065121
>>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1])
0.5772156649015328606065121
>>> nsum(lambda k: 1/k-log(1+1/k), [1,inf])
0.5772156649015328606065121
>>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf])
0.5772156649015328606065121
>>> -diff(gamma, 1)
0.5772156649015328606065121
>>> limit(lambda x: 1/x-gamma(x), 0)
0.5772156649015328606065121
>>> limit(lambda x: zeta(x)-1/(x-1), 1)
0.5772156649015328606065121
>>> (log(2*pi*nprod(lambda n:
... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2
0.5772156649015328606065121
For generalizations of the identities `\gamma = -\Gamma'(1)`
and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see
:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively.
"""
catalan = r"""
Catalan's constant `K` = 0.91596559... is given by the infinite
series
.. math ::
K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}.
Mpmath can evaluate it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +catalan
0.91596559417721901505460351493238411077414937428167
One can also compute `K` directly from the definition, although
this is significantly less efficient::
>>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf])
0.91596559417721901505460351493238411077414937428167
This shows digits 9991-10000 of `K`::
>>> mp.dps = 10000
>>> str(catalan)[-10:]
'9537871503'
Catalan's constant has numerous integral representations::
>>> mp.dps = 50
>>> quad(lambda x: -log(x)/(1+x**2), [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: atan(x)/x, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x: ellipk(x**2)/2, [0, 1])
0.91596559417721901505460351493238411077414937428167
>>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1])
0.91596559417721901505460351493238411077414937428167
As well as series representations::
>>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n:
... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8
0.91596559417721901505460351493238411077414937428167
>>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf])
0.91596559417721901505460351493238411077414937428167
"""
khinchin = r"""
Khinchin's constant `K` = 2.68542... is a number that
appears in the theory of continued fractions. Mpmath can evaluate
it to arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +khinchin
2.6854520010653064453097148354817956938203822939945
An integral representation is::
>>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1])
>>> 2*exp(1/log(2)*I)
2.6854520010653064453097148354817956938203822939945
The computation of ``khinchin`` is based on an efficient
implementation of the following series::
>>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k)
... for k in range(1,2*int(n)))
>>> exp(nsum(f, [1,inf])/log(2))
2.6854520010653064453097148354817956938203822939945
"""
glaisher = r"""
Glaisher's constant `A`, also known as the Glaisher-Kinkelin
constant, is a number approximately equal to 1.282427129 that
sometimes appears in formulas related to gamma and zeta functions.
It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`).
The constant is defined as `A = \exp(1/12-\zeta'(-1))` where
`\zeta'(s)` denotes the derivative of the Riemann zeta function
(see :func:`~mpmath.zeta`).
Mpmath can evaluate Glaisher's constant to arbitrary precision:
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +glaisher
1.282427129100622636875342568869791727767688927325
We can verify that the value computed by :data:`glaisher` is
correct using mpmath's facilities for numerical
differentiation and arbitrary evaluation of the zeta function:
>>> exp(mpf(1)/12 - diff(zeta, -1))
1.282427129100622636875342568869791727767688927325
Here is an example of an integral that can be evaluated in
terms of Glaisher's constant:
>>> mp.dps = 15
>>> quad(lambda x: log(gamma(x)), [1, 1.5])
-0.0428537406502909
>>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2
-0.042853740650291
Mpmath computes Glaisher's constant by applying Euler-Maclaurin
summation to a slowly convergent series. The implementation is
reasonably efficient up to about 10,000 digits. See the source
code for additional details.
References:
http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html
"""
apery = r"""
Represents Apery's constant, which is the irrational number
approximately equal to 1.2020569 given by
.. math ::
\zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}.
The calculation is based on an efficient hypergeometric
series. To 50 decimal places, the value is given by::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +apery
1.2020569031595942853997381615114499907649862923405
Other ways to evaluate Apery's constant using mpmath
include::
>>> zeta(3)
1.2020569031595942853997381615114499907649862923405
>>> -psi(2,1)/2
1.2020569031595942853997381615114499907649862923405
>>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7
1.2020569031595942853997381615114499907649862923405
>>> f = lambda k: 2/k**3/(exp(2*pi*k)-1)
>>> 7*pi**3/180 - nsum(f, [1,inf])
1.2020569031595942853997381615114499907649862923405
This shows digits 9991-10000 of Apery's constant::
>>> mp.dps = 10000
>>> str(apery)[-10:]
'3189504235'
"""
mertens = r"""
Represents the Mertens or Meissel-Mertens constant, which is the
prime number analog of Euler's constant:
.. math ::
B_1 = \lim_{N\to\infty}
\left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right)
Here `p_k` denotes the `k`-th prime number. Other names for this
constant include the Hadamard-de la Vallee-Poussin constant or
the prime reciprocal constant.
The following gives the Mertens constant to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +mertens
0.2614972128476427837554268386086958590515666482612
References:
http://mathworld.wolfram.com/MertensConstant.html
"""
twinprime = r"""
Represents the twin prime constant, which is the factor `C_2`
featuring in the Hardy-Littlewood conjecture for the growth of the
twin prime counting function,
.. math ::
\pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}.
It is given by the product over primes
.. math ::
C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016
Computing `C_2` to 50 digits::
>>> from mpmath import *
>>> mp.dps = 50; mp.pretty = True
>>> +twinprime
0.66016181584686957392781211001455577843262336028473
References:
http://mathworld.wolfram.com/TwinPrimesConstant.html
"""
ln = r"""
Computes the natural logarithm of `x`, `\ln x`.
See :func:`~mpmath.log` for additional documentation."""
sqrt = r"""
``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`.
For positive real numbers, the principal root is simply the
positive square root. For arbitrary complex numbers, the principal
square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`.
The function thus has a branch cut along the negative half real axis.
For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to
performing ``x**0.5``.
**Examples**
Basic examples and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sqrt(10)
3.16227766016838
>>> sqrt(100)
10.0
>>> sqrt(-4)
(0.0 + 2.0j)
>>> sqrt(1+1j)
(1.09868411346781 + 0.455089860562227j)
>>> sqrt(inf)
+inf
Square root evaluation is fast at huge precision::
>>> mp.dps = 50000
>>> a = sqrt(3)
>>> str(a)[-10:]
'9329332814'
:func:`mpmath.iv.sqrt` supports interval arguments::
>>> iv.dps = 15; iv.pretty = True
>>> iv.sqrt([16,100])
[4.0, 10.0]
>>> iv.sqrt(2)
[1.4142135623730949234, 1.4142135623730951455]
>>> iv.sqrt(2) ** 2
[1.9999999999999995559, 2.0000000000000004441]
"""
cbrt = r"""
``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This
function is faster and more accurate than raising to a floating-point
fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 125**(mpf(1)/3)
mpf('4.9999999999999991')
>>> cbrt(125)
mpf('5.0')
Every nonzero complex number has three cube roots. This function
returns the cube root defined by `\exp(\log(x)/3)` where the
principal branch of the natural logarithm is used. Note that this
does not give a real cube root for negative real numbers::
>>> mp.pretty = True
>>> cbrt(-1)
(0.5 + 0.866025403784439j)
"""
exp = r"""
Computes the exponential function,
.. math ::
\exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}.
For complex numbers, the exponential function also satisfies
.. math ::
\exp(x+yi) = e^x (\cos y + i \sin y).
**Basic examples**
Some values of the exponential function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> exp(0)
1.0
>>> exp(1)
2.718281828459045235360287
>>> exp(-1)
0.3678794411714423215955238
>>> exp(inf)
+inf
>>> exp(-inf)
0.0
Arguments can be arbitrarily large::
>>> exp(10000)
8.806818225662921587261496e+4342
>>> exp(-10000)
1.135483865314736098540939e-4343
Evaluation is supported for interval arguments via
:func:`mpmath.iv.exp`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.exp([-inf,0])
[0.0, 1.0]
>>> iv.exp([0,1])
[1.0, 2.71828182845904523536028749558]
The exponential function can be evaluated efficiently to arbitrary
precision::
>>> mp.dps = 10000
>>> exp(pi) #doctest: +ELLIPSIS
23.140692632779269005729...8984304016040616
**Functional properties**
Numerical verification of Euler's identity for the complex
exponential function::
>>> mp.dps = 15
>>> exp(j*pi)+1
(0.0 + 1.22464679914735e-16j)
>>> chop(exp(j*pi)+1)
0.0
This recovers the coefficients (reciprocal factorials) in the
Maclaurin series expansion of exp::
>>> nprint(taylor(exp, 0, 5))
[1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333]
The exponential function is its own derivative and antiderivative::
>>> exp(pi)
23.1406926327793
>>> diff(exp, pi)
23.1406926327793
>>> quad(exp, [-inf, pi])
23.1406926327793
The exponential function can be evaluated using various methods,
including direct summation of the series, limits, and solving
the defining differential equation::
>>> nsum(lambda k: pi**k/fac(k), [0,inf])
23.1406926327793
>>> limit(lambda k: (1+pi/k)**k, inf)
23.1406926327793
>>> odefun(lambda t, x: x, 0, 1)(pi)
23.1406926327793
"""
cosh = r"""
Computes the hyperbolic cosine of `x`,
`\cosh(x) = (e^x + e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cosh(0)
1.0
>>> cosh(1)
1.543080634815243778477906
>>> cosh(-inf), cosh(+inf)
(+inf, +inf)
The hyperbolic cosine is an even, convex function with
a global minimum at `x = 0`, having a Maclaurin series
that starts::
>>> nprint(chop(taylor(cosh, 0, 5)))
[1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0]
Generalized to complex numbers, the hyperbolic cosine is
equivalent to a cosine with the argument rotated
in the imaginary direction, or `\cosh x = \cos ix`::
>>> cosh(2+3j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
>>> cos(3-2j)
(-3.724545504915322565473971 + 0.5118225699873846088344638j)
"""
sinh = r"""
Computes the hyperbolic sine of `x`,
`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sinh(0)
0.0
>>> sinh(1)
1.175201193643801456882382
>>> sinh(-inf), sinh(+inf)
(-inf, +inf)
The hyperbolic sine is an odd function, with a Maclaurin
series that starts::
>>> nprint(chop(taylor(sinh, 0, 5)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333]
Generalized to complex numbers, the hyperbolic sine is
essentially a sine with a rotation `i` applied to
the argument; more precisely, `\sinh x = -i \sin ix`::
>>> sinh(2+3j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
>>> j*sin(3-2j)
(-3.590564589985779952012565 + 0.5309210862485198052670401j)
"""
tanh = r"""
Computes the hyperbolic tangent of `x`,
`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tanh(0)
0.0
>>> tanh(1)
0.7615941559557648881194583
>>> tanh(-inf), tanh(inf)
(-1.0, 1.0)
The hyperbolic tangent is an odd, sigmoidal function, similar
to the inverse tangent and error function. Its Maclaurin
series is::
>>> nprint(chop(taylor(tanh, 0, 5)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333]
Generalized to complex numbers, the hyperbolic tangent is
essentially a tangent with a rotation `i` applied to
the argument; more precisely, `\tanh x = -i \tan ix`::
>>> tanh(2+3j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
>>> j*tan(3-2j)
(0.9653858790221331242784803 - 0.009884375038322493720314034j)
"""
cos = r"""
Computes the cosine of `x`, `\cos(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cos(pi/3)
0.5
>>> cos(100000001)
-0.9802850113244713353133243
>>> cos(2+3j)
(-4.189625690968807230132555 - 9.109227893755336597979197j)
>>> cos(inf)
nan
>>> nprint(chop(taylor(cos, 0, 6)))
[1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889]
Intervals are supported via :func:`mpmath.iv.cos`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cos([0,1])
[0.540302305868139717400936602301, 1.0]
>>> iv.cos([0,2])
[-0.41614683654714238699756823214, 1.0]
"""
sin = r"""
Computes the sine of `x`, `\sin(x)`.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sin(pi/3)
0.8660254037844386467637232
>>> sin(100000001)
0.1975887055794968911438743
>>> sin(2+3j)
(9.1544991469114295734673 - 4.168906959966564350754813j)
>>> sin(inf)
nan
>>> nprint(chop(taylor(sin, 0, 6)))
[0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0]
Intervals are supported via :func:`mpmath.iv.sin`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sin([0,1])
[0.0, 0.841470984807896506652502331201]
>>> iv.sin([0,2])
[0.0, 1.0]
"""
tan = r"""
Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`.
The tangent function is singular at `x = (n+1/2)\pi`, but
``tan(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> tan(pi/3)
1.732050807568877293527446
>>> tan(100000001)
-0.2015625081449864533091058
>>> tan(2+3j)
(-0.003764025641504248292751221 + 1.003238627353609801446359j)
>>> tan(inf)
nan
>>> nprint(chop(taylor(tan, 0, 6)))
[0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0]
Intervals are supported via :func:`mpmath.iv.tan`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.tan([0,1])
[0.0, 1.55740772465490223050697482944]
>>> iv.tan([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
sec = r"""
Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`.
The secant function is singular at `x = (n+1/2)\pi`, but
``sec(x)`` always returns a finite result since `(n+1/2)\pi`
cannot be represented exactly using floating-point arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> sec(pi/3)
2.0
>>> sec(10000001)
-1.184723164360392819100265
>>> sec(2+3j)
(-0.04167496441114427004834991 + 0.0906111371962375965296612j)
>>> sec(inf)
nan
>>> nprint(chop(taylor(sec, 0, 6)))
[1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222]
Intervals are supported via :func:`mpmath.iv.sec`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.sec([0,1])
[1.0, 1.85081571768092561791175326276]
>>> iv.sec([0,2]) # Interval includes a singularity
[-inf, +inf]
"""
csc = r"""
Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`.
This cosecant function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``csc(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> csc(pi/3)
1.154700538379251529018298
>>> csc(10000001)
-1.864910497503629858938891
>>> csc(2+3j)
(0.09047320975320743980579048 + 0.04120098628857412646300981j)
>>> csc(inf)
nan
Intervals are supported via :func:`mpmath.iv.csc`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.csc([0,1]) # Interval includes a singularity
[1.18839510577812121626159943988, +inf]
>>> iv.csc([0,2])
[1.0, +inf]
"""
cot = r"""
Computes the cotangent of `x`,
`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`.
This cotangent function is singular at `x = n \pi`, but with the
exception of the point `x = 0`, ``cot(x)`` returns a finite result
since `n \pi` cannot be represented exactly using floating-point
arithmetic.
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> cot(pi/3)
0.5773502691896257645091488
>>> cot(10000001)
1.574131876209625656003562
>>> cot(2+3j)
(-0.003739710376336956660117409 - 0.9967577965693583104609688j)
>>> cot(inf)
nan
Intervals are supported via :func:`mpmath.iv.cot`::
>>> iv.dps = 25; iv.pretty = True
>>> iv.cot([0,1]) # Interval includes a singularity
[0.642092615934330703006419974862, +inf]
>>> iv.cot([1,2])
[-inf, +inf]
"""
acos = r"""
Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`.
Since `-1 \le \cos(x) \le 1` for real `x`, the inverse
cosine is real-valued only for `-1 \le x \le 1`. On this interval,
:func:`~mpmath.acos` is defined to be a monotonically decreasing
function assuming values between `+\pi` and `0`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> acos(-1)
3.141592653589793238462643
>>> acos(0)
1.570796326794896619231322
>>> acos(1)
0.0
>>> nprint(chop(taylor(acos, 0, 6)))
[1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0]
:func:`~mpmath.acos` is defined so as to be a proper inverse function of
`\cos(\theta)` for `0 \le \theta < \pi`.
We have `\cos(\cos^{-1}(x)) = x` for all `x`, but
`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`::
>>> for x in [1, 10, -1, 2+3j, 10+3j]:
... print("%s %s" % (cos(acos(x)), acos(cos(x))))
...
1.0 1.0
(10.0 + 0.0j) 2.566370614359172953850574
-1.0 1.0
(2.0 + 3.0j) (2.0 + 3.0j)
(10.0 + 3.0j) (2.566370614359172953850574 - 3.0j)
The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
asin = r"""
Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`.
Since `-1 \le \sin(x) \le 1` for real `x`, the inverse
sine is real-valued only for `-1 \le x \le 1`.
On this interval, it is defined to be a monotonically increasing
function assuming values between `-\pi/2` and `\pi/2`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> asin(-1)
-1.570796326794896619231322
>>> asin(0)
0.0
>>> asin(1)
1.570796326794896619231322
>>> nprint(chop(taylor(asin, 0, 6)))
[0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0]
:func:`~mpmath.asin` is defined so as to be a proper inverse function of
`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\sin(\sin^{-1}(x)) = x` for all `x`, but
`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (chop(sin(asin(x))), asin(sin(x))))
...
1.0 1.0
10.0 -0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.0 + 3.0j)
(-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j)
The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin`
places the branch cuts along the line segments `(-\infty, -1)` and
`(+1, +\infty)`. In general,
.. math ::
\sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right)
where the principal-branch log and square root are implied.
"""
atan = r"""
Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`.
This is a real-valued function for all real `x`, with range
`(-\pi/2, \pi/2)`.
Basic values are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> atan(-inf)
-1.570796326794896619231322
>>> atan(-1)
-0.7853981633974483096156609
>>> atan(0)
0.0
>>> atan(1)
0.7853981633974483096156609
>>> atan(inf)
1.570796326794896619231322
>>> nprint(chop(taylor(atan, 0, 6)))
[0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0]
The inverse tangent is often used to compute angles. However,
the atan2 function is often better for this as it preserves sign
(see :func:`~mpmath.atan2`).
:func:`~mpmath.atan` is defined so as to be a proper inverse function of
`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`.
We have `\tan(\tan^{-1}(x)) = x` for all `x`, but
`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`::
>>> mp.dps = 25
>>> for x in [1, 10, -1, 1+3j, -2+3j]:
... print("%s %s" % (tan(atan(x)), atan(tan(x))))
...
1.0 1.0
10.0 0.5752220392306202846120698
-1.0 -1.0
(1.0 + 3.0j) (1.000000000000000000000001 + 3.0j)
(-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j)
The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan`
places the branch cuts along the line segments `(-i \infty, -i)` and
`(+i, +i \infty)`. In general,
.. math ::
\tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right)
where the principal-branch log is implied.
"""
acot = r"""Computes the inverse cotangent of `x`,
`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`."""
asec = r"""Computes the inverse secant of `x`,
`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`."""
acsc = r"""Computes the inverse cosecant of `x`,
`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`."""
coth = r"""Computes the hyperbolic cotangent of `x`,
`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`.
"""
sech = r"""Computes the hyperbolic secant of `x`,
`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`.
"""
csch = r"""Computes the hyperbolic cosecant of `x`,
`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`.
"""
acosh = r"""Computes the inverse hyperbolic cosine of `x`,
`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`.
"""
asinh = r"""Computes the inverse hyperbolic sine of `x`,
`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`.
"""
atanh = r"""Computes the inverse hyperbolic tangent of `x`,
`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`.
"""
acoth = r"""Computes the inverse hyperbolic cotangent of `x`,
`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`."""
asech = r"""Computes the inverse hyperbolic secant of `x`,
`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`."""
acsch = r"""Computes the inverse hyperbolic cosecant of `x`,
`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`."""
sinpi = r"""
Computes `\sin(\pi x)`, more accurately than the expression
``sin(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinpi(10**10), sin(pi*(10**10))
(0.0, -2.23936276195592e-6)
>>> sinpi(10**10+0.5), sin(pi*(10**10+0.5))
(1.0, 0.999999999998721)
"""
cospi = r"""
Computes `\cos(\pi x)`, more accurately than the expression
``cos(pi*x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> cospi(10**10), cos(pi*(10**10))
(1.0, 0.999999999997493)
>>> cospi(10**10+0.5), cos(pi*(10**10+0.5))
(0.0, 1.59960492420134e-6)
"""
sinc = r"""
``sinc(x)`` computes the unnormalized sinc function, defined as
.. math ::
\mathrm{sinc}(x) = \begin{cases}
\sin(x)/x, & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
See :func:`~mpmath.sincpi` for the normalized sinc function.
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> sinc(0)
1.0
>>> sinc(1)
0.841470984807897
>>> sinc(inf)
0.0
The integral of the sinc function is the sine integral Si::
>>> quad(sinc, [0, 1])
0.946083070367183
>>> si(1)
0.946083070367183
"""
sincpi = r"""
``sincpi(x)`` computes the normalized sinc function, defined as
.. math ::
\mathrm{sinc}_{\pi}(x) = \begin{cases}
\sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\
1, & \mbox{if } x = 0.
\end{cases}
Equivalently, we have
`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`.
The normalization entails that the function integrates
to unity over the entire real line::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quadosc(sincpi, [-inf, inf], period=2.0)
1.0
Like :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately
at its roots::
>>> sincpi(10)
0.0
"""
expj = r"""
Convenience function for computing `e^{ix}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expj(0)
(1.0 + 0.0j)
>>> expj(-1)
(0.5403023058681397174009366 - 0.8414709848078965066525023j)
>>> expj(j)
(0.3678794411714423215955238 + 0.0j)
>>> expj(1+j)
(0.1987661103464129406288032 + 0.3095598756531121984439128j)
"""
expjpi = r"""
Convenience function for computing `e^{i \pi x}`.
Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`,
:func:`~mpmath.sinpi`)::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expjpi(0)
(1.0 + 0.0j)
>>> expjpi(1)
(-1.0 + 0.0j)
>>> expjpi(0.5)
(0.0 + 1.0j)
>>> expjpi(-1)
(-1.0 + 0.0j)
>>> expjpi(j)
(0.04321391826377224977441774 + 0.0j)
>>> expjpi(1+j)
(-0.04321391826377224977441774 + 0.0j)
"""
floor = r"""
Computes the floor of `x`, `\lfloor x \rfloor`, defined as
the largest integer less than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> floor(3.5)
mpf('3.0')
.. note ::
:func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a
floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is
too large to be represented exactly at the present working precision,
the result will be rounded, not necessarily in the direction
implied by the mathematical definition of the function.
To avoid rounding, use *prec=0*::
>>> mp.dps = 15
>>> print(int(floor(10**30+1)))
1000000000000000019884624838656
>>> print(int(floor(10**30+1, prec=0)))
1000000000000000000000000000001
The floor function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> floor(3.25+4.75j)
mpc(real='3.0', imag='4.0')
"""
ceil = r"""
Computes the ceiling of `x`, `\lceil x \rceil`, defined as
the smallest integer greater than or equal to `x`::
>>> from mpmath import *
>>> mp.pretty = False
>>> ceil(3.5)
mpf('4.0')
The ceiling function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> ceil(3.25+4.75j)
mpc(real='4.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
nint = r"""
Evaluates the nearest integer function, `\mathrm{nint}(x)`.
This gives the nearest integer to `x`; on a tie, it
gives the nearest even integer::
>>> from mpmath import *
>>> mp.pretty = False
>>> nint(3.2)
mpf('3.0')
>>> nint(3.8)
mpf('4.0')
>>> nint(3.5)
mpf('4.0')
>>> nint(4.5)
mpf('4.0')
The nearest integer function is defined for complex numbers and
acts on the real and imaginary parts separately::
>>> nint(3.25+4.75j)
mpc(real='3.0', imag='5.0')
See notes about rounding for :func:`~mpmath.floor`.
"""
frac = r"""
Gives the fractional part of `x`, defined as
`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`).
In effect, this computes `x` modulo 1, or `x+n` where
`n \in \mathbb{Z}` is such that `x+n \in [0,1)`::
>>> from mpmath import *
>>> mp.pretty = False
>>> frac(1.25)
mpf('0.25')
>>> frac(3)
mpf('0.0')
>>> frac(-1.25)
mpf('0.75')
For a complex number, the fractional part function applies to
the real and imaginary parts separately::
>>> frac(2.25+3.75j)
mpc(real='0.25', imag='0.75')
Plotted, the fractional part function gives a sawtooth
wave. The Fourier series coefficients have a simple
form::
>>> mp.dps = 15
>>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4))
([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775])
>>> nprint([-1/(pi*k) for k in range(1,5)])
[-0.31831, -0.159155, -0.106103, -0.0795775]
.. note::
The fractional part is sometimes defined as a symmetric
function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`.
This convention is used, for instance, by Mathematica's
``FractionalPart``.
"""
sign = r"""
Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|`
(with the special case `\mathrm{sign}(0) = 0`)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> sign(10)
mpf('1.0')
>>> sign(-10)
mpf('-1.0')
>>> sign(0)
mpf('0.0')
Note that the sign function is also defined for complex numbers,
for which it gives the projection onto the unit circle::
>>> mp.dps = 15; mp.pretty = True
>>> sign(1+j)
(0.707106781186547 + 0.707106781186547j)
"""
arg = r"""
Computes the complex argument (phase) of `x`, defined as the
signed angle between the positive real axis and `x` in the
complex plane::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> arg(3)
0.0
>>> arg(3+3j)
0.785398163397448
>>> arg(3j)
1.5707963267949
>>> arg(-3)
3.14159265358979
>>> arg(-3j)
-1.5707963267949
The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and
with the sign convention that a nonnegative imaginary part
results in a nonnegative argument.
The value returned by :func:`~mpmath.arg` is an ``mpf`` instance.
"""
fabs = r"""
Returns the absolute value of `x`, `|x|`. Unlike :func:`abs`,
:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``)
into mpmath numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> fabs(3)
mpf('3.0')
>>> fabs(-3)
mpf('3.0')
>>> fabs(3+4j)
mpf('5.0')
"""
re = r"""
Returns the real part of `x`, `\Re(x)`. Unlike ``x.real``,
:func:`~mpmath.re` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> re(3)
mpf('3.0')
>>> re(-1+4j)
mpf('-1.0')
"""
im = r"""
Returns the imaginary part of `x`, `\Im(x)`. Unlike ``x.imag``,
:func:`~mpmath.im` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> im(3)
mpf('0.0')
>>> im(-1+4j)
mpf('4.0')
"""
# Docstring for mpmath's conj(); reStructuredText with doctests.
# Fix: the cross-reference previously pointed at :func:`~mpmath.im`
# instead of :func:`~mpmath.conj`.
conj = r"""
Returns the complex conjugate of `x`, `\overline{x}`. Unlike
``x.conjugate()``, :func:`~mpmath.conj` converts `x` to a mpmath number::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> conj(3)
mpf('3.0')
>>> conj(-1+4j)
mpc(real='-1.0', imag='-4.0')
"""
polar = r"""
Returns the polar representation of the complex number `z`
as a pair `(r, \phi)` such that `z = r e^{i \phi}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polar(-2)
(2.0, 3.14159265358979)
>>> polar(3-4j)
(5.0, -0.927295218001612)
"""
rect = r"""
Returns the complex number represented by polar
coordinates `(r, \phi)`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> chop(rect(2, pi))
-2.0
>>> rect(sqrt(2), -pi/4)
(1.0 - 1.0j)
"""
expm1 = r"""
Computes `e^x - 1`, accurately for small `x`.
Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from
potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> exp(1e-10)-1; print(expm1(1e-10))
1.00000008274037e-10
1.00000000005e-10
>>> exp(1e-20)-1; print(expm1(1e-20))
0.0
1.0e-20
>>> 1/(exp(1e-20)-1)
Traceback (most recent call last):
...
ZeroDivisionError
>>> 1/expm1(1e-20)
1.0e+20
Evaluation works for extremely tiny values::
>>> expm1(0)
0.0
>>> expm1('1e-10000000')
1.0e-10000000
"""
powm1 = r"""
Computes `x^y - 1`, accurately when `x^y` is very close to 1.
This avoids potentially catastrophic cancellation::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> power(0.99999995, 1e-10) - 1
0.0
>>> powm1(0.99999995, 1e-10)
-5.00000012791934e-18
Powers exactly equal to 1, and only those powers, yield 0 exactly::
>>> powm1(-j, 4)
(0.0 + 0.0j)
>>> powm1(3, 0)
0.0
>>> powm1(fadd(-1, 1e-100, exact=True), 4)
-4.0e-100
Evaluation works for extremely tiny `y`::
>>> powm1(2, '1e-100000')
6.93147180559945e-100001
>>> powm1(j, '1e-1000')
(-1.23370055013617e-2000 + 1.5707963267949e-1000j)
"""
root = r"""
``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number
`r` that (up to possible approximation error) satisfies `r^n = z`.
(``nthroot`` is available as an alias for ``root``.)
Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are
equidistant points on a circle with radius `|z|^{1/n}`, centered around the
origin. A specific root may be selected using the optional index
`k`. The roots are indexed counterclockwise, starting with `k = 0` for the root
closest to the positive real half-axis.
The `k = 0` root is the so-called principal `n`-th root, often denoted by
`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is
a positive real number, the principal root is just the unique positive
`n`-th root of `z`. Under some circumstances, non-principal real roots exist:
for positive real `z`, `n` even, there is a negative root given by `k = n/2`;
for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`.
To obtain all roots with a simple expression, use
``[root(z,n,k) for k in range(n)]``.
An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of
unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots`
provides a slightly more convenient way to obtain the roots of unity,
including the option to compute only the primitive roots of unity.
Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be
reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or
the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed.
:func:`~mpmath.root` is implemented to use Newton's method for small
`n`. At high precision, this makes `x^{1/n}` not much more
expensive than the regular exponentiation, `x^n`. For very large
`n`, :func:`~mpmath.nthroot` falls back to use the exponential function.
**Examples**
:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a
floating-point fraction::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = False
>>> 16807 ** (mpf(1)/5)
mpf('7.0000000000000009')
>>> root(16807, 5)
mpf('7.0')
>>> nthroot(16807, 5) # Alias
mpf('7.0')
A high-precision root::
>>> mp.dps = 50; mp.pretty = True
>>> nthroot(10, 5)
1.584893192461113485202101373391507013269442133825
>>> nthroot(10, 5) ** 5
10.0
Computing principal and non-principal square and cube roots::
>>> mp.dps = 15
>>> root(10, 2)
3.16227766016838
>>> root(10, 2, 1)
-3.16227766016838
>>> root(-10, 3)
(1.07721734501594 + 1.86579517236206j)
>>> root(-10, 3, 1)
-2.15443469003188
>>> root(-10, 3, 2)
(1.07721734501594 - 1.86579517236206j)
All the 7th roots of a complex number::
>>> for r in [root(3+4j, 7, k) for k in range(7)]:
... print("%s %s" % (r, r**7))
...
(1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j)
(0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j)
(-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j)
(-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j)
(-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j)
(-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j)
(0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j)
Cube roots of unity::
>>> for k in range(3): print(root(1, 3, k))
...
1.0
(-0.5 + 0.866025403784439j)
(-0.5 - 0.866025403784439j)
Some exact high order roots::
>>> root(75**210, 105)
5625.0
>>> root(1, 128, 96)
(0.0 - 1.0j)
>>> root(4**128, 128, 96)
(0.0 - 4.0j)
"""
unitroots = r"""
``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`,
all the distinct `n`-th roots of unity, as a list. If the option
*primitive=True* is passed, only the primitive roots are returned.
Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. There are `n` distinct
roots for each `n` (`\zeta_k` and `\zeta_j` are the same when
`k = j \pmod n`), which form a regular polygon with vertices on the unit
circle. They are ordered counterclockwise with increasing `k`, starting
with `\zeta_0 = 1`.
**Examples**
The roots of unity up to `n = 4`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint(unitroots(1))
[1.0]
>>> nprint(unitroots(2))
[1.0, -1.0]
>>> nprint(unitroots(3))
[1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4))
[1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)]
Roots of unity form a geometric series that sums to 0::
>>> mp.dps = 50
>>> chop(fsum(unitroots(25)))
0.0
Primitive roots up to `n = 4`::
>>> mp.dps = 15
>>> nprint(unitroots(1, primitive=True))
[1.0]
>>> nprint(unitroots(2, primitive=True))
[-1.0]
>>> nprint(unitroots(3, primitive=True))
[(-0.5 + 0.866025j), (-0.5 - 0.866025j)]
>>> nprint(unitroots(4, primitive=True))
[(0.0 + 1.0j), (0.0 - 1.0j)]
There are only four primitive 12th roots::
>>> nprint(unitroots(12, primitive=True))
[(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)]
The `n`-th roots of unity form a group, the cyclic group of order `n`.
Any primitive root `r` is a generator for this group, meaning that
`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in
some permuted order)::
>>> for r in unitroots(6): print(r)
...
1.0
(0.5 + 0.866025403784439j)
(-0.5 + 0.866025403784439j)
-1.0
(-0.5 - 0.866025403784439j)
(0.5 - 0.866025403784439j)
>>> r = unitroots(6, primitive=True)[1]
>>> for k in range(6): print(chop(r**k))
...
1.0
(0.5 - 0.866025403784439j)
(-0.5 - 0.866025403784439j)
-1.0
(-0.5 + 0.866025403784438j)
(0.5 + 0.866025403784438j)
The number of primitive roots equals the Euler totient function `\phi(n)`::
>>> [len(unitroots(n, primitive=True)) for n in range(1,20)]
[1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18]
"""
log = r"""
Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is
unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm
and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm
is defined in terms of the natural logarithm as
`\log_b(x) = \ln(x)/\ln(b)`.
By convention, we take `\log(0) = -\infty`.
The natural logarithm is real if `x > 0` and complex if `x < 0` or if
`x` is complex. The principal branch of the complex logarithm is
used, meaning that `\Im(\ln(x)) = \arg(x)` and `-\pi < \arg(x) \le \pi`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> log(1)
0.0
>>> log(2)
0.693147180559945
>>> log(1000,10)
3.0
>>> log(4, 16)
0.5
>>> log(j)
(0.0 + 1.5707963267949j)
>>> log(-1)
(0.0 + 3.14159265358979j)
>>> log(0)
-inf
>>> log(inf)
+inf
The natural logarithm is the antiderivative of `1/x`::
>>> quad(lambda x: 1/x, [1, 5])
1.6094379124341
>>> log(5)
1.6094379124341
>>> diff(log, 10)
0.1
The Taylor series expansion of the natural logarithm around
`x = 1` has coefficients `(-1)^{n+1}/n`::
>>> nprint(taylor(log, 1, 7))
[0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857]
:func:`~mpmath.log` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> log(pi)
1.1447298858494001741434273513530587116472948129153
>>> log(pi, pi**3)
0.33333333333333333333333333333333333333333333333333
>>> mp.dps = 25
>>> log(3+4j)
(1.609437912434100374600759 + 0.9272952180016122324285125j)
"""
log10 = r"""
Computes the base-10 logarithm of `x`, `\log_{10}(x)`. ``log10(x)``
is equivalent to ``log(x, 10)``.
"""
fmod = r"""
Converts `x` and `y` to mpmath numbers and returns `x \mod y`.
For mpmath numbers, this is equivalent to ``x % y``.
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> fmod(100, pi)
2.61062773871641
You can use :func:`~mpmath.fmod` to compute fractional parts of numbers::
>>> fmod(10.25, 1)
0.25
"""
radians = r"""
Converts the degree angle `x` to radians::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> radians(60)
1.0471975511966
"""
degrees = r"""
Converts the radian angle `x` to a degree angle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> degrees(pi/3)
60.0
"""
atan2 = r"""
Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`,
giving the signed angle between the positive `x`-axis and the
point `(x, y)` in the 2D plane. This function is defined for
real `x` and `y` only.
The two-argument arctangent essentially computes
`\mathrm{atan}(y/x)`, but accounts for the signs of both
`x` and `y` to give the angle for the correct quadrant. The
following examples illustrate the difference::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> atan2(1,1), atan(1/1.)
(0.785398163397448, 0.785398163397448)
>>> atan2(1,-1), atan(1/-1.)
(2.35619449019234, -0.785398163397448)
>>> atan2(-1,1), atan(-1/1.)
(-0.785398163397448, -0.785398163397448)
>>> atan2(-1,-1), atan(-1/-1.)
(-2.35619449019234, 0.785398163397448)
The angle convention is the same as that used for the complex
argument; see :func:`~mpmath.arg`.
"""
fibonacci = r"""
``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The
Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)`
with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci`
extends this definition to arbitrary real and complex arguments
using the formula
.. math ::
F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5}
where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this
continuous formula to compute `F(n)` for extremely large `n`, where
calculating the exact integer would be wasteful.
For convenience, :func:`~mpmath.fib` is available as an alias for
:func:`~mpmath.fibonacci`.
**Basic examples**
Some small Fibonacci numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for i in range(10):
... print(fibonacci(i))
...
0.0
1.0
1.0
2.0
3.0
5.0
8.0
13.0
21.0
34.0
>>> fibonacci(50)
12586269025.0
The recurrence for `F(n)` extends backwards to negative `n`::
>>> for i in range(10):
... print(fibonacci(-i))
...
0.0
1.0
-1.0
2.0
-3.0
5.0
-8.0
13.0
-21.0
34.0
Large Fibonacci numbers will be computed approximately unless
the precision is set high enough::
>>> fib(200)
2.8057117299251e+41
>>> mp.dps = 45
>>> fib(200)
280571172992510140037611932413038677189525.0
:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers
of stupendous size::
>>> mp.dps = 15
>>> fibonacci(10**25)
3.49052338550226e+2089876402499787337692720
**Real and complex arguments**
The extended Fibonacci function is an analytic function. The
property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`::
>>> mp.dps = 15
>>> fib(pi)
2.1170270579161
>>> fib(pi-1) + fib(pi-2)
2.1170270579161
>>> fib(3+4j)
(-5248.51130728372 - 14195.962288353j)
>>> fib(2+4j) + fib(1+4j)
(-5248.51130728372 - 14195.962288353j)
The Fibonacci function has infinitely many roots on the
negative half-real axis. The first root is at 0, the second is
close to -0.18, and then there are infinitely many roots that
asymptotically approach `-n+1/2`::
>>> findroot(fib, -0.2)
-0.183802359692956
>>> findroot(fib, -2)
-1.57077646820395
>>> findroot(fib, -17)
-16.4999999596115
>>> findroot(fib, -24)
-23.5000000000479
**Mathematical relationships**
For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
>>> mp.dps = 50
>>> fibonacci(101)/fibonacci(100)
1.6180339887498948482045868343656381177203127439638
>>> +phi
1.6180339887498948482045868343656381177203091798058
The sum of reciprocal Fibonacci numbers converges to an irrational
number for which no closed form expression is known::
>>> mp.dps = 15
>>> nsum(lambda n: 1/fib(n), [1, inf])
3.35988566624318
Amazingly, however, the sum of odd-index reciprocal Fibonacci
numbers can be expressed in terms of a Jacobi theta function::
>>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
1.82451515740692
>>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
1.82451515740692
Some related sums can be done in closed form::
>>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
1.11803398874989
>>> phi - 0.5
1.11803398874989
>>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
>>> nsum(f, [1, inf])
0.618033988749895
>>> phi-1
0.618033988749895
**References**
1. http://mathworld.wolfram.com/FibonacciNumber.html
"""
# Docstring for mpmath's altzeta() (Dirichlet eta function).
# Fix: the displayed series previously read \sum_{k=0}^{\infty} (-1)^k / k^s,
# whose k = 0 term divides by zero and whose signs contradict the printed
# expansion 1 - 1/2^s + 1/3^s - ...; the correct form starts at k = 1 with
# alternating sign (-1)^{k-1}.  Minor wording fixes as well.
altzeta = r"""
Gives the Dirichlet eta function, `\eta(s)`, also known as the
alternating zeta function. This function is defined in analogy
with the Riemann zeta function as providing the sum of the
alternating series
.. math ::
\eta(s) = \sum_{k=1}^{\infty} \frac{(-1)^{k-1}}{k^s}
= 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
The eta function, unlike the Riemann zeta function, is an entire
function, having a finite value for all complex `s`. The special case
`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
The alternating zeta function may be expressed using the Riemann zeta function
as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
in terms of the Hurwitz zeta function, for example using
:func:`~mpmath.dirichlet` (see documentation for that function).
**Examples**
Some special values are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> altzeta(1)
0.693147180559945
>>> altzeta(0)
0.5
>>> altzeta(-1)
0.25
>>> altzeta(-2)
0.0
An example of a sum that can be computed more accurately and
efficiently via :func:`~mpmath.altzeta` than via numerical summation::
>>> sum(-(-1)**n / n**2.5 for n in range(1, 100))
0.86720495150398402
>>> altzeta(2.5)
0.867199889012184
At positive even integers, the Dirichlet eta function
evaluates to a rational multiple of a power of `\pi`::
>>> altzeta(2)
0.822467033424113
>>> pi**2/12
0.822467033424113
Like the Riemann zeta function, `\eta(s)` approaches 1
as `s` approaches positive infinity, although it does
so from below rather than from above::
>>> altzeta(30)
0.999999999068682
>>> altzeta(inf)
1.0
>>> mp.pretty = False
>>> altzeta(1000, rounding='d')
mpf('0.99999999999999989')
>>> altzeta(1000, rounding='u')
mpf('1.0')
**References**
1. http://mathworld.wolfram.com/DirichletEtaFunction.html
2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
"""
factorial = r"""
Computes the factorial, `x!`. For integers `n \ge 0`, we have
`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
is defined for real or complex `x` by `x! = \Gamma(x+1)`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(6):
... print("%s %s" % (k, fac(k)))
...
0 1.0
1 1.0
2 2.0
3 6.0
4 24.0
5 120.0
>>> fac(inf)
+inf
>>> fac(0.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
For large positive `x`, `x!` can be approximated by
Stirling's formula::
>>> x = 10**10
>>> fac(x)
2.32579620567308e+95657055186
>>> sqrt(2*pi*x)*(x/e)**x
2.32579597597705e+95657055186
:func:`~mpmath.fac` supports evaluation for astronomically large values::
>>> fac(10**30)
6.22311232304258e+29565705518096748172348871081098
Reciprocal factorials appear in the Taylor series of the
exponential function (among many other contexts)::
>>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1)
(2.71828182845905, 2.71828182845905)
>>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi)
(23.1406926327793, 23.1406926327793)
"""
gamma = r"""
Computes the gamma function, `\Gamma(x)`. The gamma function is a
shifted version of the ordinary factorial, satisfying
`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it
is defined by
.. math ::
\Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt
for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0`
by analytic continuation.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for k in range(1, 6):
... print("%s %s" % (k, gamma(k)))
...
1 1.0
2 1.0
3 2.0
4 6.0
5 24.0
>>> gamma(inf)
+inf
>>> gamma(0)
Traceback (most recent call last):
...
ValueError: gamma function pole
The gamma function of a half-integer is a rational multiple of
`\sqrt{\pi}`::
>>> gamma(0.5), sqrt(pi)
(1.77245385090552, 1.77245385090552)
>>> gamma(1.5), sqrt(pi)/2
(0.886226925452758, 0.886226925452758)
We can check the integral definition::
>>> gamma(3.5)
3.32335097044784
>>> quad(lambda t: t**2.5*exp(-t), [0,inf])
3.32335097044784
:func:`~mpmath.gamma` supports arbitrary-precision evaluation and
complex arguments::
>>> mp.dps = 50
>>> gamma(sqrt(3))
0.91510229697308632046045539308226554038315280564184
>>> mp.dps = 25
>>> gamma(2j)
(0.009902440080927490985955066 - 0.07595200133501806872408048j)
Arguments can also be large. Note that the gamma function grows
very quickly::
>>> mp.dps = 15
>>> gamma(10**20)
1.9328495143101e+1956570551809674817225
"""
# Docstring for psi(m, z): the polygamma function of order m (digamma for
# m=0, trigamma for m=1, ...). Contains doctest examples for rational
# arguments, derivatives, complex arguments, large orders, and closed-form
# evaluation of infinite series of rational summands.
psi = r"""
Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`.
Special cases are known as the *digamma function* (`\psi^{(0)}(z)`),
the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma
functions are defined as the logarithmic derivatives of the gamma
function:
.. math ::
\psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z)
In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the
present implementation of :func:`~mpmath.psi`, the order `m` must be a
nonnegative integer, while the argument `z` may be an arbitrary
complex number (with exception for the polygamma function's poles
at `z = 0, -1, -2, \ldots`).
**Examples**
For various rational arguments, the polygamma function reduces to
a combination of standard mathematical constants::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> psi(0, 1), -euler
(-0.5772156649015328606065121, -0.5772156649015328606065121)
>>> psi(1, '1/4'), pi**2+8*catalan
(17.19732915450711073927132, 17.19732915450711073927132)
>>> psi(2, '1/2'), -14*apery
(-16.82879664423431999559633, -16.82879664423431999559633)
The polygamma functions are derivatives of each other::
>>> diff(lambda x: psi(3, x), pi), psi(4, pi)
(-0.1105749312578862734526952, -0.1105749312578862734526952)
>>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2)
(-0.375, -0.375)
The digamma function diverges logarithmically as `z \to \infty`,
while higher orders tend to zero::
>>> psi(0,inf), psi(1,inf), psi(2,inf)
(+inf, 0.0, 0.0)
Evaluation for a complex argument::
>>> psi(2, -1-2j)
(0.03902435405364952654838445 + 0.1574325240413029954685366j)
Evaluation is supported for large orders `m` and/or large
arguments `z`::
>>> psi(3, 10**100)
2.0e-300
>>> psi(250, 10**30+10**20*j)
(-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j)
**Application to infinite series**
Any infinite series where the summand is a rational function of
the index `k` can be evaluated in closed form in terms of polygamma
functions of the roots and poles of the summand::
>>> a = sqrt(2)
>>> b = sqrt(3)
>>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf])
0.4049668927517857061917531
>>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2
0.4049668927517857061917531
This follows from the series representation (`m > 0`)
.. math ::
\psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty}
\frac{1}{(z+k)^{m+1}}.
Since the roots of a polynomial may be complex, it is sometimes
necessary to use the complex polygamma function to evaluate
an entirely real-valued sum::
>>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf])
1.694361433907061256154665
>>> nprint(polyroots([1,-2,3]))
[(1.0 - 1.41421j), (1.0 + 1.41421j)]
>>> r1 = 1-sqrt(2)*j
>>> r2 = r1.conjugate()
>>> (psi(0,-r2)-psi(0,-r1))/(r1-r2)
(1.694361433907061256154665 + 0.0j)
"""
# Docstring for digamma(z): one-line alias documentation pointing to psi(0, z).
digamma = r"""
Shortcut for ``psi(0,z)``.
"""
# Docstring for harmonic(n): the n-th harmonic number H(n), evaluated via the
# digamma function so that large and nonintegral arguments are supported.
harmonic = r"""
If `n` is an integer, ``harmonic(n)`` gives a floating-point
approximation of the `n`-th harmonic number `H(n)`, defined as
.. math ::
H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n}
The first few harmonic numbers are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(8):
... print("%s %s" % (n, harmonic(n)))
...
0 0.0
1 1.0
2 1.5
3 1.83333333333333
4 2.08333333333333
5 2.28333333333333
6 2.45
7 2.59285714285714
The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges::
>>> harmonic(inf)
+inf
:func:`~mpmath.harmonic` is evaluated using the digamma function rather
than by summing the harmonic series term by term. It can therefore
be computed quickly for arbitrarily large `n`, and even for
nonintegral arguments::
>>> harmonic(10**100)
230.835724964306
>>> harmonic(0.5)
0.613705638880109
>>> harmonic(3+4j)
(2.24757548223494 + 0.850502209186044j)
:func:`~mpmath.harmonic` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> harmonic(11)
3.0198773448773448773448773448773448773448773448773
>>> harmonic(pi)
1.8727388590273302654363491032336134987519132374152
The harmonic series diverges, but at a glacial pace. It is possible
to calculate the exact number of terms required before the sum
exceeds a given amount, say 100::
>>> mp.dps = 50
>>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10)
>>> v
15092688622113788323693563264538101449859496.864101
>>> v = int(ceil(v))
>>> print(v)
15092688622113788323693563264538101449859497
>>> harmonic(v-1)
99.999999999999999999999999999999999999999999942747
>>> harmonic(v)
100.000000000000000000000000000000000000000000009
"""
# Docstring for bernoulli(n): floating-point approximation of the n-th
# Bernoulli number; notes the Ramanujan recurrence for small n and the
# zeta-function formula for large n.
bernoulli = r"""
Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`.
The Bernoulli numbers are rational numbers, but this function
returns a floating-point approximation. To obtain an exact
fraction, use :func:`~mpmath.bernfrac` instead.
**Examples**
Numerical values of the first few Bernoulli numbers::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(15):
... print("%s %s" % (n, bernoulli(n)))
...
0 1.0
1 -0.5
2 0.166666666666667
3 0.0
4 -0.0333333333333333
5 0.0
6 0.0238095238095238
7 0.0
8 -0.0333333333333333
9 0.0
10 0.0757575757575758
11 0.0
12 -0.253113553113553
13 0.0
14 1.16666666666667
Bernoulli numbers can be approximated with arbitrary precision::
>>> mp.dps = 50
>>> bernoulli(100)
-2.8382249570693706959264156336481764738284680928013e+78
Arbitrarily large `n` are supported::
>>> mp.dps = 15
>>> bernoulli(10**20 + 2)
3.09136296657021e+1876752564973863312327
The Bernoulli numbers are related to the Riemann zeta function
at integer arguments::
>>> -bernoulli(8) * (2*pi)**8 / (2*fac(8))
1.00407735619794
>>> zeta(8)
1.00407735619794
**Algorithm**
For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence
formula due to Ramanujan. All results in this range are cached,
so sequential computation of small Bernoulli numbers is
guaranteed to be fast.
For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
function.
"""
# Docstring for stieltjes(n, a=1): the n-th (generalized) Stieltjes constant,
# i.e. the Laurent coefficients of the (Hurwitz) zeta function at s = 1;
# includes the integral representation used by the algorithm and references.
stieltjes = r"""
For a nonnegative integer `n`, ``stieltjes(n)`` computes the
`n`-th Stieltjes constant `\gamma_n`, defined as the
`n`-th coefficient in the Laurent series expansion of the
Riemann zeta function around the pole at `s = 1`. That is,
we have:
.. math ::
\zeta(s) = \frac{1}{s-1} \sum_{n=0}^{\infty}
\frac{(-1)^n}{n!} \gamma_n (s-1)^n
More generally, ``stieltjes(n, a)`` gives the corresponding
coefficient `\gamma_n(a)` for the Hurwitz zeta function
`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
**Examples**
The zeroth Stieltjes constant is just Euler's constant `\gamma`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> stieltjes(0)
0.577215664901533
Some more values are::
>>> stieltjes(1)
-0.0728158454836767
>>> stieltjes(10)
0.000205332814909065
>>> stieltjes(30)
0.00355772885557316
>>> stieltjes(1000)
-1.57095384420474e+486
>>> stieltjes(2000)
2.680424678918e+1109
>>> stieltjes(1, 2.5)
-0.23747539175716
An alternative way to compute `\gamma_1`::
>>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
-0.0728158454836767
:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
>>> mp.dps = 50
>>> stieltjes(2)
-0.0096903631928723184845303860352125293590658061013408
**Algorithm**
:func:`~mpmath.stieltjes` numerically evaluates the integral in
the following representation due to Ainsworth, Howell and
Coffey [1], [2]:
.. math ::
\gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
\frac{2}{a} \Re \int_0^{\infty}
\frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
For some reference values with `a = 1`, see e.g. [4].
**References**
1. O. R. Ainsworth & L. W. Howell, "An integral representation of
the generalized Euler-Mascheroni constants", NASA Technical
Paper 2456 (1985),
http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
2. M. W. Coffey, "The Stieltjes constants, their relation to the
`\eta_j` coefficients, and representation of the Hurwitz
zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
3. http://mathworld.wolfram.com/StieltjesConstants.html
4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
"""
# Docstring for gammaprod(a, b): a product/quotient of gamma functions
# evaluated as a limit, so poles in numerator and denominator can cancel.
gammaprod = r"""
Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
product / quotient of gamma functions:
.. math ::
\frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
{\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
the entire product as a limit and evaluates this limit properly if
any of the numerator or denominator arguments are nonpositive
integers such that poles of the gamma function are encountered.
That is, :func:`~mpmath.gammaprod` evaluates
.. math ::
\lim_{\epsilon \to 0}
\frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
\Gamma(a_p+\epsilon)}
{\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
\Gamma(b_q+\epsilon)}
In particular:
* If there are equally many poles in the numerator and the
denominator, the limit is a rational number times the remaining,
regular part of the product.
* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
returns ``+inf``.
* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
returns 0.
**Examples**
The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`::
>>> from mpmath import *
>>> mp.dps = 15
>>> gammaprod([], [0])
0.0
A limit::
>>> gammaprod([-4], [-3])
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1)
-0.25
>>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1)
-0.25
"""
# Docstring for beta(x, y): the Euler beta function B(x, y), including its
# limit behavior at poles and example integrals it evaluates.
beta = r"""
Computes the beta function,
`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`.
The beta function is also commonly defined by the integral
representation
.. math ::
B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt
**Examples**
For integer and half-integer arguments where all three gamma
functions are finite, the beta function becomes either rational
number or a rational multiple of `\pi`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> beta(5, 2)
0.0333333333333333
>>> beta(1.5, 2)
0.266666666666667
>>> 16*beta(2.5, 1.5)
3.14159265358979
Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole
of the beta function is taken to result in ``+inf``::
>>> beta(-0.5, 0.5)
0.0
>>> beta(-3, 3)
-0.333333333333333
>>> beta(-2, 3)
+inf
>>> beta(inf, 1)
0.0
>>> beta(inf, 0)
nan
:func:`~mpmath.beta` supports complex numbers and arbitrary precision
evaluation::
>>> beta(1, 2+j)
(0.4 - 0.2j)
>>> mp.dps = 25
>>> beta(j,0.5)
(1.079424249270925780135675 - 1.410032405664160838288752j)
>>> mp.dps = 50
>>> beta(pi, e)
0.037890298781212201348153837138927165984170287886464
Various integrals can be computed by means of the
beta function::
>>> mp.dps = 15
>>> quad(lambda t: t**2.5*(1-t)**2, [0, 1])
0.0230880230880231
>>> beta(3.5, 3)
0.0230880230880231
>>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2])
0.319504062596158
>>> beta(2.5, 0.75)/2
0.319504062596158
"""
# Docstring for betainc(a, b, x1=0, x2=1, regularized=False): the generalized
# incomplete beta function. Fixes applied: the reST directive was written
# ".. note :" (single colon), which reStructuredText parses as a comment so
# the rendered note silently disappeared — now ".. note ::" to match the
# convention used elsewhere in this module; also fixed subject-verb agreement
# ("Implementations ... uses" -> "use").
betainc = r"""
``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized
incomplete beta function,
.. math ::
I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
beta function `B(a,b)`; see :func:`~mpmath.beta`.
With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
computes the regularized incomplete beta function
`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
beta distribution with parameters `a`, `b`.
.. note ::
Implementations of the incomplete beta function in some other
software use a different argument order. For example, Mathematica uses the
reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
three-argument incomplete beta integral (implicitly with `x1 = 0`), use
``betainc(a,b,0,x2,regularized=True)``.
**Examples**
Verifying that :func:`~mpmath.betainc` computes the integral in the
definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> x,y,a,b = 3, 4, 0, 6
>>> betainc(x, y, a, b)
-4010.4
>>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
-4010.4
The arguments may be arbitrary complex numbers::
>>> betainc(0.75, 1-4j, 0, 2+3j)
(0.2241657956955709603655887 + 0.3619619242700451992411724j)
With regularization::
>>> betainc(1, 2, 0, 0.25, regularized=True)
0.4375
>>> betainc(pi, e, 0, 1, regularized=True) # Complete
1.0
The beta integral satisfies some simple argument transformation
symmetries::
>>> mp.dps = 15
>>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
(56.0833333333333, 56.0833333333333, 56.0833333333333)
The beta integral can often be evaluated analytically. For integer and
rational arguments, the incomplete beta function typically reduces to a
simple algebraic-logarithmic expression::
>>> mp.dps = 25
>>> identify(chop(betainc(0, 0, 3, 4)))
'-(log((9/8)))'
>>> identify(betainc(2, 3, 4, 5))
'(673/12)'
>>> identify(betainc(1.5, 1, 1, 2))
'((-12+sqrt(1152))/18)'
"""
# Docstring for binomial(n, k): the binomial coefficient generalized to
# arbitrary real/complex arguments via the gamma function.
binomial = r"""
Computes the binomial coefficient
.. math ::
{n \choose k} = \frac{n!}{k!(n-k)!}.
The binomial coefficient gives the number of ways that `k` items
can be chosen from a set of `n` items. More generally, the binomial
coefficient is a well-defined function of arbitrary real or
complex `n` and `k`, via the gamma function.
**Examples**
Generate Pascal's triangle::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint([binomial(n,k) for k in range(n+1)])
...
[1.0]
[1.0, 1.0]
[1.0, 2.0, 1.0]
[1.0, 3.0, 3.0, 1.0]
[1.0, 4.0, 6.0, 4.0, 1.0]
There is 1 way to select 0 items from the empty set, and 0 ways to
select 1 item from the empty set::
>>> binomial(0, 0)
1.0
>>> binomial(0, 1)
0.0
:func:`~mpmath.binomial` supports large arguments::
>>> binomial(10**20, 10**20-5)
8.33333333333333e+97
>>> binomial(10**20, 10**10)
2.60784095465201e+104342944813
Nonintegral binomial coefficients find use in series
expansions::
>>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
>>> nprint([binomial(0.25, k) for k in range(5)])
[1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
An integral representation::
>>> n, k = 5, 3
>>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
>>> chop(quad(f, [-pi,pi])/(2*pi))
10.0
>>> binomial(n,k)
10.0
"""
# Docstring for rf(x, n): the rising factorial (Pochhammer symbol).
rf = r"""
Computes the rising factorial or Pochhammer symbol,
.. math ::
x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the rising factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: rf(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 2.0, 3.0, 1.0]
[0.0, 6.0, 11.0, 6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> rf(2+3j, 5.5)
(-7202.03920483347 - 3777.58810701527j)
"""
# Docstring for ff(x, n): the falling factorial.
ff = r"""
Computes the falling factorial,
.. math ::
(x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)}
where the rightmost expression is valid for nonintegral `n`.
**Examples**
For integral `n`, the falling factorial is a polynomial::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(taylor(lambda x: ff(x,n), 0, n))
...
[1.0]
[0.0, 1.0]
[0.0, -1.0, 1.0]
[0.0, 2.0, -3.0, 1.0]
[0.0, -6.0, 11.0, -6.0, 1.0]
Evaluation is supported for arbitrary arguments::
>>> ff(2+3j, 5.5)
(-720.41085888203 + 316.101124983878j)
"""
# Docstring for fac2(x): the double factorial x!!, generalized to complex
# arguments via the gamma function; documents the poles at negative even
# integers and series identities involving double factorials.
fac2 = r"""
Computes the double factorial `x!!`, defined for integers
`x > 0` by
.. math ::
x!! = \begin{cases}
1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\
2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even}
\end{cases}
and more generally by [1]
.. math ::
x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4}
\Gamma\left(\frac{x}{2}+1\right).
**Examples**
The integer sequence of double factorials begins::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([fac2(n) for n in range(10)])
[1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0]
For large `x`, double factorials follow a Stirling-like asymptotic
approximation::
>>> x = mpf(10000)
>>> fac2(x)
5.97272691416282e+17830
>>> sqrt(pi)*x**((x+1)/2)*exp(-x/2)
5.97262736954392e+17830
The recurrence formula `x!! = x (x-2)!!` can be reversed to
define the double factorial of negative odd integers (but
not negative even integers)::
>>> fac2(-1), fac2(-3), fac2(-5), fac2(-7)
(1.0, -1.0, 0.333333333333333, -0.0666666666666667)
>>> fac2(-2)
Traceback (most recent call last):
...
ValueError: gamma function pole
With the exception of the poles at negative even integers,
:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments.
The recurrence formula is valid generally::
>>> fac2(pi+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
>>> (pi+2j)*fac2(pi-2+2j)
(-1.3697207890154e-12 + 3.93665300979176e-12j)
Double factorials should not be confused with nested factorials,
which are immensely larger::
>>> fac(fac(20))
5.13805976125208e+43675043585825292774
>>> fac2(20)
3715891200.0
Double factorials appear, among other things, in series expansions
of Gaussian functions and the error function. Infinite series
include::
>>> nsum(lambda k: 1/fac2(k), [0, inf])
3.05940740534258
>>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2))
3.05940740534258
>>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf])
4.06015693855741
>>> e * erf(1) * sqrt(pi)
4.06015693855741
A beautiful Ramanujan sum::
>>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf])
0.90917279454693
>>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2
0.90917279454693
**References**
1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/
2. http://mathworld.wolfram.com/DoubleFactorial.html
"""
# Docstring for hyper(a_s, b_s, z): the generalized hypergeometric function
# pFq, covering parameter formats, termination/pole cases, analytic
# continuation for p = q+1, and Borel summation for divergent p > q+1 cases.
hyper = r"""
Evaluates the generalized hypergeometric function
.. math ::
\,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) =
\sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n}
{(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!}
where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`).
The parameters lists ``a_s`` and ``b_s`` may contain integers,
real numbers, complex numbers, as well as exact fractions given in
the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle
integers and fractions more efficiently than arbitrary
floating-point parameters (since rational parameters are by
far the most common).
**Examples**
Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by
comparison with :func:`~mpmath.nsum`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a,b,c,d = 2,3,4,5
>>> x = 0.25
>>> hyper([a,b],[c,d],x)
1.078903941164934876086237
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
1.078903941164934876086237
The parameters can be any combination of integers, fractions,
floats and complex numbers::
>>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3)
>>> x = 0.2j
>>> hyper([a,b],[c,d,e],x)
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
>>> b, e = -0.5, mpf(2)/3
>>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n)
>>> nsum(fn, [0, inf])
(0.9923571616434024810831887 - 0.005753848733883879742993122j)
The `\,_0F_0` and `\,_1F_0` series are just elementary functions::
>>> a, z = sqrt(2), +pi
>>> hyper([],[],z)
23.14069263277926900572909
>>> exp(z)
23.14069263277926900572909
>>> hyper([a],[],z)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
>>> (1-z)**(-a)
(-0.09069132879922920160334114 + 0.3283224323946162083579656j)
If any `a_k` coefficient is a nonpositive integer, the series terminates
into a finite polynomial::
>>> hyper([1,1,1,-3],[2,5],1)
0.7904761904761904761904762
>>> identify(_)
'(83/105)'
If any `b_k` is a nonpositive integer, the function is undefined (unless the
series terminates before the division by zero occurs)::
>>> hyper([1,1,1,-3],[-2,5],1)
Traceback (most recent call last):
...
ZeroDivisionError: pole in hypergeometric series
>>> hyper([1,1,1,-1],[-2,5],1)
1.1
Except for polynomial cases, the radius of convergence `R` of the hypergeometric
series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or
`R = 0` (if `p > q+1`).
The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`,
`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions
can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2`
are available to handle the most common cases (see their documentation),
but functions of higher degree are also supported via :func:`~mpmath.hyper`::
>>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point
1.141783505526870731311423
>>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole
+inf
>>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4
(1.543998916527972259717257 - 0.5876309929580408028816365j)
>>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5
(0.9996565821853579063502466 + 0.0129721075905630604445669j)
Near `z = 1` with noninteger parameters::
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1)
2.219433352235586121250027
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1)
+inf
>>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))()
>>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1)
2923978034.412973409330956
Please note that, as currently implemented, evaluation of `\,_pF_{p-1}`
with `p \ge 3` may be slow or inaccurate when `|z-1|` is small,
for some parameter values.
When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
series. For `\,_2F_0` the Borel sum has an analytic solution and can be
computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the functions
is evaluated first by attempting to sum it directly as an asymptotic
series (this only works for tiny `|z|`), and then by evaluating the Borel
regularized sum using numerical integration. Except for
special parameter combinations, this can be extremely slow.
>>> hyper([1,1], [], 0.5) # regularization of 2F0
(1.340965419580146562086448 + 0.8503366631752726568782447j)
>>> hyper([1,1,1,1], [1], 0.5) # regularization of 4F1
(1.108287213689475145830699 + 0.5327107430640678181200491j)
With the following magnitude of argument, the asymptotic series for `\,_3F_1`
gives only a few digits. Using Borel summation, ``hyper`` can produce
a value with full accuracy::
>>> mp.dps = 15
>>> hyper([2,0.5,4], [5.25], '0.08', force_series=True)
Traceback (most recent call last):
...
NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
>>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4)
1.0725535790737
>>> hyper([2,0.5,4], [5.25], '0.08')
(1.07269542893559 + 5.54668863216891e-5j)
>>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4)
0.946344925484879
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.946312503737771
>>> mp.dps = 25
>>> hyper([2,0.5,4], [5.25], '-0.08')
0.9463125037377662296700858
Note that with the positive `z` value, there is a complex part in the
correct result, which falls below the tolerance of the asymptotic series.
"""
# Docstring for hypercomb(function, params): a weighted combination of
# hypergeometric terms, with singularities cancelled by perturbing the base
# parameters at higher working precision.
hypercomb = r"""
Computes a weighted combination of hypergeometric functions
.. math ::
\sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}}
\frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r}
\Gamma(\beta_{r,k})}
\,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1},
\ldots, b_{r,q}; z_r)\right].
Typically the parameters are linear combinations of a small set of base
parameters; :func:`~mpmath.hypercomb` permits computing a correct value in
the case that some of the `\alpha`, `\beta`, `b` turn out to be
nonpositive integers, or if division by zero occurs for some `w^c`,
assuming that there are opposing singularities that cancel out.
The limit is computed by evaluating the function with the base
parameters perturbed, at a higher working precision.
The first argument should be a function that takes the perturbable
base parameters ``params`` as input and returns `N` tuples
``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``,
gamma factors ``alpha``, ``beta``, and hypergeometric coefficients
``a``, ``b`` each should be lists of numbers, and ``z`` should be a single
number.
**Examples**
The following evaluates
.. math ::
(a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1)
with `a=1, z=3`. There is a zero factor, two gamma function poles, and
the 1F1 function is singular; all singularities cancel out to give a finite
value::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1])
-180.769832308689
>>> -9*exp(3)
-180.769832308689
"""
# Docstring for hyp0f1(a, z): the confluent limit function 0F1, equivalent to
# hyper([], [a], z); includes a differential-equation verification doctest.
hyp0f1 = r"""
Gives the hypergeometric function `\,_0F_1`, sometimes known as the
confluent limit function, defined as
.. math ::
\,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}.
This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`,
and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`).
``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for
:func:`~mpmath.hyper` for more information.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp0f1(2, 0.25)
1.130318207984970054415392
>>> hyp0f1((1,2), 1234567)
6.27287187546220705604627e+964
>>> hyp0f1(3+4j, 1000000j)
(3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j)
Evaluation is supported for arbitrarily large values of `z`,
using asymptotic expansions::
>>> hyp0f1(1, 10**50)
2.131705322874965310390701e+8685889638065036553022565
>>> hyp0f1(1, -10**50)
1.115945364792025420300208e-13
Verifying the differential equation::
>>> a = 2.5
>>> f = lambda z: hyp0f1(a,z)
>>> for z in [0, 10, 3+4j]:
... chop(z*diff(f,z,2) + a*diff(f,z) - f(z))
...
0.0
0.0
0.0
"""
# Docstring for hyp1f1(a, b, z): Kummer's confluent hypergeometric function
# M(a, b, z), equivalent to hyper([a], [b], z).
hyp1f1 = r"""
Gives the confluent hypergeometric function of the first kind,
.. math ::
\,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!},
also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This
function gives one solution to the confluent (Kummer's) differential equation
.. math ::
z f''(z) + (b-z) f'(z) - af(z) = 0.
A second solution is given by the `U` function; see :func:`~mpmath.hyperu`.
Solutions are also given in an alternate form by the Whittaker
functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`).
``hyp1f1(a,b,z)`` is equivalent
to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more
information.
**Examples**
Evaluation for real and complex values of the argument `z`, with
fixed parameters `a = 2, b = -1/3`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp1f1(2, (-1,3), 3.25)
-2815.956856924817275640248
>>> hyp1f1(2, (-1,3), -3.25)
-1.145036502407444445553107
>>> hyp1f1(2, (-1,3), 1000)
-8.021799872770764149793693e+441
>>> hyp1f1(2, (-1,3), -1000)
0.000003131987633006813594535331
>>> hyp1f1(2, (-1,3), 100+100j)
(-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j)
Parameters may be complex::
>>> hyp1f1(2+3j, -1+j, 10j)
(261.8977905181045142673351 + 160.8930312845682213562172j)
Arbitrarily large values of `z` are supported::
>>> hyp1f1(3, 4, 10**20)
3.890569218254486878220752e+43429448190325182745
>>> hyp1f1(3, 4, -10**20)
6.0e-60
>>> hyp1f1(3, 4, 10**20*j)
(-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j)
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyp1f1(a,b,z)
>>> for z in [0, -10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
0.0
An integral representation::
>>> a, b = 1.5, 3
>>> z = 1.5
>>> hyp1f1(a,b,z)
2.269381460919952778587441
>>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1)
>>> gammaprod([b],[a,b-a])*quad(g, [0,1])
2.269381460919952778587441
"""
# Docstring for hyp1f2(a1, b1, b2, z), equivalent to hyper([a1], [b1, b2], z).
# Fix applied: the math notation previously read `\,_1F_2(a_1,a_2;b_1,b_2; z)`
# (two upper parameters, apparently copy-pasted from the 2F2 docstring), which
# contradicted the stated call signature ``hyp1f2(a1,b1,b2,z)``; 1F2 has one
# upper and two lower parameters.
hyp1f2 = r"""
Gives the hypergeometric function `\,_1F_2(a_1;b_1,b_2;z)`.
The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to
``hyper([a1],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c = 1.5, (-1,3), 2.25
>>> hyp1f2(a, b, c, 10**20)
-1.159388148811981535941434e+8685889639
>>> hyp1f2(a, b, c, -10**20)
-12.60262607892655945795907
>>> hyp1f2(a, b, c, 10**20*j)
(4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j)
>>> hyp1f2(2+3j, -2j, 0.5j, 10-20j)
(135881.9905586966432662004 - 86681.95885418079535738828j)
"""
# Docstring for hyp2f2(a1, a2, b1, b2, z), equivalent to
# hyper([a1,a2], [b1,b2], z).
hyp2f2 = r"""
Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`.
The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to
``hyper([a1,a2],[b1,b2],z)``.
Evaluation works for complex and arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a, b, c, d = 1.5, (-1,3), 2.25, 4
>>> hyp2f2(a, b, c, d, 10**20)
-5.275758229007902299823821e+43429448190325182663
>>> hyp2f2(a, b, c, d, -10**20)
2561445.079983207701073448
>>> hyp2f2(a, b, c, d, 10**20*j)
(2218276.509664121194836667 - 1280722.539991603850462856j)
>>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j)
(80500.68321405666957342788 - 20346.82752982813540993502j)
"""
# Docstring for hyp2f3(a1, a2, b1, b2, b3, z), equivalent to
# hyper([a1,a2], [b1,b2,b3], z).
hyp2f3 = r"""
Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`.
The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to
``hyper([a1,a2],[b1,b2,b3],z)``.
Evaluation works for arbitrarily large arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5)
>>> hyp2f3(a1,a2,b1,b2,b3,10**20)
-4.169178177065714963568963e+8685889590
>>> hyp2f3(a1,a2,b1,b2,b3,-10**20)
7064472.587757755088178629
>>> hyp2f3(a1,a2,b1,b2,b3,10**20*j)
(-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j)
(-2280.938956687033150740228 + 13620.97336609573659199632j)
>>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j)
(4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j)
"""
# Docstring for hyp2f1(a, b, c, z): the Gauss hypergeometric function,
# including its analytic continuation for |z| >= 1 with a branch cut on
# (1, inf); equivalent to hyper([a,b], [c], z).
hyp2f1 = r"""
Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as
*the* hypergeometric function), defined for `|z| < 1` as
.. math ::
\,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty}
\frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)`
when necessary.
Special cases of this function include many of the orthogonal polynomials as
well as the incomplete beta function and other functions. Properties of the
Gauss hypergeometric function are documented comprehensively in many references,
for example Abramowitz & Stegun, section 15.
The implementation supports the analytic continuation as well as evaluation
close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)``
is equivalent to ``hyper([a,b],[c],z)``.
**Examples**
Evaluation with `z` inside, outside and on the unit circle, for
fixed parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f1(2, (1,2), 4, 0.75)
1.303703703703703703703704
>>> hyp2f1(2, (1,2), 4, -1.75)
0.7431290566046919177853916
>>> hyp2f1(2, (1,2), 4, 1.75)
(1.418075801749271137026239 - 1.114976146679907015775102j)
>>> hyp2f1(2, (1,2), 4, 1)
1.6
>>> hyp2f1(2, (1,2), 4, -1)
0.8235498012182875315037882
>>> hyp2f1(2, (1,2), 4, j)
(0.9144026291433065674259078 + 0.2050415770437884900574923j)
>>> hyp2f1(2, (1,2), 4, 2+j)
(0.9274013540258103029011549 + 0.7455257875808100868984496j)
>>> hyp2f1(2, (1,2), 4, 0.25j)
(0.9931169055799728251931672 + 0.06154836525312066938147793j)
Evaluation with complex parameter values::
>>> hyp2f1(1+j, 0.75, 10j, 1+5j)
(0.8834833319713479923389638 + 0.7053886880648105068343509j)
Evaluation with `z = 1`::
>>> hyp2f1(-2.5, 3.5, 1.5, 1)
0.0
>>> hyp2f1(-2.5, 3, 4, 1)
0.06926406926406926406926407
>>> hyp2f1(2, 3, 4, 1)
+inf
Evaluation for huge arguments::
>>> hyp2f1((-1,3), 1.75, 4, '1e100')
(7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000')
(7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j)
>>> hyp2f1((-1,3), 1.75, 4, '1e1000000j')
(1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j)
An integral representation::
>>> a,b,c,z = -0.5, 1, 2.5, 0.25
>>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a)
>>> gammaprod([c],[b,c-b]) * quad(g, [0,1])
0.9480458814362824478852618
>>> hyp2f1(a,b,c,z)
0.9480458814362824478852618
Verifying the hypergeometric differential equation::
>>> f = lambda z: hyp2f1(a,b,c,z)
>>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z))
0.0
"""
# Docstring for hyp3f2(a1, a2, a3, b1, b2, z): the generalized hypergeometric
# function 3F2 with analytic continuation for |z| >= 1.
hyp3f2 = r"""
Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1`
as
.. math ::
\,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty}
\frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}.
and for `|z| \ge 1` by analytic continuation. The analytic structure of this
function is similar to that of `\,_2F_1`, generally with a singularity at
`z = 1` and a branch cut on `(1, \infty)`.
Evaluation is supported inside, on, and outside
the circle of convergence `|z| = 1`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp3f2(1,2,3,4,5,0.25)
1.083533123380934241548707
>>> hyp3f2(1,2+2j,3,4,5,-10+10j)
(0.1574651066006004632914361 - 0.03194209021885226400892963j)
>>> hyp3f2(1,2,3,4,5,-10)
0.3071141169208772603266489
>>> hyp3f2(1,2,3,4,5,10)
(-0.4857045320523947050581423 - 0.5988311440454888436888028j)
>>> hyp3f2(0.25,1,1,2,1.5,1)
1.157370995096772047567631
>>> (8-pi-2*ln2)/3
1.157370995096772047567631
>>> hyp3f2(1+j,0.5j,2,1,-2j,-1)
(1.74518490615029486475959 + 0.1454701525056682297614029j)
>>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j))
(0.9829816481834277511138055 - 0.4059040020276937085081127j)
>>> hyp3f2(-3,2,1,-5,4,1)
1.41
>>> hyp3f2(-3,2,1,-5,4,2)
2.12
Evaluation very close to the unit circle::
>>> hyp3f2(1,2,3,4,5,'1.0001')
(1.564877796743282766872279 - 3.76821518787438186031973e-11j)
>>> hyp3f2(1,2,3,4,5,'1+0.0001j')
(1.564747153061671573212831 + 0.0001305757570366084557648482j)
>>> hyp3f2(1,2,3,4,5,'0.9999')
1.564616644881686134983664
>>> hyp3f2(1,2,3,4,5,'-0.9999')
0.7823896253461678060196207
.. note ::
Evaluation for `|z-1|` small can currently be inaccurate or slow
for some parameter combinations.
For various parameter combinations, `\,_3F_2` admits representation in terms
of hypergeometric functions of lower degree, or in terms of
simpler functions::
>>> for a, b, z in [(1,2,-1), (2,0.5,1)]:
... hyp2f1(a,b,a+b+0.5,z)**2
... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z)
...
0.4246104461966439006086308
0.4246104461966439006086308
7.111111111111111111111111
7.111111111111111111111111
>>> z = 2+3j
>>> hyp3f2(0.5,1,1.5,2,2,z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
>>> 4*(pi-2*ellipe(z))/(pi*z)
(0.7621440939243342419729144 + 0.4249117735058037649915723j)
"""
# reST docstring (with doctests) for the Tricomi confluent hypergeometric
# function U; documentation text only, presumably consumed elsewhere.
hyperu = r"""
Gives the Tricomi confluent hypergeometric function `U`, also known as
the Kummer or confluent hypergeometric function of the second kind. This
function gives a second linearly independent solution to the confluent
hypergeometric differential equation (the first is provided by `\,_1F_1` --
see :func:`~mpmath.hyp1f1`).
**Examples**
Evaluation for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyperu(2,3,4)
0.0625
>>> hyperu(0.25, 5, 1000)
0.1779949416140579573763523
>>> hyperu(0.25, 5, -1000)
(0.1256256609322773150118907 - 0.1256256609322773150118907j)
The `U` function may be singular at `z = 0`::
>>> hyperu(1.5, 2, 0)
+inf
>>> hyperu(1.5, -2, 0)
0.1719434921288400112603671
Verifying the differential equation::
>>> a, b = 1.5, 2
>>> f = lambda z: hyperu(a,b,z)
>>> for z in [-10, 3, 3+4j]:
... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z))
...
0.0
0.0
0.0
An integral representation::
>>> a,b,z = 2, 3.5, 4.25
>>> hyperu(a,b,z)
0.06674960718150520648014567
>>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a)
0.06674960718150520648014567
[1] http://www.math.ucla.edu/~cbm/aands/page_504.htm
"""
# reST docstring (with doctests) for the (generally divergent, regularized)
# hypergeometric function 2F0; documentation text only.
hyp2f0 = r"""
Gives the hypergeometric function `\,_2F_0`, defined formally by the
series
.. math ::
\,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}.
This series usually does not converge. For small enough `z`, it can be viewed
as an asymptotic series that may be summed directly with an appropriate
truncation. When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum,
or equivalently, it uses a representation in terms of the
hypergeometric U function [1]. The series also converges when either `a` or `b`
is a nonpositive integer, as it then terminates into a polynomial
after `-a` or `-b` terms.
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hyp2f0((2,3), 1.25, -100)
0.07095851870980052763312791
>>> hyp2f0((2,3), 1.25, 100)
(-0.03254379032170590665041131 + 0.07269254613282301012735797j)
>>> hyp2f0(-0.75, 1-j, 4j)
(-0.3579987031082732264862155 - 3.052951783922142735255881j)
Even with real arguments, the regularized value of 2F0 is often complex-valued,
but the imaginary part decreases exponentially as `z \to 0`. In the following
example, the first call uses complex evaluation while the second has a small
enough `z` to evaluate using the direct series and thus the returned value
is strictly real (this should be taken to indicate that the imaginary
part is less than ``eps``)::
>>> mp.dps = 15
>>> hyp2f0(1.5, 0.5, 0.05)
(1.04166637647907 + 8.34584913683906e-8j)
>>> hyp2f0(1.5, 0.5, 0.0005)
1.00037535207621
The imaginary part can be retrieved by increasing the working precision::
>>> mp.dps = 80
>>> nprint(hyp2f0(1.5, 0.5, 0.009).imag)
1.23828e-46
In the polynomial case (the series terminating), 2F0 can evaluate exactly::
>>> mp.dps = 15
>>> hyp2f0(-6,-6,2)
291793.0
>>> identify(hyp2f0(-2,1,0.25))
'(5/8)'
The coefficients of the polynomials can be recovered using Taylor expansion::
>>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10))
[1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10))
[1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
[1] http://www.math.ucla.edu/~cbm/aands/page_504.htm
"""
# reST docstring (with doctests) for the generalized incomplete gamma
# function; documentation text only, presumably attached to the
# implementation elsewhere.
gammainc = r"""
``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete
gamma function with integration limits `[a, b]`:
.. math ::
\Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt
The generalized incomplete gamma function reduces to the
following special cases when one or both endpoints are fixed:
* `\Gamma(z,0,\infty)` is the standard ("complete")
gamma function, `\Gamma(z)` (available directly
as the mpmath function :func:`~mpmath.gamma`)
* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma
function, `\Gamma(z,a)`
* `\Gamma(z,0,b)` is the "lower" incomplete gamma
function, `\gamma(z,b)`.
Of course, we have
`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)`
for all `z` and `x`.
Note however that some authors reverse the order of the
arguments when defining the lower and upper incomplete
gamma function, so one should be careful to get the correct
definition.
If also given the keyword argument ``regularized=True``,
:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma
function
.. math ::
P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}.
**Examples**
We can compare with numerical quadrature to verify that
:func:`~mpmath.gammainc` computes the integral in the definition::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gammainc(2+3j, 4, 10)
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
>>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10])
(0.00977212668627705160602312 - 0.0770637306312989892451977j)
Argument symmetries follow directly from the integral definition::
>>> gammainc(3, 4, 5) + gammainc(3, 5, 4)
0.0
>>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4)
1.523793388892911312363331
1.523793388892911312363331
>>> findroot(lambda z: gammainc(2,z,3), 1)
3.0
Evaluation for arbitrarily large arguments::
>>> gammainc(10, 100)
4.083660630910611272288592e-26
>>> gammainc(10, 10000000000000000)
5.290402449901174752972486e-4342944819032375
>>> gammainc(3+4j, 1000000+1000000j)
(-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j)
Evaluation of a generalized incomplete gamma function automatically chooses
the representation that gives a more accurate result, depending on which
parameter is larger::
>>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad
0.0
>>> gammainc(10000000, 2, 3) # Good
1.755146243738946045873491e+4771204
>>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad
0.0
>>> gammainc(2, 100000000, 100000001) # Good
4.078258353474186729184421e-43429441
The incomplete gamma functions satisfy simple recurrence
relations::
>>> mp.dps = 25
>>> z, a = mpf(3.5), mpf(2)
>>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a)
10.60130296933533459267329
10.60130296933533459267329
>>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a)
1.030425427232114336470932
1.030425427232114336470932
Evaluation at integers and poles::
>>> gammainc(-3, -4, -5)
(-0.2214577048967798566234192 + 0.0j)
>>> gammainc(-3, 0, 5)
+inf
If `z` is an integer, the recurrence reduces the incomplete gamma
function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and
`Q` are polynomials::
>>> gammainc(1, 2); exp(-2)
0.1353352832366126918939995
0.1353352832366126918939995
>>> mp.dps = 50
>>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)'])
'(326*exp(-1) + (-872)*exp(-2))'
The incomplete gamma functions reduce to functions such as
the exponential integral Ei and the error function for special
arguments::
>>> mp.dps = 25
>>> gammainc(0, 4); -ei(-4)
0.00377935240984890647887486
0.00377935240984890647887486
>>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2))
1.691806732945198336509541
1.691806732945198336509541
"""
# reST docstring (with doctests) for the error function erf(x);
# documentation text only.
erf = r"""
Computes the error function, `\mathrm{erf}(x)`. The error
function is the normalized antiderivative of the Gaussian function
`\exp(-t^2)`. More precisely,
.. math::
\mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt
**Basic examples**
Simple values and limits include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erf(0)
0.0
>>> erf(1)
0.842700792949715
>>> erf(-1)
-0.842700792949715
>>> erf(inf)
1.0
>>> erf(-inf)
-1.0
For large real `x`, `\mathrm{erf}(x)` approaches 1 very
rapidly::
>>> erf(3)
0.999977909503001
>>> erf(5)
0.999999999998463
The error function is an odd function::
>>> nprint(chop(taylor(erf, 0, 5)))
[0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838]
:func:`~mpmath.erf` implements arbitrary-precision evaluation and
supports complex numbers::
>>> mp.dps = 50
>>> erf(0.5)
0.52049987781304653768274665389196452873645157575796
>>> mp.dps = 25
>>> erf(1+j)
(1.316151281697947644880271 + 0.1904534692378346862841089j)
Evaluation is supported for large arguments::
>>> mp.dps = 25
>>> erf('1e1000')
1.0
>>> erf('-1e1000')
-1.0
>>> erf('1e-1000')
1.128379167095512573896159e-1000
>>> erf('1e7j')
(0.0 + 8.593897639029319267398803e+43429448190317j)
>>> erf('1e7+1e7j')
(0.9999999858172446172631323 + 3.728805278735270407053139e-8j)
**Related functions**
See also :func:`~mpmath.erfc`, which is more accurate for large `x`,
and :func:`~mpmath.erfi` which gives the antiderivative of
`\exp(t^2)`.
The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc`
are also related to the error function.
"""
# reST docstring (with doctests) for the complementary error function;
# documentation text only.
erfc = r"""
Computes the complementary error function,
`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`.
This function avoids cancellation that occurs when naively
computing the complementary error function as ``1-erf(x)``::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> 1 - erf(10)
0.0
>>> erfc(10)
2.08848758376254e-45
:func:`~mpmath.erfc` works accurately even for ludicrously large
arguments::
>>> erfc(10**10)
4.3504398860243e-43429448190325182776
Complex arguments are supported::
>>> erfc(500+50j)
(1.19739830969552e-107492 + 1.46072418957528e-107491j)
"""
# reST docstring (with doctests) for the imaginary error function erfi(x);
# documentation text only.
erfi = r"""
Computes the imaginary error function, `\mathrm{erfi}(x)`.
The imaginary error function is defined in analogy with the
error function, but with a positive sign in the integrand:
.. math ::
\mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt
Whereas the error function rapidly converges to 1 as `x` grows,
the imaginary error function rapidly diverges to infinity.
The functions are related as
`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex
numbers `x`.
**Examples**
Basic values and limits::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfi(0)
0.0
>>> erfi(1)
1.65042575879754
>>> erfi(-1)
-1.65042575879754
>>> erfi(inf)
+inf
>>> erfi(-inf)
-inf
Note the symmetry between erf and erfi::
>>> erfi(3j)
(0.0 + 0.999977909503001j)
>>> erf(3)
0.999977909503001
>>> erf(1+2j)
(-0.536643565778565 - 5.04914370344703j)
>>> erfi(2+1j)
(-5.04914370344703 - 0.536643565778565j)
Large arguments are supported::
>>> erfi(1000)
1.71130938718796e+434291
>>> erfi(10**10)
7.3167287567024e+43429448190325182754
>>> erfi(-10**10)
-7.3167287567024e+43429448190325182754
>>> erfi(1000-500j)
(2.49895233563961e+325717 + 2.6846779342253e+325717j)
>>> erfi(100000j)
(0.0 + 1.0j)
>>> erfi(-100000j)
(0.0 - 1.0j)
"""
# reST docstring (with doctests) for the inverse error function;
# documentation text only.
erfinv = r"""
Computes the inverse error function, satisfying
.. math ::
\mathrm{erf}(\mathrm{erfinv}(x)) =
\mathrm{erfinv}(\mathrm{erf}(x)) = x.
This function is defined only for `-1 \le x \le 1`.
**Examples**
Special values include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> erfinv(0)
0.0
>>> erfinv(1)
+inf
>>> erfinv(-1)
-inf
The domain is limited to the standard interval::
>>> erfinv(2)
Traceback (most recent call last):
...
ValueError: erfinv(x) is defined only for -1 <= x <= 1
It is simple to check that :func:`~mpmath.erfinv` computes inverse values of
:func:`~mpmath.erf` as promised::
>>> erf(erfinv(0.75))
0.75
>>> erf(erfinv(-0.995))
-0.995
:func:`~mpmath.erfinv` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> x = erf(2)
>>> x
0.99532226501895273416206925636725292861089179704006
>>> erfinv(x)
2.0
A definite integral involving the inverse error function::
>>> mp.dps = 15
>>> quad(erfinv, [0, 1])
0.564189583547756
>>> 1/sqrt(pi)
0.564189583547756
The inverse error function can be used to generate random numbers
with a Gaussian distribution (although this is a relatively
inefficient algorithm)::
>>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP
[-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012]
"""
# reST docstring (with doctests) for the normal probability density
# function; documentation text only.
npdf = r"""
``npdf(x, mu=0, sigma=1)`` evaluates the probability density
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
Elementary properties of the probability distribution can
be verified using numerical integration::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> quad(npdf, [-inf, inf])
1.0
>>> quad(lambda x: npdf(x, 3), [3, inf])
0.5
>>> quad(lambda x: npdf(x, 3, 2), [3, inf])
0.5
See also :func:`~mpmath.ncdf`, which gives the cumulative
distribution.
"""
# reST docstring (with doctests) for the normal cumulative distribution
# function; documentation text only.
ncdf = r"""
``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution
function of a normal distribution with mean value `\mu`
and variance `\sigma^2`.
See also :func:`~mpmath.npdf`, which gives the probability density.
Elementary properties include::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ncdf(pi, mu=pi)
0.5
>>> ncdf(-inf)
0.0
>>> ncdf(+inf)
1.0
The cumulative distribution is the integral of the density
function having identical mu and sigma::
>>> mp.dps = 15
>>> diff(ncdf, 2)
0.053990966513188
>>> npdf(2)
0.053990966513188
>>> diff(lambda x: ncdf(x, 1, 0.5), 0)
0.107981933026376
>>> npdf(0, 1, 0.5)
0.107981933026376
"""
# reST docstring (with doctests) for the generalized exponential integral
# E_n(z); documentation text only.
expint = r"""
:func:`~mpmath.expint(n,z)` gives the generalized exponential integral
or En-function,
.. math ::
\mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt,
where `n` and `z` may both be complex numbers. The case with `n = 1` is
also given by :func:`~mpmath.e1`.
**Examples**
Evaluation at real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> expint(1, 6.25)
0.0002704758872637179088496194
>>> expint(-3, 2+3j)
(0.00299658467335472929656159 + 0.06100816202125885450319632j)
>>> expint(2+3j, 4-5j)
(0.001803529474663565056945248 - 0.002235061547756185403349091j)
At negative integer values of `n`, `E_n(z)` reduces to a
rational-exponential function::
>>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\
... exp(z)/z**(n+2)
>>> n = 3
>>> z = 1/pi
>>> expint(-n,z)
584.2604820613019908668219
>>> f(n,z)
584.2604820613019908668219
>>> n = 5
>>> expint(-n,z)
115366.5762594725451811138
>>> f(n,z)
115366.5762594725451811138
"""
# reST docstring (with doctests) for the exponential integral E_1(z);
# documentation text only.
e1 = r"""
Computes the exponential integral `\mathrm{E}_1(z)`, given by
.. math ::
\mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt.
This is equivalent to :func:`~mpmath.expint` with `n = 1`.
**Examples**
Two ways to evaluate this function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> e1(6.25)
0.0002704758872637179088496194
>>> expint(1,6.25)
0.0002704758872637179088496194
The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`)
with negated argument, except for an imaginary branch cut term::
>>> e1(2.5)
0.02491491787026973549562801
>>> -ei(-2.5)
0.02491491787026973549562801
>>> e1(-2.5)
(-7.073765894578600711923552 - 3.141592653589793238462643j)
>>> -ei(2.5)
-7.073765894578600711923552
"""
# reST docstring (with doctests) for the exponential integral Ei(x);
# documentation text only, presumably attached to the implementation
# elsewhere.
ei = r"""
Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`.
The exponential integral is defined as
.. math ::
\mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt.
When the integration range includes `t = 0`, the exponential
integral is interpreted as providing the Cauchy principal value.
For real `x`, the Ei-function behaves roughly like
`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`.
The Ei-function is related to the more general family of exponential
integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`.
**Basic examples**
Some basic values and limits are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> ei(0)
-inf
>>> ei(1)
1.89511781635594
>>> ei(inf)
+inf
>>> ei(-inf)
0.0
For `x < 0`, the defining integral can be evaluated
numerically as a reference::
>>> ei(-4)
-0.00377935240984891
>>> quad(lambda t: exp(t)/t, [-inf, -4])
-0.00377935240984891
:func:`~mpmath.ei` supports complex arguments and arbitrary
precision evaluation::
>>> mp.dps = 50
>>> ei(pi)
10.928374389331410348638445906907535171566338835056
>>> mp.dps = 25
>>> ei(3+4j)
(-4.154091651642689822535359 + 4.294418620024357476985535j)
**Related functions**
The exponential integral is closely related to the logarithmic
integral. See :func:`~mpmath.li` for additional information.
The exponential integral is related to the hyperbolic
and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`,
:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary
exponential function is related to the hyperbolic and
trigonometric functions::
>>> mp.dps = 15
>>> ei(3)
9.93383257062542
>>> chi(3) + shi(3)
9.93383257062542
>>> chop(ci(3j) - j*si(3j) - pi*j/2)
9.93383257062542
Beware that logarithmic corrections, as in the last example
above, are required to obtain the correct branch in general.
For details, see [1].
The exponential integral is also a special case of the
hypergeometric function `\,_2F_2`::
>>> z = 0.6
>>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler
0.769881289937359
>>> ei(z)
0.769881289937359
**References**
1. Relations between Ei and other functions:
http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/
2. Abramowitz & Stegun, section 5:
http://www.math.sfu.ca/~cbm/aands/page_228.htm
3. Asymptotic expansion for Ei:
http://mathworld.wolfram.com/En-Function.html
"""
# reST docstring (with doctests) for the logarithmic integral li(x);
# documentation text only, presumably attached to the implementation
# elsewhere. Fix: the doctest pair ">>> li(1)" / "-inf" appeared twice in
# a row (a copy/extraction artifact); the duplicate has been removed so
# the example runs once, as intended.
li = r"""
Computes the logarithmic integral or li-function
`\mathrm{li}(x)`, defined by
.. math ::
\mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt
The logarithmic integral has a singularity at `x = 1`.
Alternatively, ``li(x, offset=True)`` computes the offset
logarithmic integral (used in number theory)
.. math ::
\mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt.
These two functions are related via the simple identity
`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`.
The logarithmic integral should also not be confused with
the polylogarithm (also denoted by Li), which is implemented
as :func:`~mpmath.polylog`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> li(0)
0.0
>>> li(1)
-inf
>>> li(2)
1.04516378011749278484458888919
>>> findroot(li, 2)
1.45136923488338105028396848589
>>> li(inf)
+inf
>>> li(2, offset=True)
0.0
>>> li(1, offset=True)
-inf
>>> li(0, offset=True)
-1.04516378011749278484458888919
>>> li(10, offset=True)
5.12043572466980515267839286347
The logarithmic integral can be evaluated for arbitrary
complex arguments::
>>> mp.dps = 20
>>> li(3+4j)
(3.1343755504645775265 + 2.6769247817778742392j)
The logarithmic integral is related to the exponential integral::
>>> ei(log(3))
2.1635885946671919729
>>> li(3)
2.1635885946671919729
The logarithmic integral grows like `O(x/\log(x))`::
>>> mp.dps = 15
>>> x = 10**100
>>> x/log(x)
4.34294481903252e+97
>>> li(x)
4.3619719871407e+97
The prime number theorem states that the number of primes less
than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently
`\mathrm{li}(x)`). For example, it is known that there are
exactly 1,925,320,391,606,803,968,923 prime numbers less than
`10^{23}` [1]. The logarithmic integral provides a very
accurate estimate::
>>> li(10**23, offset=True)
1.92532039161405e+21
A definite integral is::
>>> quad(li, [0, 1])
-0.693147180559945
>>> -ln(2)
-0.693147180559945
**References**
1. http://mathworld.wolfram.com/PrimeCountingFunction.html
2. http://mathworld.wolfram.com/LogarithmicIntegral.html
"""
# reST docstring (with doctests) for the cosine integral Ci(x);
# documentation text only.
ci = r"""
Computes the cosine integral,
.. math ::
\mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ci(0)
-inf
>>> ci(1)
0.3374039229009681346626462
>>> ci(pi)
0.07366791204642548599010096
>>> ci(inf)
0.0
>>> ci(-inf)
(0.0 + 3.141592653589793238462643j)
>>> ci(2+3j)
(1.408292501520849518759125 - 2.983617742029605093121118j)
The cosine integral behaves roughly like the sinc function
(see :func:`~mpmath.sinc`) for large real `x`::
>>> ci(10**10)
-4.875060251748226537857298e-11
>>> sinc(10**10)
-4.875060250875106915277943e-11
>>> chop(limit(ci, inf))
0.0
It has infinitely many roots on the positive real axis::
>>> findroot(ci, 1)
0.6165054856207162337971104
>>> findroot(ci, 2)
3.384180422551186426397851
Evaluation is supported for `z` anywhere in the complex plane::
>>> ci(10**6*(1+j))
(4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1)
-0.190029749656644
>>> ci(5)
-0.190029749656644
Some infinite series can be evaluated using the
cosine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf])
-0.239811742000565
>>> ci(1) - euler
-0.239811742000565
"""
# reST docstring (with doctests) for the sine integral Si(x);
# documentation text only.
si = r"""
Computes the sine integral,
.. math ::
\mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt.
The sine integral is thus the antiderivative of the sinc
function (see :func:`~mpmath.sinc`).
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> si(0)
0.0
>>> si(1)
0.9460830703671830149413533
>>> si(-1)
-0.9460830703671830149413533
>>> si(pi)
1.851937051982466170361053
>>> si(inf)
1.570796326794896619231322
>>> si(-inf)
-1.570796326794896619231322
>>> si(2+3j)
(4.547513889562289219853204 + 1.399196580646054789459839j)
The sine integral approaches `\pi/2` for large real `x`::
>>> si(10**10)
1.570796326707584656968511
>>> pi/2
1.570796326794896619231322
Evaluation is supported for `z` anywhere in the complex plane::
>>> si(10**6*(1+j))
(-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j)
We can evaluate the defining integral as a reference::
>>> mp.dps = 15
>>> quad(sinc, [0, 5])
1.54993124494467
>>> si(5)
1.54993124494467
Some infinite series can be evaluated using the
sine integral::
>>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf])
0.946083070367183
>>> si(1)
0.946083070367183
"""
# reST docstring (with doctests) for the hyperbolic cosine integral Chi(x);
# documentation text only.
chi = r"""
Computes the hyperbolic cosine integral, defined
in analogy with the cosine integral (see :func:`~mpmath.ci`) as
.. math ::
\mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt
= \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> chi(0)
-inf
>>> chi(1)
0.8378669409802082408946786
>>> chi(inf)
+inf
>>> findroot(chi, 0.5)
0.5238225713898644064509583
>>> chi(2+3j)
(-0.1683628683277204662429321 + 2.625115880451325002151688j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> chi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
# reST docstring (with doctests) for the hyperbolic sine integral Shi(x);
# documentation text only.
shi = r"""
Computes the hyperbolic sine integral, defined
in analogy with the sine integral (see :func:`~mpmath.si`) as
.. math ::
\mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt.
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> shi(0)
0.0
>>> shi(1)
1.057250875375728514571842
>>> shi(-1)
-1.057250875375728514571842
>>> shi(inf)
+inf
>>> shi(2+3j)
(-0.1931890762719198291678095 + 2.645432555362369624818525j)
Evaluation is supported for `z` anywhere in the complex plane::
>>> shi(10**6*(1+j))
(4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j)
"""
# reST docstring (with doctests) for the Fresnel sine integral S(x);
# documentation text only.
fresnels = r"""
Computes the Fresnel sine integral
.. math ::
S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnels(0)
0.0
>>> fresnels(inf)
0.5
>>> fresnels(-inf)
-0.5
>>> fresnels(1)
0.4382591473903547660767567
>>> fresnels(1+2j)
(36.72546488399143842838788 + 15.58775110440458732748279j)
Comparing with the definition::
>>> fresnels(3)
0.4963129989673750360976123
>>> quad(lambda t: sin(pi*t**2/2), [0,3])
0.4963129989673750360976123
"""
# reST docstring (with doctests) for the Fresnel cosine integral C(x);
# documentation text only.
fresnelc = r"""
Computes the Fresnel cosine integral
.. math ::
C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt
Note that some sources define this function
without the normalization factor `\pi/2`.
**Examples**
Some basic values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> fresnelc(0)
0.0
>>> fresnelc(inf)
0.5
>>> fresnelc(-inf)
-0.5
>>> fresnelc(1)
0.7798934003768228294742064
>>> fresnelc(1+2j)
(16.08787137412548041729489 - 36.22568799288165021578758j)
Comparing with the definition::
>>> fresnelc(3)
0.6057207892976856295561611
>>> quad(lambda t: cos(pi*t**2/2), [0,3])
0.6057207892976856295561611
"""
# reST docstring (with doctests) for the Airy function Ai(z), including
# its derivative/integral variants via the `derivative` parameter;
# documentation text only, presumably attached to the implementation
# elsewhere.
airyai = r"""
Computes the Airy function `\operatorname{Ai}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Ai}(0) =
\frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Ai}'(0) =
-\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}.
Other common ways of defining the Ai-function include
integrals such as
.. math ::
\operatorname{Ai}(x) = \frac{1}{\pi}
\int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt
\qquad x \in \mathbb{R}
\operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi}
\int_0^{\infty}
\exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt.
The Ai-function is an entire function with a turning point,
behaving roughly like a slowly decaying sine wave for `z < 0` and
like a rapidly decreasing exponential for `z > 0`.
A second solution of the Airy differential equation
is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`).
Optionally, with *derivative=alpha*, :func:`airyai` can compute the
`\alpha`-th order fractional derivative with respect to `z`.
For `\alpha = n = 1,2,3,\ldots` this gives the derivative
`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots`
this gives the `n`-fold iterated integral
.. math ::
f_0(z) = \operatorname{Ai}(z)
f_n(z) = \int_0^z f_{n-1}(t) dt.
The Ai-function has infinitely many zeros, all located along the
negative half of the real axis. They can be computed with
:func:`~mpmath.airyaizero`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ai.py
.. image :: /modules/mpmath/plots/ai.png
.. literalinclude :: /modules/mpmath/plots/ai_c.py
.. image :: /modules/mpmath/plots/ai_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyai(0); 1/(power(3,'2/3')*gamma('2/3'))
0.3550280538878172392600632
0.3550280538878172392600632
>>> airyai(1)
0.1352924163128814155241474
>>> airyai(-1)
0.5355608832923521187995166
>>> airyai(inf); airyai(-inf)
0.0
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airyai(-100)
0.1767533932395528780908311
>>> airyai(100)
2.634482152088184489550553e-291
>>> airyai(50+50j)
(-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j)
>>> airyai(-50+50j)
(1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j)
Huge arguments are also fine::
>>> airyai(10**10)
1.162235978298741779953693e-289529654602171
>>> airyai(-10**10)
0.0001736206448152818510510181
>>> w = airyai(10**10*(1+j))
>>> w.real
5.711508683721355528322567e-186339621747698
>>> w.imag
1.867245506962312577848166e-186339621747697
The first root of the Ai-function is::
>>> findroot(airyai, -2)
-2.338107410459767038489197
>>> airyaizero(1)
-2.338107410459767038489197
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airyai(z,2) - z*airyai(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airyai, 0, 5))
[0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0]
The Airy functions satisfy the Wronskian relation
`\operatorname{Ai}(z) \operatorname{Bi}'(z) -
\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`::
>>> z = -0.5
>>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z)
0.3183098861837906715377675
>>> 1/pi
0.3183098861837906715377675
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airyai(z)
-0.3788142936776580743472439
>>> y = 2*power(-z,'3/2')/3
>>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3
-0.3788142936776580743472439
**Derivatives and integrals**
Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`)::
>>> airyai(-3,1); diff(airyai,-3)
0.3145837692165988136507873
0.3145837692165988136507873
>>> airyai(-3,2); diff(airyai,-3,2)
1.136442881032974223041732
1.136442881032974223041732
>>> airyai(1000,1); diff(airyai,1000)
-2.943133917910336090459748e-9156
-2.943133917910336090459748e-9156
Several derivatives at `z = 0`::
>>> airyai(0,0); airyai(0,1); airyai(0,2)
0.3550280538878172392600632
-0.2588194037928067984051836
0.0
>>> airyai(0,3); airyai(0,4); airyai(0,5)
0.3550280538878172392600632
-0.5176388075856135968103671
0.0
>>> airyai(0,15); airyai(0,16); airyai(0,17)
1292.30211615165475090663
-3188.655054727379756351861
0.0
The integral of the Ai-function::
>>> airyai(3,-1); quad(airyai, [0,3])
0.3299203760070217725002701
0.3299203760070217725002701
>>> airyai(-10,-1); quad(airyai, [0,-10])
-0.765698403134212917425148
-0.765698403134212917425148
Integrals of high or fractional order::
>>> airyai(-2,0.5); differint(airyai,-2,0.5,0)
(0.0 + 0.2453596101351438273844725j)
(0.0 + 0.2453596101351438273844725j)
>>> airyai(-2,-4); differint(airyai,-2,-4,0)
0.2939176441636809580339365
0.2939176441636809580339365
>>> airyai(0,-1); airyai(0,-2); airyai(0,-3)
0.0
0.0
0.0
Integrals of the Ai-function can be evaluated at limit points::
>>> airyai(-1000000,-1); airyai(-inf,-1)
-0.6666843728311539978751512
-0.6666666666666666666666667
>>> airyai(10,-1); airyai(+inf,-1)
0.3333333332991690159427932
0.3333333333333333333333333
>>> airyai(+inf,-2); airyai(+inf,-3)
+inf
+inf
>>> airyai(-1000000,-2); airyai(-inf,-2)
666666.4078472650651209742
+inf
>>> airyai(-1000000,-3); airyai(-inf,-3)
-333333074513.7520264995733
-inf
**References**
1. [DLMF]_ Chapter 9: Airy and Related Functions
2. [WolframFunctions]_ section: Bessel-Type Functions
"""
airybi = r"""
Computes the Airy function `\operatorname{Bi}(z)`, which is
the solution of the Airy differential equation `f''(z) - z f(z) = 0`
with initial conditions
.. math ::
\operatorname{Bi}(0) =
\frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)}
\operatorname{Bi}'(0) =
\frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}.
Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function
is oscillatory for `z < 0`, but it grows rather than decreases
for `z > 0`.
Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals
and fractional derivatives can be computed with the *derivative*
parameter.
The Bi-function has infinitely many zeros along the negative
half-axis, as well as complex zeros, which can all be computed
with :func:`~mpmath.airybizero`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/bi.py
.. image :: /modules/mpmath/plots/bi.png
.. literalinclude :: /modules/mpmath/plots/bi_c.py
.. image :: /modules/mpmath/plots/bi_c.png
**Basic examples**
Limits and values include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybi(0); 1/(power(3,'1/6')*gamma('2/3'))
0.6149266274460007351509224
0.6149266274460007351509224
>>> airybi(1)
1.207423594952871259436379
>>> airybi(-1)
0.10399738949694461188869
>>> airybi(inf); airybi(-inf)
+inf
0.0
Evaluation is supported for large magnitudes of the argument::
>>> airybi(-100)
0.02427388768016013160566747
>>> airybi(100)
6.041223996670201399005265e+288
>>> airybi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> airybi(-50+50j)
(-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j)
Huge arguments::
>>> airybi(10**10)
1.369385787943539818688433e+289529654602165
>>> airybi(-10**10)
0.001775656141692932747610973
>>> w = airybi(10**10*(1+j))
>>> w.real
-6.559955931096196875845858e+186339621747689
>>> w.imag
-6.822462726981357180929024e+186339621747690
The first real root of the Bi-function is::
>>> findroot(airybi, -1); airybizero(1)
-1.17371322270912792491998
-1.17371322270912792491998
**Properties and relations**
Verifying the Airy differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(airybi(z,2) - z*airybi(z))
...
0.0
0.0
0.0
0.0
The first few terms of the Taylor series expansion around `z = 0`
(every third term is zero)::
>>> nprint(taylor(airybi, 0, 5))
[0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0]
The Airy functions can be expressed in terms of Bessel
functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have::
>>> z = -3
>>> airybi(z)
-0.1982896263749265432206449
>>> p = 2*power(-z,'3/2')/3
>>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p))
-0.1982896263749265432206449
**Derivatives and integrals**
Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`)::
>>> airybi(-3,1); diff(airybi,-3)
-0.675611222685258537668032
-0.675611222685258537668032
>>> airybi(-3,2); diff(airybi,-3,2)
0.5948688791247796296619346
0.5948688791247796296619346
>>> airybi(1000,1); diff(airybi,1000)
1.710055114624614989262335e+9156
1.710055114624614989262335e+9156
Several derivatives at `z = 0`::
>>> airybi(0,0); airybi(0,1); airybi(0,2)
0.6149266274460007351509224
0.4482883573538263579148237
0.0
>>> airybi(0,3); airybi(0,4); airybi(0,5)
0.6149266274460007351509224
0.8965767147076527158296474
0.0
>>> airybi(0,15); airybi(0,16); airybi(0,17)
2238.332923903442675949357
5522.912562599140729510628
0.0
The integral of the Bi-function::
>>> airybi(3,-1); quad(airybi, [0,3])
10.06200303130620056316655
10.06200303130620056316655
>>> airybi(-10,-1); quad(airybi, [0,-10])
-0.01504042480614002045135483
-0.01504042480614002045135483
Integrals of high or fractional order::
>>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0)
(0.0 + 0.5019859055341699223453257j)
(0.0 + 0.5019859055341699223453257j)
>>> airybi(-2,-4); differint(airybi,-2,-4,0)
0.2809314599922447252139092
0.2809314599922447252139092
>>> airybi(0,-1); airybi(0,-2); airybi(0,-3)
0.0
0.0
0.0
Integrals of the Bi-function can be evaluated at limit points::
>>> airybi(-1000000,-1); airybi(-inf,-1)
0.000002191261128063434047966873
0.0
>>> airybi(10,-1); airybi(+inf,-1)
147809803.1074067161675853
+inf
>>> airybi(+inf,-2); airybi(+inf,-3)
+inf
+inf
>>> airybi(-1000000,-2); airybi(-inf,-2)
0.4482883750599908479851085
0.4482883573538263579148237
>>> gamma('2/3')*power(3,'2/3')/(2*pi)
0.4482883573538263579148237
>>> airybi(-100000,-3); airybi(-inf,-3)
-44828.52827206932872493133
-inf
>>> airybi(-100000,-4); airybi(-inf,-4)
2241411040.437759489540248
+inf
"""
airyaizero = r"""
Gives the `k`-th zero of the Airy Ai-function,
i.e. the `k`-th number `a_k` ordered by magnitude for which
`\operatorname{Ai}(a_k) = 0`.
Optionally, with *derivative=1*, the corresponding
zero `a'_k` of the derivative function, i.e.
`\operatorname{Ai}'(a'_k) = 0`, is computed.
**Examples**
Some values of `a_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airyaizero(1)
-2.338107410459767038489197
>>> airyaizero(2)
-4.087949444130970616636989
>>> airyaizero(3)
-5.520559828095551059129856
>>> airyaizero(1000)
-281.0315196125215528353364
Some values of `a'_k`::
>>> airyaizero(1,1)
-1.018792971647471089017325
>>> airyaizero(2,1)
-3.248197582179836537875424
>>> airyaizero(3,1)
-4.820099211178735639400616
>>> airyaizero(1000,1)
-280.9378080358935070607097
Verification::
>>> chop(airyai(airyaizero(1)))
0.0
>>> chop(airyai(airyaizero(1,1),1))
0.0
"""
airybizero = r"""
With *complex=False*, gives the `k`-th real zero of the Airy Bi-function,
i.e. the `k`-th number `b_k` ordered by magnitude for which
`\operatorname{Bi}(b_k) = 0`.
With *complex=True*, gives the `k`-th complex zero in the upper
half plane `\beta_k`. Also the conjugate `\overline{\beta_k}`
is a zero.
Optionally, with *derivative=1*, the corresponding
zero `b'_k` or `\beta'_k` of the derivative function, i.e.
`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`,
is computed.
**Examples**
Some values of `b_k`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> airybizero(1)
-1.17371322270912792491998
>>> airybizero(2)
-3.271093302836352715680228
>>> airybizero(3)
-4.830737841662015932667709
>>> airybizero(1000)
-280.9378112034152401578834
Some values of `b'_k`::
>>> airybizero(1,1)
-2.294439682614123246622459
>>> airybizero(2,1)
-4.073155089071828215552369
>>> airybizero(3,1)
-5.512395729663599496259593
>>> airybizero(1000,1)
-281.0315164471118527161362
Some values of `\beta_k`::
>>> airybizero(1,complex=True)
(0.9775448867316206859469927 + 2.141290706038744575749139j)
>>> airybizero(2,complex=True)
(1.896775013895336346627217 + 3.627291764358919410440499j)
>>> airybizero(3,complex=True)
(2.633157739354946595708019 + 4.855468179979844983174628j)
>>> airybizero(1000,complex=True)
(140.4978560578493018899793 + 243.3907724215792121244867j)
Some values of `\beta'_k`::
>>> airybizero(1,1,complex=True)
(0.2149470745374305676088329 + 1.100600143302797880647194j)
>>> airybizero(2,1,complex=True)
(1.458168309223507392028211 + 2.912249367458445419235083j)
>>> airybizero(3,1,complex=True)
(2.273760763013482299792362 + 4.254528549217097862167015j)
>>> airybizero(1000,1,complex=True)
(140.4509972835270559730423 + 243.3096175398562811896208j)
Verification::
>>> chop(airybi(airybizero(1)))
0.0
>>> chop(airybi(airybizero(1,1),1))
0.0
>>> u = airybizero(1,complex=True)
>>> chop(airybi(u))
0.0
>>> chop(airybi(conj(u)))
0.0
The complex zeros (in the upper and lower half-planes respectively)
asymptotically approach the rays `z = R \exp(\pm i \pi /3)`::
>>> arg(airybizero(1,complex=True))
1.142532510286334022305364
>>> arg(airybizero(1000,complex=True))
1.047271114786212061583917
>>> arg(airybizero(1000000,complex=True))
1.047197624741816183341355
>>> pi/3
1.047197551196597746154214
"""
ellipk = r"""
Evaluates the complete elliptic integral of the first kind,
`K(m)`, defined by
.. math ::
K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \,
\frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right).
Note that the argument is the parameter `m = k^2`,
not the modulus `k` which is sometimes used.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ellipk.py
.. image :: /modules/mpmath/plots/ellipk.png
**Examples**
Values and limits include::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipk(0)
1.570796326794896619231322
>>> ellipk(inf)
(0.0 + 0.0j)
>>> ellipk(-inf)
0.0
>>> ellipk(1)
+inf
>>> ellipk(-1)
1.31102877714605990523242
>>> ellipk(2)
(1.31102877714605990523242 - 1.31102877714605990523242j)
Verifying the defining integral and hypergeometric
representation::
>>> ellipk(0.5)
1.85407467730137191843385
>>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2])
1.85407467730137191843385
>>> pi/2*hyp2f1(0.5,0.5,1,0.5)
1.85407467730137191843385
Evaluation is supported for arbitrary complex `m`::
>>> ellipk(3+4j)
(0.9111955638049650086562171 + 0.6313342832413452438845091j)
A definite integral::
>>> quad(ellipk, [0, 1])
2.0
"""
agm = r"""
``agm(a, b)`` computes the arithmetic-geometric mean of `a` and
`b`, defined as the limit of the following iteration:
.. math ::
a_0 = a
b_0 = b
a_{n+1} = \frac{a_n+b_n}{2}
b_{n+1} = \sqrt{a_n b_n}
This function can be called with a single argument, computing
`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`.
**Examples**
It is a well-known theorem that the geometric mean of
two distinct positive numbers is less than the arithmetic
mean. It follows that the arithmetic-geometric mean lies
between the two means::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> a = mpf(3)
>>> b = mpf(4)
>>> sqrt(a*b)
3.46410161513775
>>> agm(a,b)
3.48202767635957
>>> (a+b)/2
3.5
The arithmetic-geometric mean is scale-invariant::
>>> agm(10*e, 10*pi)
29.261085515723
>>> 10*agm(e, pi)
29.261085515723
As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x`
for large `x`::
>>> agm(10**10)
643448704.760133
>>> agm(10**50)
1.34814309345871e+48
For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`::
>>> agm('0.01')
0.262166887202249
>>> -pi/2/log('0.0025')
0.262172347753122
The arithmetic-geometric mean can also be computed for complex
numbers::
>>> agm(3, 2+j)
(2.51055133276184 + 0.547394054060638j)
The AGM iteration converges very quickly (each step doubles
the number of correct digits), so :func:`~mpmath.agm` supports efficient
high-precision evaluation::
>>> mp.dps = 10000
>>> a = agm(1,2)
>>> str(a)[-10:]
'1679581912'
**Mathematical relations**
The arithmetic-geometric mean may be used to evaluate the
following two parametric definite integrals:
.. math ::
I_1 = \int_0^{\infty}
\frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx
I_2 = \int_0^{\pi/2}
\frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx
We have::
>>> mp.dps = 15
>>> a = 3
>>> b = 4
>>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5
>>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5
>>> quad(f1, [0, inf])
0.451115405388492
>>> quad(f2, [0, pi/2])
0.451115405388492
>>> pi/(2*agm(a,b))
0.451115405388492
A formula for `\Gamma(1/4)`::
>>> gamma(0.25)
3.62560990822191
>>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2)))
3.62560990822191
**Possible issues**
The branch cut chosen for complex `a` and `b` is somewhat
arbitrary.
"""
gegenbauer = r"""
Evaluates the Gegenbauer polynomial, or ultraspherical polynomial,
.. math ::
C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a;
a+\frac{1}{2}; \frac{1}{2}(1-z)\right).
When `n` is a nonnegative integer, this formula gives a polynomial
in `z` of degree `n`, but all parameters are permitted to be
complex numbers. With `a = 1/2`, the Gegenbauer polynomial
reduces to a Legendre polynomial.
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> gegenbauer(3, 0.5, -10)
-2485.0
>>> gegenbauer(1000, 10, 100)
3.012757178975667428359374e+2322
>>> gegenbauer(2+3j, -0.75, -1000j)
(-5038991.358609026523401901 + 9414549.285447104177860806j)
Evaluation at negative integer orders::
>>> gegenbauer(-4, 2, 1.75)
-1.0
>>> gegenbauer(-4, 3, 1.75)
0.0
>>> gegenbauer(-4, 2j, 1.75)
0.0
>>> gegenbauer(-7, 0.5, 3)
8989.0
The Gegenbauer polynomials solve the differential equation::
>>> n, a = 4.5, 1+2j
>>> f = lambda z: gegenbauer(n, a, z)
>>> for z in [0, 0.75, -0.5j]:
... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z))
...
0.0
0.0
0.0
The Gegenbauer polynomials have generating function
`(1-2zt+t^2)^{-a}`::
>>> a, z = 2.5, 1
>>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3)
[1.0, 5.0, 15.0, 35.0]
>>> [gegenbauer(n,a,z) for n in range(4)]
[1.0, 5.0, 15.0, 35.0]
The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect
to the weight `(1-z^2)^{a-\frac{1}{2}}`::
>>> a, n, m = 2.5, 4, 5
>>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000)
>>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000)
>>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)**(a-0.5), [-1, 1]))
0.0
"""
laguerre = r"""
Gives the generalized (associated) Laguerre polynomial, defined by
.. math ::
L_n^a(z) = \frac{\Gamma(n+a+1)}{\Gamma(a+1) \Gamma(n+1)}
\,_1F_1(-n, a+1, z).
With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary
Laguerre polynomial, the sequence of which begins
`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = 1-2z+\frac{1}{2}z^2, \ldots`.
The Laguerre polynomials are orthogonal with respect to the weight
`z^a e^{-z}` on `[0, \infty)`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/laguerre.py
.. image :: /modules/mpmath/plots/laguerre.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> laguerre(5, 0, 0.25)
0.03726399739583333333333333
>>> laguerre(1+j, 0.5, 2+3j)
(4.474921610704496808379097 - 11.02058050372068958069241j)
>>> laguerre(2, 0, 10000)
49980001.0
>>> laguerre(2.5, 0, 10000)
-9.327764910194842158583189e+4328
The first few Laguerre polynomials, normalized to have integer
coefficients::
>>> for n in range(7):
... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n))
...
[1.0]
[1.0, -1.0]
[2.0, -4.0, 1.0]
[6.0, -18.0, 9.0, -1.0]
[24.0, -96.0, 72.0, -16.0, 1.0]
[120.0, -600.0, 600.0, -200.0, 25.0, -1.0]
[720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0]
Verifying orthogonality::
>>> Lm = lambda t: laguerre(m,a,t)
>>> Ln = lambda t: laguerre(n,a,t)
>>> a, n, m = 2.5, 2, 3
>>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf]))
0.0
"""
hermite = r"""
Evaluates the Hermite polynomial `H_n(z)`, which may be defined using
the recurrence
.. math ::
H_0(z) = 1
H_1(z) = 2z
H_{n+1}(z) = 2z H_n(z) - 2n H_{n-1}(z).
The Hermite polynomials are orthogonal on `(-\infty, \infty)` with
respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex
values of `n`, the Hermite function `H_n(z)` is defined as
.. math ::
H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2},
-\frac{1}{z^2}\right)
for `\Re{z} > 0`, or generally
.. math ::
H_n(z) = 2^n \sqrt{\pi} \left(
\frac{1}{\Gamma\left(\frac{1-n}{2}\right)}
\,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) -
\frac{2z}{\Gamma\left(-\frac{n}{2}\right)}
\,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right)
\right).
**Plots**
.. literalinclude :: /modules/mpmath/plots/hermite.py
.. image :: /modules/mpmath/plots/hermite.png
**Examples**
Evaluation for arbitrary arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hermite(0, 10)
1.0
>>> hermite(1, 10); hermite(2, 10)
20.0
398.0
>>> hermite(10000, 2)
4.950440066552087387515653e+19334
>>> hermite(3, -10**8)
-7999999999999998800000000.0
>>> hermite(-3, -10**8)
1.675159751729877682920301e+4342944819032534
>>> hermite(2+3j, -1+2j)
(-0.07652130602993513389421901 - 0.1084662449961914580276007j)
Coefficients of the first few Hermite polynomials are::
>>> for n in range(7):
... chop(taylor(lambda z: hermite(n, z), 0, n))
...
[1.0]
[0.0, 2.0]
[-2.0, 0.0, 4.0]
[0.0, -12.0, 0.0, 8.0]
[12.0, 0.0, -48.0, 0.0, 16.0]
[0.0, 120.0, 0.0, -160.0, 0.0, 32.0]
[-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0]
Values at `z = 0`::
>>> for n in range(-5, 9):
... hermite(n, 0)
...
0.02769459142039868792653387
0.08333333333333333333333333
0.2215567313631895034122709
0.5
0.8862269254527580136490837
1.0
0.0
-2.0
0.0
12.0
0.0
-120.0
0.0
1680.0
Hermite functions satisfy the differential equation::
>>> n = 4
>>> f = lambda z: hermite(n, z)
>>> z = 1.5
>>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z))
0.0
Verifying orthogonality::
>>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf]))
0.0
"""
jacobi = r"""
``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial
`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special
case of the hypergeometric function `\,_2F_1` given by:
.. math ::
P_n^{(a,b)}(x) = {n+a \choose n}
\,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right).
Note that this definition generalizes to nonintegral values
of `n`. When `n` is an integer, the hypergeometric series
terminates after a finite number of terms, giving
a polynomial in `x`.
**Evaluation of Jacobi polynomials**
A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> jacobi(4, 0.5, 0.25, 1)
2.4609375
>>> binomial(4+0.5, 4)
2.4609375
A Jacobi polynomial of degree `n` is equal to its
Taylor polynomial of degree `n`. The explicit
coefficients of Jacobi polynomials can therefore
be recovered easily using :func:`~mpmath.taylor`::
>>> for n in range(5):
... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n))
...
[1.0]
[-0.5, 2.5]
[-0.75, -1.5, 5.25]
[0.5, -3.5, -3.5, 10.5]
[0.625, 2.5, -11.25, -7.5, 20.625]
For nonintegral `n`, the Jacobi "polynomial" is no longer
a polynomial::
>>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4))
[0.309983, 1.84119, -1.26933, 1.26699, -1.34808]
**Orthogonality**
The Jacobi polynomials are orthogonal on the interval
`[-1, 1]` with respect to the weight function
`w(x) = (1-x)^a (1+x)^b`. That is,
`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to
zero if `m \ne n` and to a nonzero number if `m = n`.
The orthogonality is easy to verify using numerical
quadrature::
>>> P = jacobi
>>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x)
>>> a = 2
>>> b = 3
>>> m, n = 3, 4
>>> chop(quad(f, [-1, 1]), 1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.9047619047619
**Differential equation**
The Jacobi polynomials are solutions of the differential
equation
.. math ::
(1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0.
We can verify that :func:`~mpmath.jacobi` approximately satisfies
this equation::
>>> from mpmath import *
>>> mp.dps = 15
>>> a = 2.5
>>> b = 4
>>> n = 3
>>> y = lambda x: jacobi(n,a,b,x)
>>> x = pi
>>> A0 = n*(n+a+b+1)*y(x)
>>> A1 = (b-a-(a+b+2)*x)*diff(y,x)
>>> A2 = (1-x**2)*diff(y,x,2)
>>> nprint(A2 + A1 + A0, 1)
4.0e-12
The difference of order `10^{-12}` is as close to zero as
it could be at 15-digit working precision, since the terms
are large::
>>> A0, A1, A2
(26560.2328981879, -21503.7641037294, -5056.46879445852)
"""
legendre = r"""
``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`.
The Legendre polynomials are given by the formula
.. math ::
P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n.
Alternatively, they can be computed recursively using
.. math ::
P_0(x) = 1
P_1(x) = x
(n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x).
A third definition is in terms of the hypergeometric function
`\,_2F_1`, whereby they can be generalized to arbitrary `n`:
.. math ::
P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right)
**Plots**
.. literalinclude :: /modules/mpmath/plots/legendre.py
.. image :: /modules/mpmath/plots/legendre.png
**Basic evaluation**
The Legendre polynomials assume fixed values at the points
`x = -1` and `x = 1`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> nprint([legendre(n, 1) for n in range(6)])
[1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
>>> nprint([legendre(n, -1) for n in range(6)])
[1.0, -1.0, 1.0, -1.0, 1.0, -1.0]
The coefficients of Legendre polynomials can be recovered
using degree-`n` Taylor expansion::
>>> for n in range(5):
... nprint(chop(taylor(lambda x: legendre(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-0.5, 0.0, 1.5]
[0.0, -1.5, 0.0, 2.5]
[0.375, 0.0, -3.75, 0.0, 4.375]
The roots of Legendre polynomials are located symmetrically
on the interval `[-1, 1]`::
>>> for n in range(5):
... nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1]))
...
[]
[0.0]
[-0.57735, 0.57735]
[-0.774597, 0.0, 0.774597]
[-0.861136, -0.339981, 0.339981, 0.861136]
An example of an evaluation for arbitrary `n`::
>>> legendre(0.75, 2+4j)
(1.94952805264875 + 2.1071073099422j)
**Orthogonality**
The Legendre polynomials are orthogonal on `[-1, 1]` with respect
to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)`
integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`::
>>> m, n = 3, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1])
0.222222222222222
**Differential equation**
The Legendre polynomials satisfy the differential equation
.. math ::
((1-x^2) y')' + n(n+1) y = 0.
We can verify this numerically::
>>> n = 3.6
>>> x = 0.73
>>> P = legendre
>>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x)
>>> B = n*(n+1)*P(n,x)
>>> nprint(A+B,1)
9.0e-16
"""
legenp = r"""
Calculates the (associated) Legendre function of the first kind of
degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the first kind, `P_n(z)`. The parameters may be
complex numbers.
In terms of the Gauss hypergeometric function, the (associated) Legendre
function is defined as
.. math ::
P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
With *type=3* instead of *type=2*, the alternative
definition
.. math ::
\hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}}
\,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right).
is used. These functions correspond respectively to ``LegendreP[n,m,2,z]``
and ``LegendreP[n,m,3,z]`` in Mathematica.
The general solution of the (associated) Legendre differential equation
.. math ::
(1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0
is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants
`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the
second kind as implemented by :func:`~mpmath.legenq`.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenp(2, 0, 10); legendre(2, 10)
149.5
149.5
>>> legenp(-2, 0.5, 2.5)
(1.972260393822275434196053 - 1.972260393822275434196053j)
>>> legenp(2+3j, 1-j, -0.5+4j)
(-3.335677248386698208736542 - 5.663270217461022307645625j)
>>> chop(legenp(3, 2, -1.5, type=2))
28.125
>>> chop(legenp(3, 2, -1.5, type=3))
-28.125
Verifying the associated Legendre differential equation::
>>> n, m = 2, -0.5
>>> C1, C2 = 1, -3
>>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z)
>>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \
... (n*(n+1)-m**2/(1-z**2))*f(z)
>>> for z in [0, 2, -1.5, 0.5+2j]:
... chop(deq(mpmathify(z)))
...
0.0
0.0
0.0
0.0
"""
legenq = r"""
Calculates the (associated) Legendre function of the second kind of
degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary
Legendre function of the second kind, `Q_n(z)`. The parameters may be
complex numbers.
The Legendre functions of the second kind give a second set of
solutions to the (associated) Legendre differential equation.
(See :func:`~mpmath.legenp`.)
Unlike the Legendre functions of the first kind, they are not
polynomials of `z` for integer `n`, `m` but rational or logarithmic
functions with poles at `z = \pm 1`.
There are various ways to define Legendre functions of
the second kind, giving rise to different complex structure.
A version can be selected using the *type* keyword argument.
The *type=2* and *type=3* functions are given respectively by
.. math ::
Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)}
\left( \cos(\pi m) P_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right)
\hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m}
\left( \hat{P}_n^m(z) -
\frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right)
where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions
of the first kind. The formulas above should be understood as limits
when `m` is an integer.
These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``)
and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function
is essentially the same as the function defined in
Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead
of `(z^2-1)^{m/2}`, giving slightly different branches.
**Examples**
Evaluation for arbitrary parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> legenq(2, 0, 0.5)
-0.8186632680417568557122028
>>> legenq(-1.5, -2, 2.5)
(0.6655964618250228714288277 + 0.3937692045497259717762649j)
>>> legenq(2-j, 3+4j, -6+5j)
(-10001.95256487468541686564 - 6011.691337610097577791134j)
Different versions of the function::
>>> legenq(2, 1, 0.5)
0.7298060598018049369381857
>>> legenq(2, 1, 1.5)
(-7.902916572420817192300921 + 0.1998650072605976600724502j)
>>> legenq(2, 1, 0.5, type=3)
(2.040524284763495081918338 - 0.7298060598018049369381857j)
>>> chop(legenq(2, 1, 1.5, type=3))
-0.1998650072605976600724502
"""
chebyt = r"""
``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first
kind `T_n(x)`, defined by the identity
.. math ::
T_n(\cos x) = \cos(n x).
The Chebyshev polynomials of the first kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/chebyt.py
.. image :: /modules/mpmath/plots/chebyt.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n)))
...
[1.0]
[0.0, 1.0]
[-1.0, 0.0, 2.0]
[0.0, -3.0, 0.0, 4.0]
[1.0, 0.0, -8.0, 0.0, 8.0]
**Orthogonality**
The Chebyshev polynomials of the first kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = 1/\sqrt{1-x^2}`::
>>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2)
>>> m, n = 3, 4
>>> nprint(quad(f, [-1, 1]),1)
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.57079632596448
"""
chebyu = r"""
``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second
kind `U_n(x)`, defined by the identity
.. math ::
U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}.
The Chebyshev polynomials of the second kind are a special
case of the Jacobi polynomials, and by extension of the
hypergeometric function `\,_2F_1`. They can thus also be
evaluated for nonintegral `n`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/chebyu.py
.. image :: /modules/mpmath/plots/chebyu.png
**Basic evaluation**
The coefficients of the `n`-th polynomial can be recovered
using degree-`n` Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(5):
... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n)))
...
[1.0]
[0.0, 2.0]
[-1.0, 0.0, 4.0]
[0.0, -4.0, 0.0, 8.0]
[1.0, 0.0, -12.0, 0.0, 16.0]
**Orthogonality**
The Chebyshev polynomials of the second kind are orthogonal
on the interval `[-1, 1]` with respect to the weight
function `w(x) = \sqrt{1-x^2}`::
>>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2)
>>> m, n = 3, 4
>>> quad(f, [-1, 1])
0.0
>>> m, n = 4, 4
>>> quad(f, [-1, 1])
1.5707963267949
"""
# Docstring for besselj(): Bessel function of the first kind J_n(x).
# Raw string; contains executable doctests — do not edit output lines casually.
besselj = r"""
``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind
`J_n(x)`. Bessel functions of the first kind are defined as
solutions of the differential equation
.. math ::
x^2 y'' + x y' + (x^2 - n^2) y = 0
which appears, among other things, when solving the radial
part of Laplace's equation in cylindrical coordinates. This
equation has two solutions for given `n`, where the
`J_n`-function is the solution that is nonsingular at `x = 0`.
For positive integer `n`, `J_n(x)` behaves roughly like a sine
(odd `n`) or cosine (even `n`) multiplied by a magnitude factor
that decays slowly as `x \to \pm\infty`.
Generally, `J_n` is a special case of the hypergeometric
function `\,_0F_1`:
.. math ::
J_n(x) = \frac{x^n}{2^n \Gamma(n+1)}
\,_0F_1\left(n+1,-\frac{x^2}{4}\right)
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} J_n(x)
is computed.
**Plots**
.. literalinclude :: /modules/mpmath/plots/besselj.py
.. image :: /modules/mpmath/plots/besselj.png
.. literalinclude :: /modules/mpmath/plots/besselj_c.py
.. image :: /modules/mpmath/plots/besselj_c.png
**Examples**
Evaluation is supported for arbitrary arguments, and at
arbitrary precision::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> besselj(2, 1000)
-0.024777229528606
>>> besselj(4, 0.75)
0.000801070086542314
>>> besselj(2, 1000j)
(-2.48071721019185e+432 + 6.41567059811949e-437j)
>>> mp.dps = 25
>>> besselj(0.75j, 3+4j)
(-2.778118364828153309919653 - 1.5863603889018621585533j)
>>> mp.dps = 50
>>> besselj(1, pi)
0.28461534317975275734531059968613140570981118184947
Arguments may be large::
>>> mp.dps = 25
>>> besselj(0, 10000)
-0.007096160353388801477265164
>>> besselj(0, 10**10)
0.000002175591750246891726859055
>>> besselj(2, 10**100)
7.337048736538615712436929e-51
>>> besselj(2, 10**5*j)
(-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j)
The Bessel functions of the first kind satisfy simple
symmetries around `x = 0`::
>>> mp.dps = 15
>>> nprint([besselj(n,0) for n in range(5)])
[1.0, 0.0, 0.0, 0.0, 0.0]
>>> nprint([besselj(n,pi) for n in range(5)])
[-0.304242, 0.284615, 0.485434, 0.333458, 0.151425]
>>> nprint([besselj(n,-pi) for n in range(5)])
[-0.304242, -0.284615, 0.485434, -0.333458, 0.151425]
Roots of Bessel functions are often used::
>>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]])
[2.40483, 5.52008, 8.65373, 11.7915, 14.9309]
>>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]])
[3.83171, 7.01559, 10.1735, 13.3237, 16.4706]
The roots are not periodic, but the distance between successive
roots asymptotically approaches `2 \pi`. Bessel functions of
the first kind have the following normalization::
>>> quadosc(j0, [0, inf], period=2*pi)
1.0
>>> quadosc(j1, [0, inf], period=2*pi)
1.0
For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a
trigonometric function::
>>> x = 10
>>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x)
(-0.13726373575505, -0.13726373575505)
>>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x)
(-0.211708866331398, -0.211708866331398)
Derivatives of any order can be computed (negative orders
correspond to integration)::
>>> mp.dps = 25
>>> besselj(0, 7.5, 1)
-0.1352484275797055051822405
>>> diff(lambda x: besselj(0,x), 7.5)
-0.1352484275797055051822405
>>> besselj(0, 7.5, 10)
-0.1377811164763244890135677
>>> diff(lambda x: besselj(0,x), 7.5, 10)
-0.1377811164763244890135677
>>> besselj(0,7.5,-1) - besselj(0,3.5,-1)
-0.1241343240399987693521378
>>> quad(j0, [3.5, 7.5])
-0.1241343240399987693521378
Differentiation with a noninteger order gives the fractional derivative
in the sense of the Riemann-Liouville differintegral, as computed by
:func:`~mpmath.differint`::
>>> mp.dps = 15
>>> besselj(1, 3.5, 0.75)
-0.385977722939384
>>> differint(lambda x: besselj(1, x), 3.5, 0.75)
-0.385977722939384
"""
# Docstring for besseli(): modified Bessel function of the first kind I_n(x).
# Raw string; contains executable doctests — do not edit output lines casually.
besseli = r"""
``besseli(n, x, derivative=0)`` gives the modified Bessel function of the
first kind,
.. math ::
I_n(x) = i^{-n} J_n(ix).
With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} I_n(x)
is computed.
**Plots**
.. literalinclude :: /modules/mpmath/plots/besseli.py
.. image :: /modules/mpmath/plots/besseli.png
.. literalinclude :: /modules/mpmath/plots/besseli_c.py
.. image :: /modules/mpmath/plots/besseli_c.png
**Examples**
Some values of `I_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besseli(0,0)
1.0
>>> besseli(1,0)
0.0
>>> besseli(0,1)
1.266065877752008335598245
>>> besseli(3.5, 2+3j)
(-0.2904369752642538144289025 - 0.4469098397654815837307006j)
Arguments may be large::
>>> besseli(2, 1000)
2.480717210191852440616782e+432
>>> besseli(2, 10**10)
4.299602851624027900335391e+4342944813
>>> besseli(2, 6000+10000j)
(-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j)
For integers `n`, the following integral representation holds::
>>> mp.dps = 15
>>> n = 3
>>> x = 2.3
>>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi
0.349223221159309
>>> besseli(n,x)
0.349223221159309
Derivatives and antiderivatives of any order can be computed::
>>> mp.dps = 25
>>> besseli(2, 7.5, 1)
195.8229038931399062565883
>>> diff(lambda x: besseli(2,x), 7.5)
195.8229038931399062565883
>>> besseli(2, 7.5, 10)
153.3296508971734525525176
>>> diff(lambda x: besseli(2,x), 7.5, 10)
153.3296508971734525525176
>>> besseli(2,7.5,-1) - besseli(2,3.5,-1)
202.5043900051930141956876
>>> quad(lambda x: besseli(2,x), [3.5, 7.5])
202.5043900051930141956876
"""
# Docstring for bessely(): Bessel function of the second kind Y_n(x).
# Raw string; contains executable doctests — do not edit output lines casually.
bessely = r"""
``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind,
.. math ::
Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}.
For `n` an integer, this formula should be understood as a
limit. With *derivative* = `m \ne 0`, the `m`-th derivative
.. math ::
\frac{d^m}{dx^m} Y_n(x)
is computed.
**Plots**
.. literalinclude :: /modules/mpmath/plots/bessely.py
.. image :: /modules/mpmath/plots/bessely.png
.. literalinclude :: /modules/mpmath/plots/bessely_c.py
.. image :: /modules/mpmath/plots/bessely_c.png
**Examples**
Some values of `Y_n(x)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bessely(0,0), bessely(1,0), bessely(2,0)
(-inf, -inf, -inf)
>>> bessely(1, pi)
0.3588729167767189594679827
>>> bessely(0.5, 3+4j)
(9.242861436961450520325216 - 3.085042824915332562522402j)
Arguments may be large::
>>> bessely(0, 10000)
0.00364780555898660588668872
>>> bessely(2.5, 10**50)
-4.8952500412050989295774e-26
>>> bessely(2.5, -10**50)
(0.0 + 4.8952500412050989295774e-26j)
Derivatives and antiderivatives of any order can be computed::
>>> bessely(2, 3.5, 1)
0.3842618820422660066089231
>>> diff(lambda x: bessely(2, x), 3.5)
0.3842618820422660066089231
>>> bessely(0.5, 3.5, 1)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(0.5, x), 3.5)
-0.2066598304156764337900417
>>> diff(lambda x: bessely(2, x), 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 0.5, 10)
-208173867409.5547350101511
>>> bessely(2, 100.5, 100)
0.02668487547301372334849043
>>> quad(lambda x: bessely(2,x), [1,3])
-1.377046859093181969213262
>>> bessely(2,3,-1) - bessely(2,1,-1)
-1.377046859093181969213262
"""
besselk = r"""
``besselk(n, x)`` gives the modified Bessel function of the
second kind,
.. math ::
K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)}
For `n` an integer, this formula should be understood as a
limit.
**Plots**
.. literalinclude :: /modules/mpmath/plots/besselk.py
.. image :: /modules/mpmath/plots/besselk.png
.. literalinclude :: /modules/mpmath/plots/besselk_c.py
.. image :: /modules/mpmath/plots/besselk_c.png
**Examples**
Evaluation is supported for arbitrary complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> besselk(0,1)
0.4210244382407083333356274
>>> besselk(0, -1)
(0.4210244382407083333356274 - 3.97746326050642263725661j)
>>> besselk(3.5, 2+3j)
(-0.02090732889633760668464128 + 0.2464022641351420167819697j)
>>> besselk(2+3j, 0.5)
(0.9615816021726349402626083 + 0.1918250181801757416908224j)
Arguments may be large::
>>> besselk(0, 100)
4.656628229175902018939005e-45
>>> besselk(1, 10**6)
4.131967049321725588398296e-434298
>>> besselk(1, 10**6*j)
(0.001140348428252385844876706 - 0.0005200017201681152909000961j)
>>> besselk(4.5, fmul(10**50, j, exact=True))
(1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j)
The point `x = 0` is a singularity (logarithmic if `n = 0`)::
>>> besselk(0,0)
+inf
>>> besselk(1,0)
+inf
>>> for n in range(-4, 5):
... print(besselk(n, '1e-1000'))
...
4.8e+4001
8.0e+3000
2.0e+2000
1.0e+1000
2302.701024509704096466802
1.0e+1000
2.0e+2000
8.0e+3000
4.8e+4001
"""
# Docstring for hankel1(): Hankel function of the first kind, J_n(x) + i*Y_n(x).
# Raw string; contains executable doctests — do not edit output lines casually.
hankel1 = r"""
``hankel1(n,x)`` computes the Hankel function of the first kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(1)}(x) = J_n(x) + i Y_n(x).
**Plots**
.. literalinclude :: /modules/mpmath/plots/hankel1.py
.. image :: /modules/mpmath/plots/hankel1.png
.. literalinclude :: /modules/mpmath/plots/hankel1_c.py
.. image :: /modules/mpmath/plots/hankel1_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel1(2, pi)
(0.4854339326315091097054957 - 0.0999007139290278787734903j)
>>> hankel1(3.5, pi)
(0.2340002029630507922628888 - 0.6419643823412927142424049j)
"""
# Docstring for hankel2(): Hankel function of the second kind, J_n(x) - i*Y_n(x).
# Raw string; contains executable doctests — do not edit output lines casually.
hankel2 = r"""
``hankel2(n,x)`` computes the Hankel function of the second kind,
which is the complex combination of Bessel functions given by
.. math ::
H_n^{(2)}(x) = J_n(x) - i Y_n(x).
**Plots**
.. literalinclude :: /modules/mpmath/plots/hankel2.py
.. image :: /modules/mpmath/plots/hankel2.png
.. literalinclude :: /modules/mpmath/plots/hankel2_c.py
.. image :: /modules/mpmath/plots/hankel2_c.png
**Examples**
The Hankel function is generally complex-valued::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> hankel2(2, pi)
(0.4854339326315091097054957 + 0.0999007139290278787734903j)
>>> hankel2(3.5, pi)
(0.2340002029630507922628888 + 0.6419643823412927142424049j)
"""
lambertw = r"""
The Lambert W function `W(z)` is defined as the inverse function
of `w \exp(w)`. In other words, the value of `W(z)` is such that
`z = W(z) \exp(W(z))` for any complex number `z`.
The Lambert W function is a multivalued function with infinitely
many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch
gives a different solution `w` of the equation `z = w \exp(w)`.
All branches are supported by :func:`~mpmath.lambertw`:
* ``lambertw(z)`` gives the principal solution (branch 0)
* ``lambertw(z, k)`` gives the solution on branch `k`
The Lambert W function has two partially real branches: the
principal branch (`k = 0`) is real for real `z > -1/e`, and the
`k = -1` branch is real for `-1/e < z < 0`. All branches except
`k = 0` have a logarithmic singularity at `z = 0`.
The definition, implementation and choice of branches
is based on [Corless]_.
**Plots**
.. literalinclude :: /modules/mpmath/plots/lambertw.py
.. image :: /modules/mpmath/plots/lambertw.png
.. literalinclude :: /modules/mpmath/plots/lambertw_c.py
.. image :: /modules/mpmath/plots/lambertw_c.png
**Basic examples**
The Lambert W function is the inverse of `w \exp(w)`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> w = lambertw(1)
>>> w
0.5671432904097838729999687
>>> w*exp(w)
1.0
Any branch gives a valid inverse::
>>> w = lambertw(1, k=3)
>>> w
(-2.853581755409037807206819 + 17.11353553941214591260783j)
>>> w = lambertw(1, k=25)
>>> w
(-5.047020464221569709378686 + 155.4763860949415867162066j)
>>> chop(w*exp(w))
1.0
**Applications to equation-solving**
The Lambert W function may be used to solve various kinds of
equations, such as finding the value of the infinite power
tower `z^{z^{z^{\ldots}}}`::
>>> def tower(z, n):
... if n == 0:
... return z
... return z ** tower(z, n-1)
...
>>> tower(mpf(0.5), 100)
0.6411857445049859844862005
>>> -lambertw(-log(0.5))/log(0.5)
0.6411857445049859844862005
**Properties**
The Lambert W function grows roughly like the natural logarithm
for large arguments::
>>> lambertw(1000); log(1000)
5.249602852401596227126056
6.907755278982137052053974
>>> lambertw(10**100); log(10**100)
224.8431064451185015393731
230.2585092994045684017991
The principal branch of the Lambert W function has a rational
Taylor series expansion around `z = 0`::
>>> nprint(taylor(lambertw, 0, 6), 10)
[0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8]
Some special values and limits are::
>>> lambertw(0)
0.0
>>> lambertw(1)
0.5671432904097838729999687
>>> lambertw(e)
1.0
>>> lambertw(inf)
+inf
>>> lambertw(0, k=-1)
-inf
>>> lambertw(0, k=3)
-inf
>>> lambertw(inf, k=2)
(+inf + 12.56637061435917295385057j)
>>> lambertw(inf, k=3)
(+inf + 18.84955592153875943077586j)
>>> lambertw(-inf, k=3)
(+inf + 21.9911485751285526692385j)
The `k = 0` and `k = -1` branches join at `z = -1/e` where
`W(z) = -1` for both branches. Since `-1/e` can only be represented
approximately with binary floating-point numbers, evaluating the
Lambert W function at this point only gives `-1` approximately::
>>> lambertw(-1/e, 0)
-0.9999999999998371330228251
>>> lambertw(-1/e, -1)
-1.000000000000162866977175
If `-1/e` happens to round in the negative direction, there might be
a small imaginary part::
>>> mp.dps = 15
>>> lambertw(-1/e)
(-1.0 + 8.22007971483662e-9j)
>>> lambertw(-1/e+eps)
-0.999999966242188
**References**
1. [Corless]_
"""
# Docstring for barnesg(): Barnes G-function, generalizing superfactorials.
# Raw string; contains executable doctests — do not edit output lines casually.
# Fix: repaired the broken phrase "we have have relation" (duplicated word,
# missing article) — prose-only change, no doctest output affected.
barnesg = r"""
Evaluates the Barnes G-function, which generalizes the
superfactorial (:func:`~mpmath.superfac`) and by extension also the
hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers
in an analogous way to how the gamma function generalizes
the ordinary factorial.
The Barnes G-function may be defined in terms of a Weierstrass
product:
.. math ::
G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2}
\prod_{n=1}^\infty
\left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right]
For positive integers `n`, we have the relation to superfactorials
`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`.
**Examples**
Some elementary values and limits of the Barnes G-function::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> barnesg(1), barnesg(2), barnesg(3)
(1.0, 1.0, 1.0)
>>> barnesg(4)
2.0
>>> barnesg(5)
12.0
>>> barnesg(6)
288.0
>>> barnesg(7)
34560.0
>>> barnesg(8)
24883200.0
>>> barnesg(inf)
+inf
>>> barnesg(0), barnesg(-1), barnesg(-2)
(0.0, 0.0, 0.0)
Closed-form values are known for some rational arguments::
>>> barnesg('1/2')
0.603244281209446
>>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3)
0.603244281209446
>>> barnesg('1/4')
0.29375596533861
>>> nthroot(exp('3/8')/exp(catalan/pi)/
...      gamma(0.25)**3/sqrt(glaisher)**9, 4)
0.29375596533861
The Barnes G-function satisfies the functional equation
`G(z+1) = \Gamma(z) G(z)`::
>>> z = pi
>>> barnesg(z+1)
2.39292119327948
>>> gamma(z)*barnesg(z)
2.39292119327948
The asymptotic growth rate of the Barnes G-function is related to
the Glaisher-Kinkelin constant::
>>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)*
...     (2*pi)**(n/2)*exp(-3*n**2/4)), inf)
0.847536694177301
>>> exp('1/12')/glaisher
0.847536694177301
The Barnes G-function can be differentiated in closed form::
>>> z = 3
>>> diff(barnesg, z)
0.264507203401607
>>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2)
0.264507203401607
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> barnesg(6.5)
2548.7457695685
>>> barnesg(-pi)
0.00535976768353037
>>> barnesg(3+4j)
(-0.000676375932234244 - 4.42236140124728e-5j)
>>> mp.dps = 50
>>> barnesg(1/sqrt(2))
0.81305501090451340843586085064413533788206204124732
>>> q = barnesg(10j)
>>> q.real
0.000000000021852360840356557241543036724799812371995850552234
>>> q.imag
-0.00000000000070035335320062304849020654215545839053210041457588
>>> mp.dps = 15
>>> barnesg(100)
3.10361006263698e+6626
>>> barnesg(-101)
0.0
>>> barnesg(-10.5)
5.94463017605008e+25
>>> barnesg(-10000.5)
-6.14322868174828e+167480422
>>> barnesg(1000j)
(5.21133054865546e-1173597 + 4.27461836811016e-1173597j)
>>> barnesg(-1000+1000j)
(2.43114569750291e+1026623 + 2.24851410674842e+1026623j)
**References**
1. Whittaker & Watson, *A Course of Modern Analysis*,
Cambridge University Press, 4th edition (1927), p.264
2. http://en.wikipedia.org/wiki/Barnes_G-function
3. http://mathworld.wolfram.com/BarnesG-Function.html
"""
superfac = r"""
Computes the superfactorial, defined as the product of
consecutive factorials
.. math ::
\mathrm{sf}(n) = \prod_{k=1}^n k!
For general complex `z`, `\mathrm{sf}(z)` is defined
in terms of the Barnes G-function (see :func:`~mpmath.barnesg`).
**Examples**
The first few superfactorials are (OEIS A000178)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, superfac(n)))
...
0 1.0
1 1.0
2 2.0
3 12.0
4 288.0
5 34560.0
6 24883200.0
7 125411328000.0
8 5.05658474496e+15
9 1.83493347225108e+21
Superfactorials grow very rapidly::
>>> superfac(1000)
3.24570818422368e+1177245
>>> superfac(10**10)
2.61398543581249e+467427913956904067453
Evaluation is supported for arbitrary arguments::
>>> mp.dps = 25
>>> superfac(pi)
17.20051550121297985285333
>>> superfac(2+3j)
(-0.005915485633199789627466468 + 0.008156449464604044948738263j)
>>> diff(superfac, 1)
0.2645072034016070205673056
**References**
1. http://www.research.att.com/~njas/sequences/A000178
"""
hyperfac = r"""
Computes the hyperfactorial, defined for integers as the product
.. math ::
H(n) = \prod_{k=1}^n k^k.
The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`.
It can be defined more generally in terms of the Barnes G-function (see
:func:`~mpmath.barnesg`) and the gamma function by the formula
.. math ::
H(z) = \frac{\Gamma(z+1)^z}{G(z)}.
The extension to complex numbers can also be done via
the integral representation
.. math ::
H(z) = (2\pi)^{-z/2} \exp \left[
{z+1 \choose 2} + \int_0^z \log(t!)\,dt
\right].
**Examples**
The rapidly-growing sequence of hyperfactorials begins
(OEIS A002109)::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(10):
... print("%s %s" % (n, hyperfac(n)))
...
0 1.0
1 1.0
2 4.0
3 108.0
4 27648.0
5 86400000.0
6 4031078400000.0
7 3.3197663987712e+18
8 5.56964379417266e+25
9 2.15779412229419e+34
Some even larger hyperfactorials are::
>>> hyperfac(1000)
5.46458120882585e+1392926
>>> hyperfac(10**10)
4.60408207642219e+489142638002418704309
The hyperfactorial can be evaluated for arbitrary arguments::
>>> hyperfac(0.5)
0.880449235173423
>>> diff(hyperfac, 1)
0.581061466795327
>>> hyperfac(pi)
205.211134637462
>>> hyperfac(-10+1j)
(3.01144471378225e+46 - 2.45285242480185e+46j)
The recurrence property of the hyperfactorial holds
generally::
>>> z = 3-4*j
>>> hyperfac(z)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z**z * hyperfac(z-1)
(-4.49795891462086e-7 - 6.33262283196162e-7j)
>>> z = mpf(-0.6)
>>> chop(z**z * hyperfac(z-1))
1.28170142849352
>>> hyperfac(z)
1.28170142849352
The hyperfactorial may also be computed using the integral
definition::
>>> z = 2.5
>>> hyperfac(z)
15.9842119922237
>>> (2*pi)**(-z/2)*exp(binomial(z+1,2) +
... quad(lambda t: loggamma(t+1), [0, z]))
15.9842119922237
:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation::
>>> mp.dps = 50
>>> hyperfac(10)
215779412229418562091680268288000000000000000.0
>>> hyperfac(1/sqrt(2))
0.89404818005227001975423476035729076375705084390942
**References**
1. http://www.research.att.com/~njas/sequences/A002109
2. http://mathworld.wolfram.com/Hyperfactorial.html
"""
# Docstring for rgamma(): reciprocal gamma function 1/Gamma(z).
# Raw string; contains executable doctests — do not edit output lines casually.
rgamma = r"""
Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This
function evaluates to zero at the poles
of the gamma function, `z = 0, -1, -2, \ldots`.
**Examples**
Basic examples::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> rgamma(1)
1.0
>>> rgamma(4)
0.1666666666666666666666667
>>> rgamma(0); rgamma(-1)
0.0
0.0
>>> rgamma(1000)
2.485168143266784862783596e-2565
>>> rgamma(inf)
0.0
A definite integral that can be evaluated in terms of elementary
integrals::
>>> quad(rgamma, [0,inf])
2.807770242028519365221501
>>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf])
2.807770242028519365221501
"""
# Docstring for loggamma(): principal branch of the log-gamma function.
# Raw string; contains executable doctests — do not edit output lines casually.
loggamma = r"""
Computes the principal branch of the log-gamma function,
`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many
complex branch cuts, the principal log-gamma function only has a single
branch cut along the negative half-axis. The principal branch
continuously matches the asymptotic Stirling expansion
.. math ::
\ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} +
\left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}).
The real parts of both functions agree, but their imaginary
parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`.
They coincide for `z \in \mathbb{R}, z > 0`.
Computationally, it is advantageous to use :func:`~mpmath.loggamma`
instead of :func:`~mpmath.gamma` for extremely large arguments.
**Examples**
Comparing with `\ln(\Gamma(z))`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> loggamma('13.2'); log(gamma('13.2'))
20.49400419456603678498394
20.49400419456603678498394
>>> loggamma(3+4j)
(-1.756626784603784110530604 + 4.742664438034657928194889j)
>>> log(gamma(3+4j))
(-1.756626784603784110530604 - 1.540520869144928548730397j)
>>> log(gamma(3+4j)) + 2*pi*j
(-1.756626784603784110530604 + 4.742664438034657928194889j)
Note the imaginary parts for negative arguments::
>>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5)
(1.265512123484645396488946 - 3.141592653589793238462643j)
(0.8600470153764810145109327 - 6.283185307179586476925287j)
(-0.05624371649767405067259453 - 9.42477796076937971538793j)
Some special values::
>>> loggamma(1); loggamma(2)
0.0
0.0
>>> loggamma(3); +ln2
0.6931471805599453094172321
0.6931471805599453094172321
>>> loggamma(3.5); log(15*sqrt(pi)/8)
1.200973602347074224816022
1.200973602347074224816022
>>> loggamma(inf)
+inf
Huge arguments are permitted::
>>> loggamma('1e30')
6.807755278982137052053974e+31
>>> loggamma('1e300')
6.897755278982137052053974e+302
>>> loggamma('1e3000')
6.906755278982137052053974e+3003
>>> loggamma('1e100000000000000000000')
2.302585092994045684007991e+100000000000000000020
>>> loggamma('1e30j')
(-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j)
>>> loggamma('1e300j')
(-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j)
>>> loggamma('1e3000j')
(-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j)
The log-gamma function can be integrated analytically
on any interval of unit length::
>>> z = 0
>>> quad(loggamma, [z,z+1]); log(2*pi)/2
0.9189385332046727417803297
0.9189385332046727417803297
>>> z = 3+4j
>>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
(-0.9619286014994750641314421 + 5.219637303741238195688575j)
The derivatives of the log-gamma function are given by the
polygamma function (:func:`~mpmath.psi`)::
>>> diff(loggamma, -4+3j); psi(0, -4+3j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
(1.688493531222971393607153 + 2.554898911356806978892748j)
>>> diff(loggamma, -4+3j, 2); psi(1, -4+3j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
(-0.1539414829219882371561038 - 0.1020485197430267719746479j)
The log-gamma function satisfies an additive form of the
recurrence relation for the ordinary gamma function::
>>> z = 2+3j
>>> loggamma(z); loggamma(z+1) - log(z)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
(-2.092851753092733349564189 + 2.302396543466867626153708j)
"""
# Docstring for siegeltheta(): Riemann-Siegel theta function (phase for Z-function).
# Raw string; contains executable doctests — do not edit output lines casually.
siegeltheta = r"""
Computes the Riemann-Siegel theta function,
.. math ::
\theta(t) = \frac{
\log\Gamma\left(\frac{1+2it}{4}\right) -
\log\Gamma\left(\frac{1-2it}{4}\right)
}{2i} - \frac{\log \pi}{2} t.
The Riemann-Siegel theta function is important in
providing the phase factor for the Z-function
(see :func:`~mpmath.siegelz`). Evaluation is supported for real and
complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegeltheta(0)
0.0
>>> siegeltheta(inf)
+inf
>>> siegeltheta(-inf)
-inf
>>> siegeltheta(1)
-1.767547952812290388302216
>>> siegeltheta(10+0.25j)
(-3.068638039426838572528867 + 0.05804937947429712998395177j)
Arbitrary derivatives may be computed with derivative = k
>>> siegeltheta(1234, derivative=2)
0.0004051864079114053109473741
>>> diff(siegeltheta, 1234, n=2)
0.0004051864079114053109473741
The Riemann-Siegel theta function has odd symmetry around `t = 0`,
two local extreme points and three real roots including 0 (located
symmetrically)::
>>> nprint(chop(taylor(siegeltheta, 0, 5)))
[0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218]
>>> findroot(diffun(siegeltheta), 7)
6.28983598883690277966509
>>> findroot(siegeltheta, 20)
17.84559954041086081682634
For large `t`, there is a famous asymptotic formula
for `\theta(t)`, to first order given by::
>>> t = mpf(10**6)
>>> siegeltheta(t)
5488816.353078403444882823
>>> -t*log(2*pi/t)/2-t/2
5488816.745777464310273645
"""
# Docstring for grampoint(): n-th Gram point, solving theta(g_n) = pi*n.
# Raw string; contains executable doctests — do not edit output lines casually.
grampoint = r"""
Gives the `n`-th Gram point `g_n`, defined as the solution
to the equation `\theta(g_n) = \pi n` where `\theta(t)`
is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`).
The first few Gram points are::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> grampoint(0)
17.84559954041086081682634
>>> grampoint(1)
23.17028270124630927899664
>>> grampoint(2)
27.67018221781633796093849
>>> grampoint(3)
31.71797995476405317955149
Checking the definition::
>>> siegeltheta(grampoint(3))
9.42477796076937971538793
>>> 3*pi
9.42477796076937971538793
A large Gram point::
>>> grampoint(10**10)
3293531632.728335454561153
Gram points are useful when studying the Z-function
(:func:`~mpmath.siegelz`). See the documentation of that function
for additional examples.
:func:`~mpmath.grampoint` can solve the defining equation for
nonintegral `n`. There is a fixed point where `g(x) = x`::
>>> findroot(lambda x: grampoint(x) - x, 10000)
9146.698193171459265866198
**References**
1. http://mathworld.wolfram.com/GramPoint.html
"""
# Docstring for siegelz(): Riemann-Siegel Z-function, exp(i*theta(t))*zeta(1/2+it).
# Raw string; contains executable doctests — do not edit output lines casually.
siegelz = r"""
Computes the Z-function, also known as the Riemann-Siegel Z function,
.. math ::
Z(t) = e^{i \theta(t)} \zeta(1/2+it)
where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`)
and where `\theta(t)` denotes the Riemann-Siegel theta function
(see :func:`~mpmath.siegeltheta`).
Evaluation is supported for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> siegelz(1)
-0.7363054628673177346778998
>>> siegelz(3+4j)
(-0.1852895764366314976003936 - 0.2773099198055652246992479j)
The first four derivatives are supported, using the
optional *derivative* keyword argument::
>>> siegelz(1234567, derivative=3)
56.89689348495089294249178
>>> diff(siegelz, 1234567, n=3)
56.89689348495089294249178
The Z-function has a Maclaurin expansion::
>>> nprint(chop(taylor(siegelz, 0, 4)))
[-1.46035, 0.0, 2.73588, 0.0, -8.39357]
The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the
critical line `s = 1/2+it` (i.e. for real arguments `t`
to `Z`). Its zeros coincide with those of the Riemann zeta
function::
>>> findroot(siegelz, 14)
14.13472514173469379045725
>>> findroot(siegelz, 20)
21.02203963877155499262848
>>> findroot(zeta, 0.5+14j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+20j)
(0.5 + 21.02203963877155499262848j)
Since the Z-function is real-valued on the critical line
(and unlike `|\zeta(s)|` analytic), it is useful for
investigating the zeros of the Riemann zeta function.
For example, one can use a root-finding algorithm based
on sign changes::
>>> findroot(siegelz, [100, 200], solver='bisect')
176.4414342977104188888926
To locate roots, Gram points `g_n` which can be computed
by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is
positive for two consecutive `n`, then `Z(t)` must have
a zero between those points::
>>> g10 = grampoint(10)
>>> g11 = grampoint(11)
>>> (-1)**10 * siegelz(g10) > 0
True
>>> (-1)**11 * siegelz(g11) > 0
True
>>> findroot(siegelz, [g10, g11], solver='bisect')
56.44624769706339480436776
>>> g10, g11
(54.67523744685325626632663, 57.54516517954725443703014)
"""
riemannr = r"""
Evaluates the Riemann R function, a smooth approximation of the
prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann
R function gives a fast numerical approximation useful e.g. to
roughly estimate the number of primes in a given interval.
The Riemann R function is computed using the rapidly convergent Gram
series,
.. math ::
R(x) = 1 + \sum_{k=1}^{\infty}
\frac{\log^k x}{k k! \zeta(k+1)}.
From the Gram series, one sees that the Riemann R function is a
well-defined analytic function (except for a branch cut along
the negative real half-axis); it can be evaluated for arbitrary
real or complex arguments.
The Riemann R function gives a very accurate approximation
of the prime counting function. For example, it is wrong by at
most 2 for `x < 1000`, and for `x = 10^9` differs from the exact
value of `\pi(x)` by 79, or less than two parts in a million.
It is about 10 times more accurate than the logarithmic integral
estimate (see :func:`~mpmath.li`), which however is even faster to evaluate.
It is orders of magnitude more accurate than the extremely
fast `x/\log x` estimate.
**Examples**
For small arguments, the Riemann R function almost exactly
gives the prime counting function if rounded to the nearest
integer::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> primepi(50), riemannr(50)
(15, 14.9757023241462)
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100))
1
>>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300))
2
The Riemann R function can be evaluated for arguments far too large
for exact determination of `\pi(x)` to be computationally
feasible with any presently known algorithm::
>>> riemannr(10**30)
1.46923988977204e+28
>>> riemannr(10**100)
4.3619719871407e+97
>>> riemannr(10**1000)
4.3448325764012e+996
A comparison of the Riemann R function and logarithmic integral estimates
for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`.
The fractional error is shown in parentheses::
>>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534]
>>> for n, p in enumerate(exact):
... n += 1
... r, l = riemannr(10**n), li(10**n)
... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3)
... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr))
...
1 4 4.56458314100509(0.141) 6.1655995047873(0.541)
2 25 25.6616332669242(0.0265) 30.1261415840796(0.205)
3 168 168.359446281167(0.00214) 177.609657990152(0.0572)
4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139)
5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394)
6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165)
7 664579 664667.447564748(0.000133) 664918.405048569(0.000511)
8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131)
9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5)
The derivative of the Riemann R function gives the approximate
probability for a number of magnitude `x` to be prime::
>>> diff(riemannr, 1000)
0.141903028110784
>>> mpf(primepi(1050) - primepi(950)) / 100
0.15
Evaluation is supported for arbitrary arguments and at arbitrary
precision::
>>> mp.dps = 30
>>> riemannr(7.5)
3.72934743264966261918857135136
>>> riemannr(-4+2j)
(-0.551002208155486427591793957644 + 2.16966398138119450043195899746j)
"""
# Docstring for primepi(): exact prime counting function pi(x).
# Raw string; contains executable doctests — do not edit output lines casually.
primepi = r"""
Evaluates the prime counting function, `\pi(x)`, which gives
the number of primes less than or equal to `x`. The argument
`x` may be fractional.
The prime counting function is very expensive to evaluate
precisely for large `x`, and the present implementation is
not optimized in any way. For numerical approximation of the
prime counting function, it is better to use :func:`~mpmath.primepi2`
or :func:`~mpmath.riemannr`.
Some values of the prime counting function::
>>> from mpmath import *
>>> [primepi(k) for k in range(20)]
[0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8]
>>> primepi(3.5)
2
>>> primepi(100000)
9592
"""
# Docstring for primepi2(): interval bounds for pi(x) via Schoenfeld's inequality.
# Raw string; contains executable doctests — do not edit output lines casually.
primepi2 = r"""
Returns an interval (as an ``mpi`` instance) providing bounds
for the value of the prime counting function `\pi(x)`. For small
`x`, :func:`~mpmath.primepi2` returns an exact interval based on
the output of :func:`~mpmath.primepi`. For `x > 2656`, a loose interval
based on Schoenfeld's inequality
.. math ::
|\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi}
is returned. This estimate is rigorous assuming the truth of
the Riemann hypothesis, and can be computed very quickly.
**Examples**
Exact values of the prime counting function for small `x`::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> iv.dps = 15; iv.pretty = True
>>> primepi2(10)
[4.0, 4.0]
>>> primepi2(100)
[25.0, 25.0]
>>> primepi2(1000)
[168.0, 168.0]
Loose intervals are generated for moderately large `x`:
>>> primepi2(10000), primepi(10000)
([1209.0, 1283.0], 1229)
>>> primepi2(50000), primepi(50000)
([5070.0, 5263.0], 5133)
As `x` increases, the absolute error gets worse while the relative
error improves. The exact value of `\pi(10^{23})` is
1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant
digits::
>>> p = primepi2(10**23)
>>> p
[1.9253203909477020467e+21, 1.925320392280406229e+21]
>>> mpf(p.delta) / mpf(p.a)
6.9219865355293e-10
A more precise, nonrigorous estimate for `\pi(x)` can be
obtained using the Riemann R function (:func:`~mpmath.riemannr`).
For large enough `x`, the value returned by :func:`~mpmath.primepi2`
essentially amounts to a small perturbation of the value returned by
:func:`~mpmath.riemannr`::
>>> primepi2(10**100)
[4.3619719871407024816e+97, 4.3619719871407032404e+97]
>>> riemannr(10**100)
4.3619719871407e+97
"""
primezeta = r"""
Computes the prime zeta function, which is defined
in analogy with the Riemann zeta function (:func:`~mpmath.zeta`)
as
.. math ::
P(s) = \sum_p \frac{1}{p^s}
where the sum is taken over all prime numbers `p`. Although
this sum only converges for `\mathrm{Re}(s) > 1`, the
function is defined by analytic continuation in the
half-plane `\mathrm{Re}(s) > 0`.
**Examples**
Arbitrary-precision evaluation for real and complex arguments is
supported::
>>> from mpmath import *
>>> mp.dps = 30; mp.pretty = True
>>> primezeta(2)
0.452247420041065498506543364832
>>> primezeta(pi)
0.15483752698840284272036497397
>>> mp.dps = 50
>>> primezeta(3)
0.17476263929944353642311331466570670097541212192615
>>> mp.dps = 20
>>> primezeta(3+4j)
(-0.12085382601645763295 - 0.013370403397787023602j)
The prime zeta function has a logarithmic pole at `s = 1`,
with residue equal to the difference of the Mertens and
Euler constants::
>>> primezeta(1)
+inf
>>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps)
-0.31571845205389007685
>>> mertens-euler
-0.31571845205389007685
The analytic continuation to `0 < \mathrm{Re}(s) \le 1`
is implemented. In this strip the function exhibits
very complex behavior; on the unit interval, it has poles at
`1/n` for every squarefree integer `n`::
>>> primezeta(0.5) # Pole at s = 1/2
(-inf + 3.1415926535897932385j)
>>> primezeta(0.25)
(-1.0416106801757269036 + 0.52359877559829887308j)
>>> primezeta(0.5+10j)
(0.54892423556409790529 + 0.45626803423487934264j)
Although evaluation works in principle for any `\mathrm{Re}(s) > 0`,
it should be noted that the evaluation time increases exponentially
as `s` approaches the imaginary axis.
For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`::
>>> primezeta(inf)
0.0
>>> primezeta(10), mpf(2)**-10
(0.00099360357443698021786, 0.0009765625)
>>> primezeta(1000)
9.3326361850321887899e-302
>>> primezeta(1000+1000j)
(-3.8565440833654995949e-302 - 8.4985390447553234305e-302j)
**References**
Carl-Erik Froberg, "On the prime zeta function",
BIT 8 (1968), pp. 187-202.
"""
bernpoly = r"""
Evaluates the Bernoulli polynomial `B_n(z)`.
The first few Bernoulli polynomials are::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(6):
... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n)))
...
[1.0]
[-0.5, 1.0]
[0.166667, -1.0, 1.0]
[0.0, 0.5, -1.5, 1.0]
[-0.0333333, 0.0, 1.0, -2.0, 1.0]
[0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0]
At `z = 0`, the Bernoulli polynomial evaluates to a
Bernoulli number (see :func:`~mpmath.bernoulli`)::
>>> bernpoly(12, 0), bernoulli(12)
(-0.253113553113553, -0.253113553113553)
>>> bernpoly(13, 0), bernoulli(13)
(0.0, 0.0)
Evaluation is accurate for large `n` and small `z`::
>>> mp.dps = 25
>>> bernpoly(100, 0.5)
2.838224957069370695926416e+78
>>> bernpoly(1000, 10.5)
5.318704469415522036482914e+1769
"""
polylog = r"""
Computes the polylogarithm, defined by the sum
.. math ::
\mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}.
This series is convergent only for `|z| < 1`, so elsewhere
the analytic continuation is implied.
The polylogarithm should not be confused with the logarithmic
integral (also denoted by Li or li), which is implemented
as :func:`~mpmath.li`.
**Examples**
The polylogarithm satisfies a huge number of functional identities.
A sample of polylogarithm evaluations is shown below::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> polylog(1,0.5), log(2)
(0.693147180559945, 0.693147180559945)
>>> polylog(2,0.5), (pi**2-6*log(2)**2)/12
(0.582240526465012, 0.582240526465012)
>>> polylog(2,-phi), -log(phi)**2-pi**2/10
(-1.21852526068613, -1.21852526068613)
>>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6
(0.53721319360804, 0.53721319360804)
:func:`~mpmath.polylog` can evaluate the analytic continuation of the
polylogarithm when `s` is an integer::
>>> polylog(2, 10)
(0.536301287357863 - 7.23378441241546j)
>>> polylog(2, -10)
-4.1982778868581
>>> polylog(2, 10j)
(-3.05968879432873 + 3.71678149306807j)
>>> polylog(-2, 10)
-0.150891632373114
>>> polylog(-2, -10)
0.067618332081142
>>> polylog(-2, 10j)
(0.0384353698579347 + 0.0912451798066779j)
Some more examples, with arguments on the unit circle (note that
the series definition cannot be used for computation here)::
>>> polylog(2,j)
(-0.205616758356028 + 0.915965594177219j)
>>> j*catalan-pi**2/48
(-0.205616758356028 + 0.915965594177219j)
>>> polylog(3,exp(2*pi*j/3))
(-0.534247512515375 + 0.765587078525922j)
>>> -4*zeta(3)/9 + 2*j*pi**3/81
(-0.534247512515375 + 0.765587078525921j)
Polylogarithms of different order are related by integration
and differentiation::
>>> s, z = 3, 0.5
>>> polylog(s+1, z)
0.517479061673899
>>> quad(lambda t: polylog(s,t)/t, [0, z])
0.517479061673899
>>> z*diff(lambda t: polylog(s+2,t), z)
0.517479061673899
Taylor series expansions around `z = 0` are::
>>> for n in range(-3, 4):
... nprint(taylor(lambda x: polylog(n,x), 0, 5))
...
[0.0, 1.0, 8.0, 27.0, 64.0, 125.0]
[0.0, 1.0, 4.0, 9.0, 16.0, 25.0]
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0]
[0.0, 1.0, 1.0, 1.0, 1.0, 1.0]
[0.0, 1.0, 0.5, 0.333333, 0.25, 0.2]
[0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04]
[0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008]
The series defining the polylogarithm is simultaneously
a Taylor series and an L-series. For certain values of `z`, the
polylogarithm reduces to a pure zeta function::
>>> polylog(pi, 1), zeta(pi)
(1.17624173838258, 1.17624173838258)
>>> polylog(pi, -1), -altzeta(pi)
(-0.909670702980385, -0.909670702980385)
Evaluation for arbitrary, nonintegral `s` is supported
for `z` within the unit circle:
>>> polylog(3+4j, 0.25)
(0.24258605789446 - 0.00222938275488344j)
>>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf])
(0.24258605789446 - 0.00222938275488344j)
It is also currently supported outside of the unit circle for `z`
not too large in magnitude::
>>> polylog(1+j, 20+40j)
(-7.1421172179728 - 3.92726697721369j)
>>> polylog(1+j, 200+400j)
Traceback (most recent call last):
...
NotImplementedError: polylog for arbitrary s and z
**References**
1. Richard Crandall, "Note on fast polylogarithm computation"
http://people.reed.edu/~crandall/papers/Polylog.pdf
2. http://en.wikipedia.org/wiki/Polylogarithm
3. http://mathworld.wolfram.com/Polylogarithm.html
"""
bell = r"""
For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell
polynomial `B_n(x)`, the first few of which are
.. math ::
B_0(x) = 1
B_1(x) = x
B_2(x) = x^2+x
B_3(x) = x^3+3x^2+x
If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it
gives the `n`-th Bell number `B_n`, which is the number of
partitions of a set with `n` elements. By setting the precision to
at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast
calculation of exact Bell numbers.
In general, :func:`~mpmath.bell` computes
.. math ::
B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right)
where `E_n(x)` is the generalized exponential function implemented
by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1],
where the modification is the sinc term ensuring that `B_n(x)` is
continuous in `n`; :func:`~mpmath.bell` can thus be evaluated,
differentiated, etc for arbitrary complex arguments.
**Examples**
Simple evaluations::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> bell(0, 2.5)
1.0
>>> bell(1, 2.5)
2.5
>>> bell(2, 2.5)
8.75
Evaluation for arbitrary complex arguments::
>>> bell(5.75+1j, 2-3j)
(-10767.71345136587098445143 - 15449.55065599872579097221j)
The first few Bell polynomials::
>>> for k in range(7):
... nprint(taylor(lambda x: bell(k,x), 0, k))
...
[1.0]
[0.0, 1.0]
[0.0, 1.0, 1.0]
[0.0, 1.0, 3.0, 1.0]
[0.0, 1.0, 7.0, 6.0, 1.0]
[0.0, 1.0, 15.0, 25.0, 10.0, 1.0]
[0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0]
The first few Bell numbers and complementary Bell numbers::
>>> [int(bell(k)) for k in range(10)]
[1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147]
>>> [int(bell(k,-1)) for k in range(10)]
[1, -1, 0, 1, 1, -2, -9, -9, 50, 267]
Large Bell numbers::
>>> mp.dps = 50
>>> bell(50)
185724268771078270438257767181908917499221852770.0
>>> bell(50,-1)
-29113173035759403920216141265491160286912.0
Some even larger values::
>>> mp.dps = 25
>>> bell(1000,-1)
-1.237132026969293954162816e+1869
>>> bell(1000)
2.989901335682408421480422e+1927
>>> bell(1000,2)
6.591553486811969380442171e+1987
>>> bell(1000,100.5)
9.101014101401543575679639e+2529
A determinant identity satisfied by Bell numbers::
>>> mp.dps = 15
>>> N = 8
>>> det([[bell(k+j) for j in range(N)] for k in range(N)])
125411328000.0
>>> superfac(N-1)
125411328000.0
**References**
1. http://mathworld.wolfram.com/DobinskisFormula.html
"""
polyexp = r"""
Evaluates the polyexponential function, defined for arbitrary
complex `s`, `z` by the series
.. math ::
E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
`E_s(z)` is constructed from the exponential function analogously
to how the polylogarithm is constructed from the ordinary
logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
It is an entire function of both `s` and `z`.
The polyexponential function provides a generalization of the
Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
In terms of the Bell polynomials,
.. math ::
E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
is a nonzero integer, but not otherwise. In particular, they differ
at `n = 0`.
**Examples**
Evaluating a series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
2.101755547733791780315904
>>> polyexp(0.5,1)
2.101755547733791780315904
Evaluation for arbitrary arguments::
>>> polyexp(-3-4j, 2.5+2j)
(2.351660261190434618268706 + 1.202966666673054671364215j)
Evaluation is accurate for tiny function values::
>>> polyexp(4, -100)
3.499471750566824369520223e-36
If `n` is a nonpositive integer, `E_n` reduces to a special
instance of the hypergeometric function `\,_pF_q`::
>>> n = 3
>>> x = pi
>>> polyexp(-n,x)
4.042192318847986561771779
>>> x*hyper([1]*(n+1), [2]*(n+1), x)
4.042192318847986561771779
"""
cyclotomic = r"""
Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
.. math ::
\Phi_n(x) = \prod_{\zeta} (x - \zeta)
where `\zeta` ranges over all primitive `n`-th roots of unity
(see :func:`~mpmath.unitroots`). An equivalent representation, used
for computation, is
.. math ::
\Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
where `\mu(m)` denotes the Moebius function. The cyclotomic
polynomials are integer polynomials, the first of which can be
written explicitly as
.. math ::
\Phi_0(x) = 1
\Phi_1(x) = x - 1
\Phi_2(x) = x + 1
\Phi_3(x) = x^2 + x + 1
\Phi_4(x) = x^2 + 1
\Phi_5(x) = x^4 + x^3 + x^2 + x + 1
\Phi_6(x) = x^2 - x + 1
**Examples**
The coefficients of low-order cyclotomic polynomials can be recovered
using Taylor expansion::
>>> from mpmath import *
>>> mp.dps = 15; mp.pretty = True
>>> for n in range(9):
... p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
... print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
...
0 [1.0]
1 [-1.0, 1.0]
2 [1.0, 1.0]
3 [1.0, 1.0, 1.0]
4 [1.0, 0.0, 1.0]
5 [1.0, 1.0, 1.0, 1.0, 1.0]
6 [1.0, -1.0, 1.0]
7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
8 [1.0, 0.0, 0.0, 0.0, 1.0]
The definition as a product over primitive roots may be checked
by computing the product explicitly (for a real argument, this
method will generally introduce numerical noise in the imaginary
part)::
>>> mp.dps = 25
>>> z = 3+4j
>>> cyclotomic(10, z)
(-419.0 - 360.0j)
>>> fprod(z-r for r in unitroots(10, primitive=True))
(-419.0 - 360.0j)
>>> z = 3
>>> cyclotomic(10, z)
61.0
>>> fprod(z-r for r in unitroots(10, primitive=True))
(61.0 - 3.146045605088568607055454e-25j)
Up to permutation, the roots of a given cyclotomic polynomial
can be checked to agree with the list of primitive roots::
>>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
>>> for r in polyroots(p[::-1]):
... print(r)
...
(0.5 - 0.8660254037844386467637232j)
(0.5 + 0.8660254037844386467637232j)
>>>
>>> for r in unitroots(6, primitive=True):
... print(r)
...
(0.5 + 0.8660254037844386467637232j)
(0.5 - 0.8660254037844386467637232j)
"""
meijerg = r"""
Evaluates the Meijer G-function, defined as
.. math ::
G^{m,n}_{p,q} \left( \left. \begin{matrix}
a_1, \dots, a_n ; a_{n+1} \dots a_p \\
b_1, \dots, b_m ; b_{m+1} \dots b_q
\end{matrix}\; \right| \; z ; r \right) =
\frac{1}{2 \pi i} \int_L
\frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)}
{\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)}
z^{-s/r} ds
for an appropriate choice of the contour `L` (see references).
There are `p` elements `a_j`.
The argument *a_s* should be a pair of lists, the first containing the
`n` elements `a_1, \ldots, a_n` and the second containing
the `p-n` elements `a_{n+1}, \ldots a_p`.
There are `q` elements `b_j`.
The argument *b_s* should be a pair of lists, the first containing the
`m` elements `b_1, \ldots, b_m` and the second containing
the `q-m` elements `b_{m+1}, \ldots b_q`.
The implicit tuple `(m, n, p, q)` constitutes the order or degree of the
Meijer G-function, and is determined by the lengths of the coefficient
vectors. Confusingly, the indices in this tuple appear in a different order
from the coefficients, but this notation is standard. The many examples
given below should hopefully clear up any potential confusion.
**Algorithm**
The Meijer G-function is evaluated as a combination of hypergeometric series.
There are two versions of the function, which can be selected with
the optional *series* argument.
*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z`
*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z`
The default series is chosen based on the degree and `|z|` in order
to be consistent with Mathematica's. This definition of the Meijer G-function
has a discontinuity at `|z| = 1` for some orders, which can
be avoided by explicitly specifying a series.
Keyword arguments are forwarded to :func:`~mpmath.hypercomb`.
**Examples**
Many standard functions are special cases of the Meijer G-function
(possibly rescaled and/or with branch cut corrections). We define
some test parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> a = mpf(0.75)
>>> b = mpf(1.5)
>>> z = mpf(2.25)
The exponential function:
`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \;
\right| \; -z \right)`
>>> meijerg([[],[]], [[0],[]], -z)
9.487735836358525720550369
>>> exp(z)
9.487735836358525720550369
The natural logarithm:
`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0
\end{matrix} \; \right| \; -z \right)`
>>> meijerg([[1,1],[]], [[1],[0]], z)
1.178654996341646117219023
>>> log(1+z)
1.178654996341646117219023
A rational function:
`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1
\end{matrix} \; \right| \; z \right)`
>>> meijerg([[1,1],[]], [[1],[1]], z)
0.6923076923076923076923077
>>> z/(z+1)
0.6923076923076923076923077
The sine and cosine functions:
`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)`
`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix}
- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)`
>>> meijerg([[],[]], [[0.5],[0]], (z/2)**2)
0.4389807929218676682296453
>>> sin(z)/sqrt(pi)
0.4389807929218676682296453
>>> meijerg([[],[]], [[0],[0.5]], (z/2)**2)
-0.3544090145996275423331762
>>> cos(z)/sqrt(pi)
-0.3544090145996275423331762
Bessel functions:
`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left.
\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2}
\end{matrix} \; \right| \; z \right)`
`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; -z \right)`
`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left.
\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2}
\end{matrix} \; \right| \; z \right)`
As the example with the Bessel *I* function shows, a branch
factor is required for some arguments when inverting the square root.
>>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2)
0.5059425789597154858527264
>>> besselj(a,z)
0.5059425789597154858527264
>>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2)
0.1853868950066556941442559
>>> bessely(a, z)
0.1853868950066556941442559
>>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> (-z)**(a/2) / z**(a/2) * besseli(a, z)
(0.8685913322427653875717476 + 2.096964974460199200551738j)
>>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2)
0.09334163695597828403796071
>>> besselk(a,z)
0.09334163695597828403796071
Error functions:
`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left.
\begin{matrix} a \\ a-1, a-\frac{1}{2}
\end{matrix} \; \right| \; z, \frac{1}{2} \right)`
>>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5)
0.00172839843123091957468712
>>> sqrt(pi) * z**(2*a-2) * erfc(z)
0.00172839843123091957468712
A Meijer G-function of higher degree, (1,1,2,3):
>>> meijerg([[a],[b]], [[a],[b,a-1]], z)
1.55984467443050210115617
>>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1)
1.55984467443050210115617
A Meijer G-function of still higher degree, (4,1,2,4), that can
be expanded as a messy combination of exponential integrals:
>>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z)
0.3323667133658557271898061
>>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\
... expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z)))
0.3323667133658557271898061
In the following case, different series give different values::
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2))
-0.06417628097442437076207337
>>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1)
0.1428699426155117511873047
>>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2))
-0.06417628097442437076207337
**References**
1. http://en.wikipedia.org/wiki/Meijer_G-function
2. http://mathworld.wolfram.com/MeijerG-Function.html
3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/
4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/
"""
clsin = r"""
Computes the Clausen sine function, defined formally by the series
.. math ::
\mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}.
The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical
"Clausen function". More generally, the Clausen function is defined for
complex `s` and `z`, even when the series does not converge. The
Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as
.. math ::
\mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) -
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}),
and this representation can be taken to provide the analytic continuation of the
series. The complementary function :func:`~mpmath.clcos` gives the corresponding
cosine sum.
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf])
-0.6533010136329338746275795
-0.6533010136329338746275795
Using `z + \pi` instead of `z` gives an alternating series::
>>> clsin(s, z+pi)
0.8860032351260589402871624
>>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf])
0.8860032351260589402871624
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clsin(1, z)
0.2047709230104579724675985
>>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j))
0.2047709230104579724675985
>>> nsum(lambda k: sin(k*z)/k, [1,inf])
0.2047709230104579724675985
The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the
value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for
`0 < \theta < 2 \pi`::
>>> cl2 = lambda t: clsin(2, t)
>>> cl2(3.5)
-0.2465045302347694216534255
>>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5])
-0.2465045302347694216534255
This function is symmetric about `\theta = \pi` with zeros and extreme
points::
>>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi))
0.0
1.014941606409653625021203
0.0
-1.014941606409653625021203
0.0
Catalan's constant is a special value::
>>> cl2(pi/2)
0.9159655941772190150546035
>>> +catalan
0.9159655941772190150546035
The Clausen sine function can be expressed in closed form when
`s` is an odd integer (becoming zero when `s` < 0)::
>>> z = 1 + sqrt(2)
>>> clsin(1, z); (pi-z)/2
0.3636895456083490948304773
0.3636895456083490948304773
>>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12
0.5661751584451144991707161
0.5661751584451144991707161
>>> clsin(-1, z)
0.0
>>> clsin(-3, z)
0.0
It can also be expressed in closed form for even integer `s \le 0`,
providing a finite sum for series such as
`\sin(z) + \sin(2z) + \sin(3z) + \ldots`::
>>> z = 1 + sqrt(2)
>>> clsin(0, z)
0.1903105029507513881275865
>>> cot(z/2)/2
0.1903105029507513881275865
>>> clsin(-2, z)
-0.1089406163841548817581392
>>> -cot(z/2)*csc(z/2)**2/4
-0.1089406163841548817581392
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clsin(3, 3*pi)
-8.892316224968072424732898e-26
>>> clsin(3, 3, pi=True)
0.0
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clsin(s, z)
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
>>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf])
(-0.593079480117379002516034 + 0.9038644233367868273362446j)
"""
clcos = r"""
Computes the Clausen cosine function, defined formally by the series
.. math ::
\mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}.
This function is complementary to the Clausen sine function
:func:`~mpmath.clsin`. In terms of the polylogarithm,
.. math ::
\mathrm{\widetilde{Cl}}_s(z) =
\frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) +
\mathrm{Li}_s\left(e^{-iz}\right)\right)
= \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}).
**Examples**
Evaluation for arbitrarily chosen `s` and `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> s, z = 3, 4
>>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf])
-0.6518926267198991308332759
-0.6518926267198991308332759
Using `z + \pi` instead of `z` gives an alternating series::
>>> s, z = 3, 0.5
>>> clcos(s, z+pi)
-0.8155530586502260817855618
>>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf])
-0.8155530586502260817855618
With `s = 1`, the sum can be expressed in closed form
using elementary functions::
>>> z = 1 + sqrt(3)
>>> clcos(1, z)
-0.6720334373369714849797918
>>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z))))
-0.6720334373369714849797918
>>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real
-0.6720334373369714849797918
>>> nsum(lambda k: cos(k*z)/k, [1,inf])
-0.6720334373369714849797918
It can also be expressed in closed form when `s` is an even integer.
For example,
>>> clcos(2,z)
-0.7805359025135583118863007
>>> pi**2/6 - pi*z/2 + z**2/4
-0.7805359025135583118863007
The case `s = 0` gives the renormalized sum of
`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for
any value of `z`)::
>>> clcos(0, z)
-0.5
>>> nsum(lambda k: cos(k*z), [1,inf])
-0.5
Also the sums
.. math ::
\cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots
and
.. math ::
\cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots
for higher integer powers `n = -s` can be done in closed form. They are zero
when `n` is positive and even (`s` negative and even)::
>>> clcos(-1, z); 1/(2*cos(z)-2)
-0.2607829375240542480694126
-0.2607829375240542480694126
>>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8
0.1472635054979944390848006
0.1472635054979944390848006
>>> clcos(-2, z); clcos(-4, z); clcos(-6, z)
0.0
0.0
0.0
With `z = \pi`, the series reduces to that of the Riemann zeta function
(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta
function values)::
>>> clcos(2.5, 0); zeta(2.5)
1.34148725725091717975677
1.34148725725091717975677
>>> clcos(2.5, pi); -altzeta(2.5)
-0.8671998890121841381913472
-0.8671998890121841381913472
Call with ``pi=True`` to multiply `z` by `\pi` exactly::
>>> clcos(-3, 2*pi)
2.997921055881167659267063e+102
>>> clcos(-3, 2, pi=True)
0.008333333333333333333333333
Evaluation for complex `s`, `z` in a nonconvergent case::
>>> s, z = -1-j, 1+2j
>>> clcos(s, z)
(0.9407430121562251476136807 + 0.715826296033590204557054j)
>>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf])
(0.9407430121562251476136807 + 0.715826296033590204557054j)
"""
whitm = r"""
Evaluates the Whittaker function `M(k,m,z)`, which gives a solution
to the Whittaker differential equation
.. math ::
\frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+
\frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0.
A second solution is given by :func:`~mpmath.whitw`.
The Whittaker functions are defined in Abramowitz & Stegun, section 13.1.
They are alternate forms of the confluent hypergeometric functions
`\,_1F_1` and `U`:
.. math ::
M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
\,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z)
W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m}
U(\tfrac{1}{2}+m-k, 1+2m, z).
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitm(1, 1, 1)
0.7302596799460411820509668
>>> whitm(1, 1, -1)
(0.0 - 1.417977827655098025684246j)
>>> whitm(j, j/2, 2+3j)
(3.245477713363581112736478 - 0.822879187542699127327782j)
>>> whitm(2, 3, 100000)
4.303985255686378497193063e+21707
Evaluation at zero::
>>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0)
+inf
nan
0.0
We can verify that :func:`~mpmath.whitm` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitm(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`,
verifying evaluation along the real axis::
>>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf])
3.438869842576800225207341
>>> 128/(21*sqrt(pi))
3.438869842576800225207341
"""
whitw = r"""
Evaluates the Whittaker function `W(k,m,z)`, which gives a second
solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.)
**Examples**
Evaluation for arbitrary real and complex arguments is supported::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> whitw(1, 1, 1)
1.19532063107581155661012
>>> whitw(1, 1, -1)
(-0.9424875979222187313924639 - 0.2607738054097702293308689j)
>>> whitw(j, j/2, 2+3j)
(0.1782899315111033879430369 - 0.01609578360403649340169406j)
>>> whitw(2, 3, 100000)
1.887705114889527446891274e-21705
>>> whitw(-1, -1, 100)
1.905250692824046162462058e-24
Evaluation at zero::
>>> for m in [-1, -0.5, 0, 0.5, 1]:
... whitw(1, m, 0)
...
+inf
nan
0.0
nan
+inf
We can verify that :func:`~mpmath.whitw` numerically satisfies the
differential equation for arbitrarily chosen values::
>>> k = mpf(0.25)
>>> m = mpf(1.5)
>>> f = lambda z: whitw(k,m,z)
>>> for z in [-1, 2.5, 3, 1+2j]:
... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z))
...
0.0
0.0
0.0
0.0
"""
ber = r"""
Computes the Kelvin function ber, which for real arguments gives the real part
of the Bessel J function of a rotated argument
.. math ::
J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x).
The imaginary part is given by :func:`~mpmath.bei`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ber.py
.. image :: /modules/mpmath/plots/ber.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 3.5
>>> ber(n,x)
1.442338852571888752631129
>>> bei(n,x)
-0.948359035324558320217678
>>> besselj(n, x*root(1,8,3))
(1.442338852571888752631129 - 0.948359035324558320217678j)
The ber and bei functions are also defined by analytic continuation
for complex arguments::
>>> ber(1+j, 2+3j)
(4.675445984756614424069563 - 15.84901771719130765656316j)
>>> bei(1+j, 2+3j)
(15.83886679193707699364398 + 4.684053288183046528703611j)
"""
bei = r"""
Computes the Kelvin function bei, which for real arguments gives the
imaginary part of the Bessel J function of a rotated argument.
See :func:`~mpmath.ber`.
"""
ker = r"""
Computes the Kelvin function ker, which for real arguments gives the real part
of the (rescaled) Bessel K function of a rotated argument
.. math ::
e^{-n\pi i/2} K_n\left(x e^{\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x).
The imaginary part is given by :func:`~mpmath.kei`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/ker.py
.. image :: /modules/mpmath/plots/ker.png
**Examples**
Verifying the defining relation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> n, x = 2, 4.5
>>> ker(n,x)
0.02542895201906369640249801
>>> kei(n,x)
-0.02074960467222823237055351
>>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1))
(0.02542895201906369640249801 - 0.02074960467222823237055351j)
The ker and kei functions are also defined by analytic continuation
for complex arguments::
>>> ker(1+j, 3+4j)
(1.586084268115490421090533 - 2.939717517906339193598719j)
>>> kei(1+j, 3+4j)
(-2.940403256319453402690132 - 1.585621643835618941044855j)
"""
kei = r"""
Computes the Kelvin function kei, which for real arguments gives the
imaginary part of the (rescaled) Bessel K function of a rotated argument.
See :func:`~mpmath.ker`.
"""
struveh = r"""
Gives the Struve function
.. math ::
\,\mathbf{H}_n(z) =
\sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2})
\Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1}
which is a solution to the Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struveh(0, 3.5)
0.3608207733778295024977797
>>> struveh(-1, 10)
-0.255212719726956768034732
>>> struveh(1, -100.5)
0.5819566816797362287502246
>>> struveh(2.5, 10000000000000)
3153915652525200060.308937
>>> struveh(2.5, -10000000000000)
(0.0 - 3153915652525200060.308937j)
>>> struveh(1+j, 1000000+4000000j)
(-3.066421087689197632388731e+1737173 - 1.596619701076529803290973e+1737173j)
A Struve function of half-integer order is elementary; for example:
>>> z = 3
>>> struveh(0.5, 3)
0.9167076867564138178671595
>>> sqrt(2/(pi*z))*(1-cos(z))
0.9167076867564138178671595
Numerically verifying the differential equation::
>>> z = mpf(4.5)
>>> n = 3
>>> f = lambda z: struveh(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) + (z**2-n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
17.40359302709875496632744
>>> rhs
17.40359302709875496632744
"""
struvel = r"""
Gives the modified Struve function
.. math ::
\,\mathbf{L}_n(z) = -i e^{-n\pi i/2} \mathbf{H}_n(i z)
which solves the modified Struve differential equation
.. math ::
z^2 f''(z) + z f'(z) - (z^2+n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}.
**Examples**
Evaluation for arbitrary real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> struvel(0, 3.5)
7.180846515103737996249972
>>> struvel(-1, 10)
2670.994904980850550721511
>>> struvel(1, -100.5)
1.757089288053346261497686e+42
>>> struvel(2.5, 10000000000000)
4.160893281017115450519948e+4342944819025
>>> struvel(2.5, -10000000000000)
(0.0 - 4.160893281017115450519948e+4342944819025j)
>>> struvel(1+j, 700j)
(-0.1721150049480079451246076 + 0.1240770953126831093464055j)
>>> struvel(1+j, 1000000+4000000j)
(-2.973341637511505389128708e+434290 - 5.164633059729968297147448e+434290j)
Numerically verifying the differential equation::
>>> z = mpf(3.5)
>>> n = 3
>>> f = lambda z: struvel(n,z)
>>> lhs = z**2*diff(f,z,2) + z*diff(f,z) - (z**2+n**2)*f(z)
>>> rhs = 2*z**(n+1)/fac2(2*n-1)/pi
>>> lhs
6.368850306060678353018165
>>> rhs
6.368850306060678353018165
"""
appellf1 = r"""
Gives the Appell F1 hypergeometric function of two variables,
.. math ::
F_1(a,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
This series is only generally convergent when `|x| < 1` and `|y| < 1`,
although :func:`~mpmath.appellf1` can evaluate an analytic continuation
with respect to either variable, and sometimes both.
**Examples**
Evaluation is supported for real and complex parameters::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf1(1,0,0.5,1,0.5,0.25)
1.154700538379251529018298
>>> appellf1(1,1+j,0.5,1,0.5,0.5j)
(1.138403860350148085179415 + 1.510544741058517621110615j)
For some integer parameters, the F1 series reduces to a polynomial::
>>> appellf1(2,-4,-3,1,2,5)
-816.0
>>> appellf1(-5,1,2,1,4,5)
-20528.0
The analytic continuation with respect to either `x` or `y`,
and sometimes with respect to both, can be evaluated::
>>> appellf1(2,3,4,5,100,0.5)
(0.0006231042714165329279738662 + 0.0000005769149277148425774499857j)
>>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j)
(-0.1782604566893954897128702 + 0.002472407104546216117161499j)
>>> appellf1(1,2,3,4,10,12)
-0.07122993830066776374929313
For certain arguments, F1 reduces to an ordinary hypergeometric function::
>>> appellf1(1,2,3,5,0.5,0.25)
1.547902270302684019335555
>>> 4*hyp2f1(1,2,5,'1/3')/3
1.547902270302684019335555
>>> appellf1(1,2,3,4,0,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
>>> hyp2f1(1,3,4,1.5)
(-1.717202506168937502740238 - 2.792526803190927323077905j)
The F1 function satisfies a system of partial differential equations::
>>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25])
>>> F = lambda x,y: appellf1(a,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*(1-x)*diff(F,(x,y),(1,1)) +
... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>>
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*(1-y)*diff(F,(x,y),(1,1)) +
... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
The Appell F1 function allows for closed-form evaluation of various
integrals, such as any integral of the form
`\int x^r (x+a)^p (x+b)^q dx`::
>>> def integral(a,b,p,q,r,x1,x2):
... a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2])
... f = lambda x: x**r * (x+a)**p * (x+b)**q
... def F(x):
... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q
... v *= (1+x/a)**(-p)
... v *= (1+x/b)**(-q)
... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b)
... return v
... print("Num. quad: %s" % quad(f, [x1,x2]))
... print("Appell F1: %s" % (F(x2)-F(x1)))
...
>>> integral('1/5','4/3','-2','3','1/2',0,1)
Num. quad: 9.073335358785776206576981
Appell F1: 9.073335358785776206576981
>>> integral('3/2','4/3','-2','3','1/2',0,1)
Num. quad: 1.092829171999626454344678
Appell F1: 1.092829171999626454344678
>>> integral('3/2','4/3','-2','3','1/2',12,25)
Num. quad: 1106.323225040235116498927
Appell F1: 1106.323225040235116498927
Also incomplete elliptic integrals fall into this category [1]::
>>> def E(z, m):
... if (pi/2).ae(z):
... return ellipe(m)
... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\
... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
...
>>> z, m = 1, 0.5
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
0.9273298836244400669659042
0.9273298836244400669659042
>>> z, m = 3, 2
>>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z])
(1.057495752337234229715836 + 1.198140234735592207439922j)
(1.057495752337234229715836 + 1.198140234735592207439922j)
**References**
1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/
2. [SrivastavaKarlsson]_
3. [CabralRosetti]_
4. [Vidunas]_
5. [Slater]_
"""
angerj = r"""
Gives the Anger function
.. math ::
\mathbf{J}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \cos(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> angerj(2,3)
0.4860912605858910769078311
>>> angerj(-3+4j, 2+5j)
(-5033.358320403384472395612 + 585.8011892476145118551756j)
>>> angerj(3.25, 1e6j)
(4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j)
>>> angerj(-1.5, 1e6)
0.0002795719747073879393087011
The Anger function coincides with the Bessel J-function when `\nu`
is an integer::
>>> angerj(1,3); besselj(1,3)
0.3390589585259364589255146
0.3390589585259364589255146
>>> angerj(1.5,3); besselj(1.5,3)
0.4088969848691080859328847
0.4777182150870917715515015
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: angerj(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-0.6002108774380707130367995
>>> (z-v)/(pi*z**2) * sinpi(v)
-0.6002108774380707130367995
Verifying the integral representation::
>>> angerj(v,z)
0.1145380759919333180900501
>>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi])
0.1145380759919333180900501
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
webere = r"""
Gives the Weber function
.. math ::
\mathbf{E}_{\nu}(z) = \frac{1}{\pi}
\int_0^{\pi} \sin(\nu t - z \sin t) dt
which is an entire function of both the parameter `\nu` and
the argument `z`. It solves the inhomogeneous Bessel differential
equation
.. math ::
f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z)
= -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)).
**Examples**
Evaluation for real and complex parameter and argument::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> webere(2,3)
-0.1057668973099018425662646
>>> webere(-3+4j, 2+5j)
(-585.8081418209852019290498 - 5033.314488899926921597203j)
>>> webere(3.25, 1e6j)
(-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j)
>>> webere(3.25, 1e6)
-0.00002812518265894315604914453
Up to addition of a rational function of `z`, the Weber function coincides
with the Struve H-function when `\nu` is an integer::
>>> webere(1,3); 2/pi-struveh(1,3)
-0.3834897968188690177372881
-0.3834897968188690177372881
>>> webere(5,3); 26/(35*pi)-struveh(5,3)
0.2009680659308154011878075
0.2009680659308154011878075
Verifying the differential equation::
>>> v,z = mpf(2.25), 0.75
>>> f = lambda z: webere(v,z)
>>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z)
-1.097441848875479535164627
>>> -(z+v+(z-v)*cospi(v))/(pi*z**2)
-1.097441848875479535164627
Verifying the integral representation::
>>> webere(v,z)
0.1486507351534283744485421
>>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi])
0.1486507351534283744485421
**References**
1. [DLMF]_ section 11.10: Anger-Weber Functions
"""
lommels1 = r"""
Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}`
.. math ::
s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)}
\,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2};
-\frac{z^2}{4} \right)
which solves the inhomogeneous Bessel equation
.. math ::
z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}.
A second solution is given by :func:`~mpmath.lommels2`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/lommels1.py
.. image :: /modules/mpmath/plots/lommels1.png
**Examples**
An integral representation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> u,v,z = 0.25, 0.125, mpf(0.75)
>>> lommels1(u,v,z)
0.4276243877565150372999126
>>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \
... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2)
0.4276243877565150372999126
A special value::
>>> lommels1(v,v,z)
0.5461221367746048054932553
>>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z)
0.5461221367746048054932553
Verifying the differential equation::
>>> f = lambda z: lommels1(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6979536443265746992059141
>>> z**(u+1)
0.6979536443265746992059141
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
lommels2 = r"""
Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}`
.. math ::
S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1}
\Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right)
\Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times
\left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) -
\cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z)
\right]
which solves the same differential equation as
:func:`~mpmath.lommels1`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/lommels2.py
.. image :: /modules/mpmath/plots/lommels2.png
**Examples**
For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> lommels2(10,2,30000)
1.968299831601008419949804e+40
>>> power(30000,9)
1.9683e+40
A special value::
>>> u,v,z = 0.5, 0.125, mpf(0.75)
>>> lommels2(v,v,z)
0.9589683199624672099969765
>>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5)
0.9589683199624672099969765
Verifying the differential equation::
>>> f = lambda z: lommels2(u,v,z)
>>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z)
0.6495190528383289850727924
>>> z**(u+1)
0.6495190528383289850727924
**References**
1. [GradshteynRyzhik]_
2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html
"""
appellf2 = r"""
Gives the Appell F2 hypergeometric function of two variables
.. math ::
F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| + |y| < 1`.
**Examples**
Evaluation for real and complex arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf2(1,2,3,4,5,0.25,0.125)
1.257417193533135344785602
>>> appellf2(1,-3,-4,2,3,2,3)
-42.8
>>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25)
(0.9880539519421899867041719 + 0.01497616165031102661476978j)
>>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25))
1.201311219287411337955192
>>> appellf2(1,1,1,4,6,0.125,16)
(-0.09455532250274744282125152 - 0.7647282253046207836769297j)
A transformation formula::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125])
>>> appellf2(a,b1,b2,c1,c2,x,y)
0.2299211717841180783309688
>>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x))
0.2299211717841180783309688
A system of partial differential equations satisfied by F2::
>>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625])
>>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) -
... b1*y*diff(F,(x,y),(0,1)) -
... a*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) -
... b2*x*diff(F,(x,y),(1,0)) -
... a*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf3 = r"""
Gives the Appell F3 hypergeometric function of two variables
.. math ::
F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for `|x| < 1, |y| < 1`.
**Examples**
Evaluation for various parameters and variables::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf3(1,2,3,4,5,0.5,0.25)
2.221557778107438938158705
>>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
(-0.5189554589089861284537389 - 0.1454441043328607980769742j)
>>> appellf3(1,-2,-3,1,1,4,6)
-17.4
>>> appellf3(1,2,-3,1,1,4,6)
(17.7876136773677356641825 + 19.54768762233649126154534j)
>>> appellf3(1,2,-3,1,1,6,4)
(85.02054175067929402953645 + 148.4402528821177305173599j)
>>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25))
1.719992169545200286696007
Many transformations and evaluations for special combinations
of the parameters are possible, e.g.::
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf3(a,c-a,b,c-b,c,x,y)
1.093432340896087107444363
>>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y)
1.093432340896087107444363
>>> x**2*appellf3(1,1,1,1,3,x,-x)
0.01568646277445385390945083
>>> polylog(2,x**2)
0.01568646277445385390945083
>>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125])
>>> appellf3(a1,a2,b1,b2,c,x,1)
1.03947361709111140096947
>>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x)
1.03947361709111140096947
The Appell F3 function satisfies a pair of partial
differential equations::
>>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625])
>>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) +
... y*diff(F,(x,y),(1,1)) +
... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) -
... a1*b1*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) +
... x*diff(F,(x,y),(1,1)) +
... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) -
... a2*b2*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
appellf4 = r"""
Gives the Appell F4 hypergeometric function of two variables
.. math ::
F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty}
\frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n}
\frac{x^m y^n}{m! n!}.
The series is generally absolutely convergent for
`\sqrt{|x|} + \sqrt{|y|} < 1`.
**Examples**
Evaluation for various parameters and arguments::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> appellf4(1,1,2,2,0.25,0.125)
1.286182069079718313546608
>>> appellf4(-2,-3,4,5,4,5)
34.8
>>> appellf4(5,4,2,3,0.25j,-0.125j)
(-0.2585967215437846642163352 + 2.436102233553582711818743j)
Reduction to `\,_2F_1` in a special case::
>>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125])
>>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x))
1.129143488466850868248364
>>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y)
1.129143488466850868248364
A system of partial differential equations satisfied by F4::
>>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625])
>>> F = lambda x,y: appellf4(a,b,c1,c2,x,y)
>>> chop(x*(1-x)*diff(F,(x,y),(2,0)) -
... y**2*diff(F,(x,y),(0,2)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) -
... ((a+b+1)*y)*diff(F,(x,y),(0,1)) -
... a*b*F(x,y))
0.0
>>> chop(y*(1-y)*diff(F,(x,y),(0,2)) -
... x**2*diff(F,(x,y),(2,0)) -
... 2*x*y*diff(F,(x,y),(1,1)) +
... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) -
... ((a+b+1)*x)*diff(F,(x,y),(1,0)) -
... a*b*F(x,y))
0.0
**References**
See references for :func:`~mpmath.appellf1`.
"""
zeta = r"""
Computes the Riemann zeta function
.. math ::
\zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots
or, with `a \ne 1`, the more general Hurwitz zeta function
.. math ::
\zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}.
Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with
respect to `s`,
.. math ::
\zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}.
Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz
zeta functions are defined through analytic continuation for arbitrary
complex `s \ne 1` (`s = 1` is a pole).
The implementation uses three algorithms: the Borwein algorithm for
the Riemann zeta function when `s` is close to the real line;
the Riemann-Siegel formula for the Riemann zeta function when `s` is
large imaginary, and Euler-Maclaurin summation in all other cases.
The reflection formula for `\Re(s) < 0` is implemented in some cases.
The algorithm can be chosen with ``method = 'borwein'``,
``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``.
The parameter `a` is usually a rational number `a = p/q`, and may be specified
as such by passing an integer tuple `(p, q)`. Evaluation is supported for
arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for
nonrational `a` or when computing derivatives.
**Examples**
Some values of the Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> zeta(2); pi**2 / 6
1.644934066848226436472415
1.644934066848226436472415
>>> zeta(0)
-0.5
>>> zeta(-1)
-0.08333333333333333333333333
>>> zeta(-2)
0.0
For large positive `s`, `\zeta(s)` rapidly approaches 1::
>>> zeta(50)
1.000000000000000888178421
>>> zeta(100)
1.0
>>> zeta(inf)
1.0
>>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler
0.5772156649015328606065121
0.5772156649015328606065121
>>> nsum(lambda k: zeta(k)-1, [2, inf])
1.0
Evaluation is supported for complex `s` and `a`::
>>> zeta(-3+4j)
(-0.03373057338827757067584698 + 0.2774499251557093745297677j)
>>> zeta(2+3j, -1+j)
(389.6841230140842816370741 + 295.2674610150305334025962j)
The Riemann zeta function has so-called nontrivial zeros on
the critical line `s = 1/2 + it`::
>>> findroot(zeta, 0.5+14j); zetazero(1)
(0.5 + 14.13472514173469379045725j)
(0.5 + 14.13472514173469379045725j)
>>> findroot(zeta, 0.5+21j); zetazero(2)
(0.5 + 21.02203963877155499262848j)
(0.5 + 21.02203963877155499262848j)
>>> findroot(zeta, 0.5+25j); zetazero(3)
(0.5 + 25.01085758014568876321379j)
(0.5 + 25.01085758014568876321379j)
>>> chop(zeta(zetazero(10)))
0.0
Evaluation on and near the critical line is supported for large
heights `t` by means of the Riemann-Siegel formula (currently
for `a = 1`, `n \le 4`)::
>>> zeta(0.5+100000j)
(1.073032014857753132114076 + 5.780848544363503984261041j)
>>> zeta(0.75+1000000j)
(0.9535316058375145020351559 + 0.9525945894834273060175651j)
>>> zeta(0.5+10000000j)
(11.45804061057709254500227 - 8.643437226836021723818215j)
>>> zeta(0.5+100000000j, derivative=1)
(51.12433106710194942681869 + 43.87221167872304520599418j)
>>> zeta(0.5+100000000j, derivative=2)
(-444.2760822795430400549229 - 896.3789978119185981665403j)
>>> zeta(0.5+100000000j, derivative=3)
(3230.72682687670422215339 + 14374.36950073615897616781j)
>>> zeta(0.5+100000000j, derivative=4)
(-11967.35573095046402130602 - 218945.7817789262839266148j)
>>> zeta(1+10000000j) # off the line
(2.859846483332530337008882 + 0.491808047480981808903986j)
>>> zeta(1+10000000j, derivative=1)
(-4.333835494679647915673205 - 0.08405337962602933636096103j)
>>> zeta(1+10000000j, derivative=4)
(453.2764822702057701894278 - 581.963625832768189140995j)
For investigation of the zeta function zeros, the Riemann-Siegel
Z-function is often more convenient than working with the Riemann
zeta function directly (see :func:`~mpmath.siegelz`).
Some values of the Hurwitz zeta function::
>>> zeta(2, 3); -5./4 + pi**2/6
0.3949340668482264364724152
0.3949340668482264364724152
>>> zeta(2, (3,4)); pi**2 - 8*catalan
2.541879647671606498397663
2.541879647671606498397663
For positive integer values of `s`, the Hurwitz zeta function is
equivalent to a polygamma function (except for a normalizing factor)::
>>> zeta(4, (1,5)); psi(3, '1/5')/6
625.5408324774542966919938
625.5408324774542966919938
Evaluation of derivatives::
>>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2
(-2.675565317808456852310934 + 4.742664438034657928194889j)
(-2.675565317808456852310934 + 4.742664438034657928194889j)
>>> zeta(2, 1, 20)
2432902008176640000.000242
>>> zeta(3+4j, 5.5+2j, 4)
(-0.140075548947797130681075 - 0.3109263360275413251313634j)
>>> zeta(0.5+100000j, 1, 4)
(-10407.16081931495861539236 + 13777.78669862804508537384j)
>>> zeta(-100+0.5j, (1,3), derivative=4)
(4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j)
Generating a Taylor series at `s = 2` using derivatives::
>>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k))
...
1.644934066848226436472415 * (s-2)^0
-0.9375482543158437537025741 * (s-2)^1
0.9946401171494505117104293 * (s-2)^2
-1.000024300473840810940657 * (s-2)^3
1.000061933072352565457512 * (s-2)^4
-1.000006869443931806408941 * (s-2)^5
1.000000173233769531820592 * (s-2)^6
-0.9999999569989868493432399 * (s-2)^7
0.9999999937218844508684206 * (s-2)^8
-0.9999999996355013916608284 * (s-2)^9
1.000000000004610645020747 * (s-2)^10
Evaluation at zero and for negative integer `s`::
>>> zeta(0, 10)
-9.5
>>> zeta(-2, (2,3)); mpf(1)/81
0.01234567901234567901234568
0.01234567901234567901234568
>>> zeta(-3+4j, (5,4))
(0.2899236037682695182085988 + 0.06561206166091757973112783j)
>>> zeta(-3.25, 1/pi)
-0.0005117269627574430494396877
>>> zeta(-3.5, pi, 1)
11.156360390440003294709
>>> zeta(-100.5, (8,3))
-4.68162300487989766727122e+77
>>> zeta(-10.5, (-8,3))
(-0.01521913704446246609237979 + 29907.72510874248161608216j)
>>> zeta(-1000.5, (-8,3))
(1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j)
>>> zeta(-1+j, 3+4j)
(-16.32988355630802510888631 - 22.17706465801374033261383j)
>>> zeta(-1+j, 3+4j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
>>> diff(lambda s: zeta(s, 3+4j), -1+j, 2)
(32.48985276392056641594055 - 51.11604466157397267043655j)
**References**
1. http://mathworld.wolfram.com/RiemannZetaFunction.html
2. http://mathworld.wolfram.com/HurwitzZetaFunction.html
3. http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P155.pdf
"""
dirichlet = r"""
Evaluates the Dirichlet L-function
.. math ::
L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}.
where `\chi` is a periodic sequence of length `q` which should be supplied
in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`.
Strictly, `\chi` should be a Dirichlet character, but any periodic
sequence will work.
For example, ``dirichlet(s, [1])`` gives the ordinary
Riemann zeta function and ``dirichlet(s, [-1,1])`` gives
the alternating zeta function (Dirichlet eta function).
Also the derivative with respect to `s` (currently only a first
derivative) can be evaluated.
**Examples**
The ordinary Riemann zeta function::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> dirichlet(3, [1]); zeta(3)
1.202056903159594285399738
1.202056903159594285399738
>>> dirichlet(1, [1])
+inf
The alternating zeta function::
>>> dirichlet(1, [-1,1]); ln(2)
0.6931471805599453094172321
0.6931471805599453094172321
The following defines the Dirichlet beta function
`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies
several values of this function::
>>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d)
>>> B(0); 1./2
0.5
0.5
>>> B(1); pi/4
0.7853981633974483096156609
0.7853981633974483096156609
>>> B(2); +catalan
0.9159655941772190150546035
0.9159655941772190150546035
>>> B(2,1); diff(B, 2)
0.08158073611659279510291217
0.08158073611659279510291217
>>> B(-1,1); 2*catalan/pi
0.5831218080616375602767689
0.5831218080616375602767689
>>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2)))
0.3915943927068367764719453
0.3915943927068367764719454
>>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25)))
0.1929013167969124293631898
0.1929013167969124293631898
A custom L-series of period 3::
>>> dirichlet(2, [2,0,1])
0.7059715047839078092146831
>>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \
... nsum(lambda k: (3*k+2)**-2, [0,inf])
0.7059715047839078092146831
"""
coulombf = r"""
Calculates the regular Coulomb wave function
.. math ::
F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz)
where the normalization constant `C_l(\eta)` is as calculated by
:func:`~mpmath.coulombc`. This function solves the differential equation
.. math ::
f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0.
A second linearly independent solution is given by the irregular
Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`)
and thus the general solution is
`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary
constants `C_1`, `C_2`.
Physically, the Coulomb wave functions give the radial solution
to the Schrodinger equation for a point particle in a `1/z` potential; `z` is
then the radius and `l`, `\eta` are quantum numbers.
The Coulomb wave functions with real parameters are defined
in Abramowitz & Stegun, section 14. However, all parameters are permitted
to be complex in this implementation (see references).
**Plots**
.. literalinclude :: /modules/mpmath/plots/coulombf.py
.. image :: /modules/mpmath/plots/coulombf.png
.. literalinclude :: /modules/mpmath/plots/coulombf_c.py
.. image :: /modules/mpmath/plots/coulombf_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombf(2, 1.5, 3.5)
0.4080998961088761187426445
>>> coulombf(-2, 1.5, 3.5)
0.7103040849492536747533465
>>> coulombf(2, 1.5, '1e-10')
4.143324917492256448770769e-33
>>> coulombf(2, 1.5, 1000)
0.4482623140325567050716179
>>> coulombf(2, 1.5, 10**10)
-0.066804196437694360046619
Verifying the differential equation::
>>> l, eta, z = 2, 3, mpf(2.75)
>>> A, B = 1, 2
>>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z)
>>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z))
0.0
A Wronskian relation satisfied by the Coulomb wave functions::
>>> l = 2
>>> eta = 1.5
>>> F = lambda z: coulombf(l,eta,z)
>>> G = lambda z: coulombg(l,eta,z)
>>> for z in [3.5, -1, 2+3j]:
... chop(diff(F,z)*G(z) - F(z)*diff(G,z))
...
1.0
1.0
1.0
Another Wronskian relation::
>>> F = coulombf
>>> G = coulombg
>>> for z in [3.5, -1, 2+3j]:
... chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2))
...
0.0
0.0
0.0
An integral identity connecting the regular and irregular wave functions::
>>> l, eta, z = 4+j, 2-j, 5+2j
>>> coulombf(l,eta,z) + j*coulombg(l,eta,z)
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
>>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta)
>>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf])
(0.7997977752284033239714479 + 0.9294486669502295512503127j)
Some test cases with complex parameters, taken from Michel [2]::
>>> mp.dps = 15
>>> coulombf(1+0.1j, 50+50j, 100.156)
(-1.02107292320897e+15 - 2.83675545731519e+15j)
>>> coulombg(1+0.1j, 50+50j, 100.156)
(2.83675545731519e+15 - 1.02107292320897e+15j)
>>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j)
(4.30566371247811e-14 - 9.03347835361657e-19j)
>>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j)
(778709182061.134 + 18418936.2660553j)
The following reproduces a table in Abramowitz & Stegun, at twice
the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [5, 4, 3, 2, 1, 0]:
... print("%s %s %s" % (l, coulombf(l,eta,z),
... diff(lambda z: coulombf(l,eta,z), z)))
...
5 0.09079533488 0.1042553261
4 0.2148205331 0.2029591779
3 0.4313159311 0.320534053
2 0.7212774133 0.3952408216
1 0.9935056752 0.3708676452
0 1.143337392 0.2937960375
**References**
1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex
Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986.
2. N. Michel, "Precise Coulomb wave functions for a wide range of
complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1
"""
coulombg = r"""
Calculates the irregular Coulomb wave function
.. math ::
G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)}
where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi`
and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`.
See :func:`~mpmath.coulombf` for additional information.
**Plots**
.. literalinclude :: /modules/mpmath/plots/coulombg.py
.. image :: /modules/mpmath/plots/coulombg.png
.. literalinclude :: /modules/mpmath/plots/coulombg_c.py
.. image :: /modules/mpmath/plots/coulombg_c.png
**Examples**
Evaluation is supported for arbitrary magnitudes of `z`::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> coulombg(-2, 1.5, 3.5)
1.380011900612186346255524
>>> coulombg(2, 1.5, 3.5)
1.919153700722748795245926
>>> coulombg(-2, 1.5, '1e-10')
201126715824.7329115106793
>>> coulombg(-2, 1.5, 1000)
0.1802071520691149410425512
>>> coulombg(-2, 1.5, 10**10)
0.652103020061678070929794
The following reproduces a table in Abramowitz & Stegun,
at twice the precision::
>>> mp.dps = 10
>>> eta = 2; z = 5
>>> for l in [1, 2, 3, 4, 5]:
... print("%s %s %s" % (l, coulombg(l,eta,z),
... -diff(lambda z: coulombg(l,eta,z), z)))
...
1 1.08148276 0.6028279961
2 1.496877075 0.5661803178
3 2.048694714 0.7959909551
4 3.09408669 1.731802374
5 5.629840456 4.549343289
Evaluation close to the singularity at `z = 0`::
>>> mp.dps = 15
>>> coulombg(0,10,1)
3088184933.67358
>>> coulombg(0,10,'1e-10')
5554866000719.8
>>> coulombg(0,10,'1e-100')
5554866221524.1
Evaluation with a half-integer value for `l`::
>>> coulombg(1.5, 1, 10)
0.852320038297334
"""
coulombc = r"""
Gives the normalizing Gamow constant for Coulomb wave functions,
.. math ::
C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) +
\ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right),
where the log gamma function with continuous imaginary part
away from the negative half axis (see :func:`~mpmath.loggamma`) is implied.
This function is used internally for the calculation of
Coulomb wave functions, and automatically cached to make multiple
evaluations with fixed `l`, `\eta` fast.
"""
ellipfun = r"""
Computes any of the Jacobi elliptic functions, defined
in terms of Jacobi theta functions as
.. math ::
\mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_1(t,q)}{\vartheta_4(t,q)}
\mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)}
\frac{\vartheta_2(t,q)}{\vartheta_4(t,q)}
\mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)}
\frac{\vartheta_3(t,q)}{\vartheta_4(t,q)},
or more generally computes a ratio of two such functions. Here
`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see
:func:`~mpmath.nome`). Optionally, you can specify the nome directly
instead of `m` by passing ``q=<value>``, or you can directly
specify the elliptic parameter `k` with ``k=<value>``.
The first argument should be a two-character string specifying the
function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These
letters respectively denote the basic functions
`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`.
The identifier specifies the ratio of two such functions.
For example, ``'ns'`` identifies the function
.. math ::
\mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)}
and ``'cd'`` identifies the function
.. math ::
\mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}.
If called with only the first argument, a function object
evaluating the chosen function for given arguments is returned.
**Examples**
Basic evaluation::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> ellipfun('cd', 3.5, 0.5)
-0.9891101840595543931308394
>>> ellipfun('cd', 3.5, q=0.25)
0.07111979240214668158441418
The sn-function is doubly periodic in the complex plane with periods
`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`)::
>>> sn = ellipfun('sn')
>>> sn(2, 0.25)
0.9628981775982774425751399
>>> sn(2+4*ellipk(0.25), 0.25)
0.9628981775982774425751399
>>> chop(sn(2+2*j*ellipk(1-0.25), 0.25))
0.9628981775982774425751399
The cn-function is doubly periodic with periods `4 K(m)` and `4 i K(1-m)`::
>>> cn = ellipfun('cn')
>>> cn(2, 0.25)
-0.2698649654510865792581416
>>> cn(2+4*ellipk(0.25), 0.25)
-0.2698649654510865792581416
>>> chop(cn(2+4*j*ellipk(1-0.25), 0.25))
-0.2698649654510865792581416
The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`::
>>> dn = ellipfun('dn')
>>> dn(2, 0.25)
0.8764740583123262286931578
>>> dn(2+2*ellipk(0.25), 0.25)
0.8764740583123262286931578
>>> chop(dn(2+4*j*ellipk(1-0.25), 0.25))
0.8764740583123262286931578
"""
jtheta = r"""
Computes the Jacobi theta function `\vartheta_n(z, q)`, where
`n = 1, 2, 3, 4`, defined by the infinite series:
.. math ::
\vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
(-1)^n q^{n^2+n\,} \sin((2n+1)z)
\vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty}
q^{n^{2\,} + n} \cos((2n+1)z)
\vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty}
q^{n^2\,} \cos(2 n z)
\vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty}
(-q)^{n^2\,} \cos(2 n z)
The theta functions are functions of two variables:
* `z` is the *argument*, an arbitrary real or complex number
* `q` is the *nome*, which must be a real or complex number
in the unit disk (i.e. `|q| < 1`). For `|q| \ll 1`, the
series converge very quickly, so the Jacobi theta functions
can efficiently be evaluated to high precision.
The compact notations `\vartheta_n(q) = \vartheta_n(0,q)`
and `\vartheta_n = \vartheta_n(0,q)` are also frequently
encountered. Finally, Jacobi theta functions are frequently
considered as functions of the half-period ratio `\tau`
and then usually denoted by `\vartheta_n(z|\tau)`.
Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes
a `d`-th derivative with respect to `z`.
**Examples and basic properties**
Considered as functions of `z`, the Jacobi theta functions may be
viewed as generalizations of the ordinary trigonometric functions
cos and sin. They are periodic functions::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> jtheta(1, 0.25, '0.2')
0.2945120798627300045053104
>>> jtheta(1, 0.25 + 2*pi, '0.2')
0.2945120798627300045053104
Indeed, the series defining the theta functions are essentially
trigonometric Fourier series. The coefficients can be retrieved
using :func:`~mpmath.fourier`::
>>> mp.dps = 10
>>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4))
([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0])
The Jacobi theta functions are also so-called quasiperiodic
functions of `z` and `\tau`, meaning that for fixed `\tau`,
`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same
except for an exponential factor::
>>> mp.dps = 25
>>> tau = 3*j/10
>>> q = exp(pi*j*tau)
>>> z = 10
>>> jtheta(4, z+tau*pi, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
>>> -exp(-2*j*z)/q * jtheta(4, z, q)
(-0.682420280786034687520568 + 1.526683999721399103332021j)
The Jacobi theta functions satisfy a huge number of other
functional equations, such as the following identity (valid for
any `q`)::
>>> q = mpf(3)/10
>>> jtheta(3,0,q)**4
6.823744089352763305137427
>>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4
6.823744089352763305137427
Extensive listings of identities satisfied by the Jacobi theta
functions can be found in standard reference works.
The Jacobi theta functions are related to the gamma function
for special arguments::
>>> jtheta(3, 0, exp(-pi))
1.086434811213308014575316
>>> pi**(1/4.) / gamma(3/4.)
1.086434811213308014575316
:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex
arguments::
>>> mp.dps = 50
>>> jtheta(4, sqrt(2), 0.5)
2.0549510717571539127004115835148878097035750653737
>>> mp.dps = 25
>>> jtheta(4, 1+2j, (1+j)/5)
(7.180331760146805926356634 - 1.634292858119162417301683j)
Evaluation of derivatives::
>>> mp.dps = 25
>>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7)
1.209857192844475388637236
1.209857192844475388637236
>>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2)
-0.2598718791650217206533052
-0.2598718791650217206533052
>>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7)
-1.150231437070259644461474
-1.150231437070259644461474
>>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2)
-0.6226636990043777445898114
-0.6226636990043777445898114
>>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7)
-0.9990312046096634316587882
-0.9990312046096634316587882
>>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2)
-0.1530388693066334936151174
-0.1530388693066334936151174
>>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7)
0.9820995967262793943571139
0.9820995967262793943571139
>>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2)
0.3936902850291437081667755
0.3936902850291437081667755
**Possible issues**
For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises
``ValueError``. This exception is also raised for `|q|` extremely
close to 1 (or equivalently `\tau` very close to 0), since the
series would converge too slowly::
>>> jtheta(1, 10, 0.99999999 * exp(0.5*j))
Traceback (most recent call last):
...
ValueError: abs(q) > THETA_Q_LIM = 1.000000
"""
eulernum = r"""
Gives the `n`-th Euler number, defined as the `n`-th derivative of
`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. Equivalently, the
Euler numbers give the coefficients of the Taylor series
.. math ::
\mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n.
The Euler numbers are closely related to Bernoulli numbers
and Bernoulli polynomials. They can also be evaluated in terms of
Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`.
**Examples**
Computing the first few Euler numbers and verifying that they
agree with the Taylor series::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> [eulernum(n) for n in range(11)]
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
>>> chop(diffs(sech, 0, 10))
[1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0]
Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently
computes numerical approximations for large indices::
>>> eulernum(50)
-6.053285248188621896314384e+54
>>> eulernum(1000)
3.887561841253070615257336e+2371
>>> eulernum(10**20)
4.346791453661149089338186e+1936958564106659551331
Comparing with an asymptotic formula for the Euler numbers::
>>> n = 10**5
>>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n
3.69919063017432362805663e+436961
>>> eulernum(n)
3.699193712834466537941283e+436961
Pass ``exact=True`` to obtain exact values of Euler numbers as integers::
>>> print(eulernum(50, exact=True))
-6053285248188621896314383785111649088103498225146815121
>>> print(eulernum(200, exact=True) % 10**10)
1925859625
>>> eulernum(1001, exact=True)
0
"""
eulerpoly = r"""
Evaluates the Euler polynomial `E_n(z)`, defined by the generating function
representation
.. math ::
\frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}.
The Euler polynomials may also be represented in terms of
Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for
example
.. math ::
E_n(z) = \frac{2}{n+1} \left(
B_n(z)-2^{n+1}B_n\left(\frac{z}{2}\right)
\right).
Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
:func:`~mpmath.eulernum`).
**Examples**
Computing the coefficients of the first few Euler polynomials::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> for n in range(6):
... chop(taylor(lambda z: eulerpoly(n,z), 0, n))
...
[1.0]
[-0.5, 1.0]
[0.0, -1.0, 1.0]
[0.25, 0.0, -1.5, 1.0]
[0.0, 1.0, 0.0, -2.0, 1.0]
[-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
Evaluation for arbitrary `z`::
>>> eulerpoly(2,3)
6.0
>>> eulerpoly(5,4)
423.5
>>> eulerpoly(35, 11111111112)
3.994957561486776072734601e+351
>>> eulerpoly(4, 10+20j)
(-47990.0 - 235980.0j)
>>> eulerpoly(2, '-3.5e-5')
0.000035001225
>>> eulerpoly(3, 0.5)
0.0
>>> eulerpoly(55, -10**80)
-1.0e+4400
>>> eulerpoly(5, -inf)
-inf
>>> eulerpoly(6, -inf)
+inf
Computing Euler numbers::
>>> 2**26 * eulerpoly(26,0.5)
-4087072509293123892361.0
>>> eulernum(26)
-4087072509293123892361.0
Evaluation is accurate for large `n` and small `z`::
>>> eulerpoly(100, 0.5)
2.29047999988194114177943e+108
>>> eulerpoly(1000, 10.5)
3.628120031122876847764566e+2070
>>> eulerpoly(10000, 10.5)
1.149364285543783412210773e+30688
"""
spherharm = r"""
Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
.. math ::
Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
P_l^m(\cos \theta) e^{i m \phi}
where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
azimuthal coordinate on a sphere. Care should be used since many different
conventions for spherical coordinate variables are used.
Usually spherical harmonics are considered for `l \in \mathbb{N}`,
`m \in \mathbb{Z}`, `|m| \le l`. More generally, `l,m,\theta,\phi`
are permitted to be complex numbers.
.. note ::
    :func:`~mpmath.spherharm` returns a complex number, even if the value is
    purely real.
**Plots**
.. literalinclude :: /modules/mpmath/plots/spherharm40.py
`Y_{4,0}`:
.. image :: /modules/mpmath/plots/spherharm40.png
`Y_{4,1}`:
.. image :: /modules/mpmath/plots/spherharm41.png
`Y_{4,2}`:
.. image :: /modules/mpmath/plots/spherharm42.png
`Y_{4,3}`:
.. image :: /modules/mpmath/plots/spherharm43.png
`Y_{4,4}`:
.. image :: /modules/mpmath/plots/spherharm44.png
**Examples**
Some low-order spherical harmonics with reference values::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> theta = pi/4
>>> phi = pi/3
>>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0)
(0.2820947917738781434740397 + 0.0j)
(0.2820947917738781434740397 + 0.0j)
>>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
(0.1221506279757299803965962 - 0.2115710938304086076055298j)
>>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0)
(0.3454941494713354792652446 + 0.0j)
(0.3454941494713354792652446 + 0.0j)
>>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
(-0.1221506279757299803965962 - 0.2115710938304086076055298j)
With the normalization convention used, the spherical harmonics are orthonormal
on the unit sphere::
>>> sphere = [0,pi], [0,2*pi]
>>> dS = lambda t,p: fp.sin(t) # differential element
>>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p)
>>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p))
>>> l1 = l2 = 3; m1 = m2 = 2
>>> print(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))
(1+0j)
>>> m2 = 1 # m1 != m2
>>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)))
0.0
Evaluation is accurate for large orders::
>>> spherharm(1000,750,0.5,0.25)
(3.776445785304252879026585e-102 - 5.82441278771834794493484e-102j)
Evaluation works with complex parameter values::
>>> spherharm(1+j, 2j, 2+3j, -0.5j)
(64.44922331113759992154992 + 1981.693919841408089681743j)
"""
scorergi = r"""
Evaluates the Scorer function
.. math ::
\operatorname{Gi}(z) =
\operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt +
\operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. Another
particular solution is given by the Scorer Hi-function
(:func:`~mpmath.scorerhi`). The two functions are related as
`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/gi.py
.. image :: /modules/mpmath/plots/gi.png
.. literalinclude :: /modules/mpmath/plots/gi_c.py
.. image :: /modules/mpmath/plots/gi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3'))
0.2049755424820002450503075
0.2049755424820002450503075
>>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3'))
0.1494294524512754526382746
0.1494294524512754526382746
>>> scorergi(+inf); scorergi(-inf)
0.0
0.0
>>> scorergi(1)
0.2352184398104379375986902
>>> scorergi(-1)
-0.1166722172960152826494198
Evaluation for large arguments::
>>> scorergi(10)
0.03189600510067958798062034
>>> scorergi(100)
0.003183105228162961476590531
>>> scorergi(1000000)
0.0000003183098861837906721743873
>>> 1/(pi*1000000)
0.0000003183098861837906715377675
>>> scorergi(-1000)
-0.08358288400262780392338014
>>> scorergi(-100000)
0.02886866118619660226809581
>>> scorergi(50+10j)
(0.0061214102799778578790984 - 0.001224335676457532180747917j)
>>> scorergi(-50-10j)
(5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j)
>>> scorergi(100000j)
(-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j)
Verifying the connection between Gi and Hi::
>>> z = 0.25
>>> scorergi(z) + scorerhi(z)
0.7287469039362150078694543
>>> airybi(z)
0.7287469039362150078694543
Verifying the differential equation::
>>> for z in [-3.4, 0, 2.5, 1+2j]:
... chop(diff(scorergi,z,2) - z*scorergi(z))
...
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
-0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorergi(z)
0.2447210432765581976910539
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1))
0.2447210432765581976910539
**References**
1. [DLMF]_ section 9.12: Scorer Functions
"""
scorerhi = r"""
Evaluates the second Scorer function
.. math ::
\operatorname{Hi}(z) =
\operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt -
\operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt
which gives a particular solution to the inhomogeneous Airy
differential equation `f''(z) - z f(z) = 1/\pi`. See also
:func:`~mpmath.scorergi`.
**Plots**
.. literalinclude :: /modules/mpmath/plots/hi.py
.. image :: /modules/mpmath/plots/hi.png
.. literalinclude :: /modules/mpmath/plots/hi_c.py
.. image :: /modules/mpmath/plots/hi_c.png
**Examples**
Some values and limits::
>>> from mpmath import *
>>> mp.dps = 25; mp.pretty = True
>>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3'))
0.4099510849640004901006149
0.4099510849640004901006149
>>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3'))
0.2988589049025509052765491
0.2988589049025509052765491
>>> scorerhi(+inf); scorerhi(-inf)
+inf
0.0
>>> scorerhi(1)
0.9722051551424333218376886
>>> scorerhi(-1)
0.2206696067929598945381098
Evaluation for large arguments::
>>> scorerhi(10)
455641153.5163291358991077
>>> scorerhi(100)
6.041223996670201399005265e+288
>>> scorerhi(1000000)
7.138269638197858094311122e+289529652
>>> scorerhi(-10)
0.0317685352825022727415011
>>> scorerhi(-100)
0.003183092495767499864680483
>>> scorerhi(100j)
(-6.366197716545672122983857e-9 + 0.003183098861710582761688475j)
>>> scorerhi(50+50j)
(-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j)
>>> scorerhi(-1000-1000j)
(0.0001591549432510502796565538 - 0.000159154943091895334973109j)
Verifying the differential equation::
>>> for z in [-3.4, 0, 2, 1+2j]:
... chop(diff(scorerhi,z,2) - z*scorerhi(z))
...
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
0.3183098861837906715377675
Verifying the integral representation::
>>> z = 0.5
>>> scorerhi(z)
0.6095559998265972956089949
>>> Ai,Bi = airyai,airybi
>>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1))
0.6095559998265972956089949
"""
| [
"kevin.m.smyth@gmail.com"
] | kevin.m.smyth@gmail.com |
61bbf79465067d63c29ee60dc6d48f4dca794443 | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/sample/object_attr_get-105.py | 667ce2fd6395edeaed0eb054576d151fe61bfa6f | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 183 | py | class A(object):
a:int = 42
class B(A):
b:bool = True
def __init__(self:"B"):
print("B")
a:A = None
b:B = None
a = b = B()
print(a.a)
print($Exp.a)
print(b.b)
| [
"647530+Virtlink@users.noreply.github.com"
] | 647530+Virtlink@users.noreply.github.com |
77bebce4924876a513ff4cdcd4fcae2b0bcc671e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02811/s286027298.py | f73e68742605388a14f092320e9372e089a893a6 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | k,x=list(map(int,input().split()))
if 500*k>=x:
print('Yes')
else:
print('No') | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
9c1c5043ec4afa52876e8466255b46d56f6a2136 | 3716e91c0a18a2cf0b5807cc673d95a7539b008c | /DungeonsKitgard/DeadlyDungeonRescue.py | 284f1afc14a5fc11ad02ee961f1401bbc26d1654 | [] | no_license | kiwiapple87/CodeCombat-1 | 47f0fa6d75d6d3e9fb9c28feeb6fe2648664c1aa | ce0201e5ed099193ca40afd3b7abeee5a3732387 | refs/heads/master | 2021-05-01T16:38:03.575842 | 2016-08-25T11:13:26 | 2016-08-25T11:13:26 | 66,552,813 | 1 | 0 | null | 2016-08-25T11:39:20 | 2016-08-25T11:39:18 | null | UTF-8 | Python | false | false | 1,397 | py | # http://codecombat.com/play/level/deadly-dungeon-rescue
# Сбегите из подземелья после спасения измученного крестьянина.
# Вы можете спрятаться за горгульями.
# Убийство охранников может привести к нежелательным последствиям.
# Если вы сможете собрать все сокровища, вы можете получить дополнительную награду.
self.moveUp(5)
self.moveRight(6)
self.moveDown(4)
self.moveRight(6)
self.moveDown(4)
self.attack('Torture Room Door')
self.attack('Torture Room Door')
self.attack('Torture Master')
self.attack('Torture Master')
self.moveRight(2)
self.moveDown(2)
self.moveLeft(8)
self.attack('South Vault Door')
self.attack('South Vault Door')
self.moveUp(2)
self.moveRight()
self.moveDown()
self.moveLeft(3)
self.moveRight()
self.moveUp(6)
self.moveLeft(2)
self.moveRight(2)
self.moveDown(4)
self.moveUp(4)
self.moveRight(2)
self.moveLeft(2)
self.moveDown(4)
self.moveUp(4)
self.moveDown(7)
self.moveLeft(3)
# self.moveRight(4)
# self.moveLeft(2)
self.moveUp(9)
self.moveRight(6)
self.moveDown(2)
self.attack('Exit Door')
self.attack('Exit Door')
self.moveRight(7)
# self.moveDown(4)
# self.attack('Torture Room Door')
# self.attack('Torture Room Door')
# self.moveLeft(2)
# self.moveRight(2)
| [
"vadim-job-hg@yandex.ru"
] | vadim-job-hg@yandex.ru |
b8c3b6c0035aae5cda585026ddf1459337697870 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/verbs/_shifted.py | f91e9b4d9a75c8d13f05fdca4c6f193c33d00954 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | py |
from xai.brain.wordbase.verbs._shift import _SHIFT
# class header
class _SHIFTED(_SHIFT, ):
	"""Wordbase entry for the past-tense form "shifted".

	Thin subclass that inherits all behaviour from the base verb class
	_SHIFT and only overrides the identity fields below.
	"""
	def __init__(self,):
		_SHIFT.__init__(self)
		self.name = "SHIFTED"  # surface form of this entry
		self.specie = 'verbs'  # word category within the wordbase
		self.basic = "shift"  # lemma / base form
		self.jsondata = {}  # extra payload; empty for this entry
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e61e59a67298c69be410f34ad18bf220e57a3d6c | 92e8c0b58c3b005c5f74f770d2e91c7fc91cf181 | /tests/scraper/scraper_processor_run_test.py | ff2bb27705e37508ecc9a1eb61114d289785e23e | [] | no_license | xhijack/django-dynamic-scraper | d1e8ab6e68a34fdea810d84aa312f176610289af | 7b09960e66f7029f50266033848eaba81352b212 | refs/heads/master | 2021-01-16T18:09:35.081262 | 2016-07-15T23:45:32 | 2016-07-15T23:45:32 | 62,983,247 | 0 | 0 | null | 2016-07-10T05:28:29 | 2016-07-10T05:28:29 | null | UTF-8 | Python | false | false | 7,715 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import logging, os.path, unittest
from twisted.internet import reactor
from scrapy.exceptions import CloseSpider
from scraper.models import Event
from scraper.scraper_test import EventSpider, ScraperTest
from dynamic_scraper.models import SchedulerRuntime, Log
class ScraperProcessorRunTest(ScraperTest):
    """Integration tests for dynamic-scraper attribute processors.

    Each test configures processors and processor contexts on the scraper
    elements created by ``ScraperTest`` (``se_url``, ``se_title``,
    ``se_desc``, ``se_es_1``), points ``event_website`` at a fixture page
    served from the local test server, runs the event spider once, and then
    asserts on the ``Event`` objects that were persisted to the database.
    """

    def setUpProcessorTest(self):
        """Route detail-page URLs through 'pre_url' and scrape the
        site_with_processor fixture."""
        self.se_url.processors = 'pre_url'
        self.se_url.proc_ctxt = "'pre_url': 'http://localhost:8010/static/site_with_processor/'"
        self.se_url.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_processor/event_main.html')
        self.event_website.save()

    def setUpProcessorTestWithDetailPageUrlPlaceholder(self):
        """Like setUpProcessorTest, but the 'pre_url' context contains a
        {title} placeholder to be filled from the scraped title."""
        self.se_url.processors = 'pre_url'
        self.se_url.proc_ctxt = "'pre_url': 'http://localhost:8010/static/{title}/'"
        self.se_url.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_processor/event_main_placeholder.html')
        self.event_website.save()

    def test_processor(self):
        """'pre_url' processor alone: both events on the fixture page are saved."""
        self.setUpProcessorTest()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)

    @unittest.skip("Skipped due to unresolved problem that order of processor execution can not clearly determined.")
    def test_multiple_processors_use(self):
        """Chained 'pre_string'/'post_string' processors on the description
        (skipped: processor execution order is not deterministic)."""
        self.setUpProcessorTest()
        self.se_desc.processors = 'pre_string, post_string '
        self.se_desc.proc_ctxt = "'pre_string': 'before_', 'post_string': '_after',"
        self.se_desc.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.get(id=1).description, 'before_Event 2 description_after')

    def test_replace_processor_wrong_x_path(self):
        """'replace' needs a matching XPath: with a dead XPath nothing is saved."""
        self.setUpProcessorTest()
        self.se_title.x_path = '/div[@class="class_which_is_not_there"]/text()'
        self.se_title.processors = 'replace'
        self.se_title.proc_ctxt = "'replace': 'This text is a replacement'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 0)

    def test_replace_processor_correct_x_path(self):
        """'replace' with a matching XPath: both events are saved."""
        self.setUpProcessorTest()
        self.se_title.processors = 'replace'
        self.se_title.proc_ctxt = "'replace': 'This text is a replacement'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)

    def test_replace_processor_unicode_replace(self):
        """'replace' works with a non-ASCII replacement string."""
        self.setUpProcessorTest()
        self.se_title.processors = 'replace'
        self.se_title.proc_ctxt = "'replace': 'Replacement with beautiful unicode ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)

    def test_static_processor_wrong_x_path(self):
        """'static' supplies the value even when the XPath matches nothing."""
        self.setUpProcessorTest()
        self.se_title.x_path = '/div[@class="class_which_is_not_there"]/text()'
        self.se_title.processors = 'static'
        self.se_title.proc_ctxt = "'static': 'This text should always be there'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)

    def test_static_processor_empty_x_path(self):
        """'static' also works with an empty XPath; the static text becomes the title."""
        self.setUpProcessorTest()
        self.se_title.x_path = ''
        self.se_title.processors = 'static'
        self.se_title.proc_ctxt = "'static': 'This text should always be there'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.filter(title='This text should always be there')), 2)

    def test_static_processor_correct_x_path(self):
        """'static' with a valid XPath: both events are still saved."""
        self.setUpProcessorTest()
        self.se_title.processors = 'static'
        self.se_title.proc_ctxt = "'static': 'This text should always be there'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)

    def test_static_processor_unicode_text(self):
        """'static' works with a non-ASCII static value."""
        self.setUpProcessorTest()
        self.se_title.processors = 'static'
        self.se_title.proc_ctxt = "'static': 'This text should always be there ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.filter(title='This text should always be there ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢')), 2)

    def test_reg_exp(self):
        """reg_exp attribute filter: only the 6-digit group is kept as description."""
        self.se_desc.reg_exp = '(\d{6})'
        self.se_desc.save()
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_with_reg_exp/event_main.html')
        self.event_website.save()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 2)
        self.assertEqual(Event.objects.get(title='Event 1').description, '563423')

    def test_processor_with_detail_page_url_placeholder(self):
        """{title} placeholder inside the 'pre_url' context is substituted into
        the detail-page URL."""
        self.setUpProcessorTestWithDetailPageUrlPlaceholder()
        self.run_event_spider(1)
        self.assertEqual(len(Event.objects.all()), 1)
        self.assertEqual(
            Event.objects.get(title='site_with_processor').url,
            'http://localhost:8010/static/site_with_processor/event1.html')

    def test_processor_with_placeholder_mp_to_dp(self):
        """Main-page {title} value is available as a placeholder in the
        detail-page description's processor context."""
        self.setUpProcessorTest()
        self.se_desc.processors = 'post_string'
        self.se_desc.proc_ctxt = "'post_string': '_START_{title}_END'"
        self.se_desc.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.filter(description='Event 1 description_START_Event 1_END').count(), 1)

    def test_processor_with_placeholder_mp_to_dp_unicode(self):
        """Same main-page-to-detail-page placeholder substitution, with
        non-ASCII content on the fixture page."""
        self.event_website.url = os.path.join(self.SERVER_URL, 'site_unicode/event_main.html')
        self.event_website.save()
        self.se_desc.processors = 'post_string'
        self.se_desc.proc_ctxt = "'post_string': '_START_{title}_END'"
        self.se_desc.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.filter(description='Event 1 description ♖ ☦ ✝ ❖ ➎ ♠ ♣ ♥_START_Event 1 ❤ ☀ ★ ☂ ☻ ♞ ☯ ☭ ☢_END').count(), 1)

    def test_processor_with_placeholder_dp_to_mp(self):
        """Detail-page {description} value can be substituted into the
        main-page title's processor context."""
        self.setUpProcessorTest()
        self.se_title.processors = 'post_string'
        self.se_title.proc_ctxt = "'post_string': '_START_{description}_END'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.filter(title='Event 1_START_Event 1 description_END').count(), 1)

    def test_processor_with_placeholder_tmp_to_mp(self):
        """An extra-standard attribute ({extra_standard_1}) can be used as a
        placeholder in the title's processor context."""
        self.setUpProcessorTest()
        self.se_title.processors = 'post_string'
        self.se_title.proc_ctxt = "'post_string': '_START_{extra_standard_1}_END'"
        self.se_title.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.filter(title='Event 1_START_Event 1_END').count(), 1)

    def test_processor_with_placeholder_tmp_with_placeholder_to_mp(self):
        """The extra-standard attribute is itself processed ('remove_chars'
        strips digits/spaces) before being substituted into the title."""
        self.setUpProcessorTest()
        self.se_title.processors = 'post_string'
        self.se_title.proc_ctxt = "'post_string': '_START_{extra_standard_1}_END'"
        self.se_title.save()
        self.se_es_1.processors = 'remove_chars'
        self.se_es_1.proc_ctxt = "'remove_chars': '[0-9 ]+'"
        self.se_es_1.save()
        self.run_event_spider(1)
        self.assertEqual(Event.objects.filter(title='Event 1_START_Event_END').count(), 1)
| [
"Holger.Drewes@googlemail.com"
] | Holger.Drewes@googlemail.com |
c7c35f722e75e78845d2f157718f044c3e4a0579 | 596e92d0d484b6e7eee6d322e72e52748fdeaa5d | /sportsdata/mma_stats/models/mma_stats_fight.py | 61535408a9967328ee4ee850f51a19c715d4cf21 | [] | no_license | scottypate/sportsdata | f5f61ddc7eb482883f93737c6ce73dd814ed4336 | a07955ab50bf4fff1ce114ed9895095ff770c473 | refs/heads/main | 2023-08-18T16:51:56.452678 | 2021-10-22T12:44:08 | 2021-10-22T12:44:08 | 420,062,350 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 11,297 | py | # coding: utf-8
"""
MMA v3 Stats
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class MmaStatsFight(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'fight_id': 'int',
'order': 'int',
'status': 'str',
'weight_class': 'str',
'card_segment': 'str',
'referee': 'str',
'rounds': 'int',
'result_clock': 'int',
'result_round': 'int',
'result_type': 'str',
'winner_id': 'int',
'fighters': 'list[MmaStatsFighterInfo]',
'active': 'bool'
}
attribute_map = {
'fight_id': 'FightId',
'order': 'Order',
'status': 'Status',
'weight_class': 'WeightClass',
'card_segment': 'CardSegment',
'referee': 'Referee',
'rounds': 'Rounds',
'result_clock': 'ResultClock',
'result_round': 'ResultRound',
'result_type': 'ResultType',
'winner_id': 'WinnerId',
'fighters': 'Fighters',
'active': 'Active'
}
def __init__(self, fight_id=None, order=None, status=None, weight_class=None, card_segment=None, referee=None, rounds=None, result_clock=None, result_round=None, result_type=None, winner_id=None, fighters=None, active=None): # noqa: E501
"""MmaStatsFight - a model defined in Swagger""" # noqa: E501
self._fight_id = None
self._order = None
self._status = None
self._weight_class = None
self._card_segment = None
self._referee = None
self._rounds = None
self._result_clock = None
self._result_round = None
self._result_type = None
self._winner_id = None
self._fighters = None
self._active = None
self.discriminator = None
if fight_id is not None:
self.fight_id = fight_id
if order is not None:
self.order = order
if status is not None:
self.status = status
if weight_class is not None:
self.weight_class = weight_class
if card_segment is not None:
self.card_segment = card_segment
if referee is not None:
self.referee = referee
if rounds is not None:
self.rounds = rounds
if result_clock is not None:
self.result_clock = result_clock
if result_round is not None:
self.result_round = result_round
if result_type is not None:
self.result_type = result_type
if winner_id is not None:
self.winner_id = winner_id
if fighters is not None:
self.fighters = fighters
if active is not None:
self.active = active
@property
def fight_id(self):
"""Gets the fight_id of this MmaStatsFight. # noqa: E501
:return: The fight_id of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._fight_id
@fight_id.setter
def fight_id(self, fight_id):
"""Sets the fight_id of this MmaStatsFight.
:param fight_id: The fight_id of this MmaStatsFight. # noqa: E501
:type: int
"""
self._fight_id = fight_id
@property
def order(self):
"""Gets the order of this MmaStatsFight. # noqa: E501
:return: The order of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._order
@order.setter
def order(self, order):
"""Sets the order of this MmaStatsFight.
:param order: The order of this MmaStatsFight. # noqa: E501
:type: int
"""
self._order = order
@property
def status(self):
"""Gets the status of this MmaStatsFight. # noqa: E501
:return: The status of this MmaStatsFight. # noqa: E501
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this MmaStatsFight.
:param status: The status of this MmaStatsFight. # noqa: E501
:type: str
"""
self._status = status
@property
def weight_class(self):
"""Gets the weight_class of this MmaStatsFight. # noqa: E501
:return: The weight_class of this MmaStatsFight. # noqa: E501
:rtype: str
"""
return self._weight_class
@weight_class.setter
def weight_class(self, weight_class):
"""Sets the weight_class of this MmaStatsFight.
:param weight_class: The weight_class of this MmaStatsFight. # noqa: E501
:type: str
"""
self._weight_class = weight_class
@property
def card_segment(self):
"""Gets the card_segment of this MmaStatsFight. # noqa: E501
:return: The card_segment of this MmaStatsFight. # noqa: E501
:rtype: str
"""
return self._card_segment
@card_segment.setter
def card_segment(self, card_segment):
"""Sets the card_segment of this MmaStatsFight.
:param card_segment: The card_segment of this MmaStatsFight. # noqa: E501
:type: str
"""
self._card_segment = card_segment
@property
def referee(self):
"""Gets the referee of this MmaStatsFight. # noqa: E501
:return: The referee of this MmaStatsFight. # noqa: E501
:rtype: str
"""
return self._referee
@referee.setter
def referee(self, referee):
"""Sets the referee of this MmaStatsFight.
:param referee: The referee of this MmaStatsFight. # noqa: E501
:type: str
"""
self._referee = referee
@property
def rounds(self):
"""Gets the rounds of this MmaStatsFight. # noqa: E501
:return: The rounds of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._rounds
@rounds.setter
def rounds(self, rounds):
"""Sets the rounds of this MmaStatsFight.
:param rounds: The rounds of this MmaStatsFight. # noqa: E501
:type: int
"""
self._rounds = rounds
@property
def result_clock(self):
"""Gets the result_clock of this MmaStatsFight. # noqa: E501
:return: The result_clock of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._result_clock
@result_clock.setter
def result_clock(self, result_clock):
"""Sets the result_clock of this MmaStatsFight.
:param result_clock: The result_clock of this MmaStatsFight. # noqa: E501
:type: int
"""
self._result_clock = result_clock
@property
def result_round(self):
"""Gets the result_round of this MmaStatsFight. # noqa: E501
:return: The result_round of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._result_round
@result_round.setter
def result_round(self, result_round):
"""Sets the result_round of this MmaStatsFight.
:param result_round: The result_round of this MmaStatsFight. # noqa: E501
:type: int
"""
self._result_round = result_round
@property
def result_type(self):
"""Gets the result_type of this MmaStatsFight. # noqa: E501
:return: The result_type of this MmaStatsFight. # noqa: E501
:rtype: str
"""
return self._result_type
@result_type.setter
def result_type(self, result_type):
"""Sets the result_type of this MmaStatsFight.
:param result_type: The result_type of this MmaStatsFight. # noqa: E501
:type: str
"""
self._result_type = result_type
@property
def winner_id(self):
"""Gets the winner_id of this MmaStatsFight. # noqa: E501
:return: The winner_id of this MmaStatsFight. # noqa: E501
:rtype: int
"""
return self._winner_id
@winner_id.setter
def winner_id(self, winner_id):
"""Sets the winner_id of this MmaStatsFight.
:param winner_id: The winner_id of this MmaStatsFight. # noqa: E501
:type: int
"""
self._winner_id = winner_id
@property
def fighters(self):
"""Gets the fighters of this MmaStatsFight. # noqa: E501
:return: The fighters of this MmaStatsFight. # noqa: E501
:rtype: list[MmaStatsFighterInfo]
"""
return self._fighters
@fighters.setter
def fighters(self, fighters):
"""Sets the fighters of this MmaStatsFight.
:param fighters: The fighters of this MmaStatsFight. # noqa: E501
:type: list[MmaStatsFighterInfo]
"""
self._fighters = fighters
    @property
    def active(self):
        """Gets the active of this MmaStatsFight.  # noqa: E501

        Read from the private ``_active`` backing attribute.

        :return: The active of this MmaStatsFight.  # noqa: E501
        :rtype: bool
        """
        return self._active
    @active.setter
    def active(self, active):
        """Sets the active of this MmaStatsFight.

        Stores the value on the private ``_active`` backing attribute.

        :param active: The active of this MmaStatsFight.  # noqa: E501
        :type: bool
        """
        self._active = active
    def to_dict(self):
        """Returns the model properties as a dict.

        Nested models (anything exposing ``to_dict``) are serialized
        recursively, both inside lists and inside dict values.
        """
        result = {}
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize list elements that are models.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Serialize dict values that are models; leave plain items as-is.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(MmaStatsFight, dict):
            # When the model subclasses dict, carry over any extra keys too.
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model.

        Pretty-prints the ``to_dict`` form via :func:`pprint.pformat`.
        """
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint` -- delegates to :meth:`to_str`."""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal.

        Compares the complete ``__dict__`` state of both instances. An
        operand of any other type compares unequal: ``False`` is returned
        rather than ``NotImplemented``, so the reflected comparison on the
        other operand is never attempted.
        """
        if not isinstance(other, MmaStatsFight):
            return False
        return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"scotty.pate@auth0.com"
] | scotty.pate@auth0.com |
7e2b34df685708489aa33c5b08b40994f15d6866 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/parity_20200821124709.py | 072436c7d2907623d847dd852d9c36fed3b03bda | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 214 | py | def parity(A):
# we can have two arrays one to store even and other to store odd
# that would cost memory
even = []
odd = []
for i in range(len(A)):
if A[i]%2 == 0:
parity([3,1,2,4]) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
823499cb9328a7731d079ae7523ae00c2b7341b3 | e71ecfe679dd8c800e8b0960d4ba68e19401a4fc | /PyGithub_examples/search_by_code.py | a86993ecc3ded7531443e407581a721b05c40738 | [] | no_license | igizm0/SimplePyScripts | 65740038d36aab50918ca5465e21c41c87713630 | 62c8039fbb92780c8a7fbb561ab4b86cc2185c3d | refs/heads/master | 2021-04-12T10:48:17.769548 | 2017-06-15T18:53:04 | 2017-06-15T18:53:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,000 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'ipetrash'

# GitHub credentials; leave as None for unauthenticated (rate-limited) access.
LOGIN = None
PASSWORD = None

# http://user:password@proxy_host:proxy_port
PROXY = None
if PROXY:
    import os
    os.environ['http_proxy'] = PROXY

from github import Github

gh = Github(LOGIN, PASSWORD)

# print(list(gh.search_code('requests auth github filename:.py language:python')[:5]))

search_query = 'requests auth github filename:.py language:python'
# print(gh.search_code(search_query).totalCount)

# The Search API has a custom rate limit. For requests using Basic Authentication, OAuth, or client ID and
# secret, you can make up to 30 requests per minute. For unauthenticated requests, the rate limit allows
# you to make up to 10 requests per minute.
#
# If authenticated, a request may be sent every 2 seconds, otherwise every 6
timeout = 2 if LOGIN and PASSWORD else 6
# Add a small safety margin, just in case
timeout += 0.5

import time

search_result = gh.search_code(search_query)
total_count = search_result.totalCount

# Inspect the first match of the first result page: API URL,
# base64-encoded file content, and the HTML URL on github.com.
page = 0
data = search_result.get_page(page)
print(data[0])
print(dir(data[0]))
print(data[0].url)
print(data[0].content)

from base64 import b64decode as base64_to_text

print(base64_to_text(data[0].content.encode()).decode())
print(data[0].html_url)

# get user from repo url
user = data[0].html_url.split('/')[3]
print(user)

# i = 1
# while total_count > 0:
#     data = search_result.get_page(page)
#     for result in data:
#         print(i, result)
#         i += 1
#
#     print('page: {}, total: {}, results: {}'.format(page, total_count, len(data)))
#     page += 1
#     total_count -= len(data)
#
#     # Delay between requests so GitHub does not temporarily block access
#     time.sleep(timeout)

# i = 1
# for match in gh.search_code(search_query):
#     print(i, match)
#     i += 1
#
#     time.sleep(timeout)
#
#     # print(dir(match))
#     # break
| [
"gil9red@gmail.com"
] | gil9red@gmail.com |
da700d1576d08b4500612bcf1e824f1dee1cd1a6 | ffe2e0394c3a386b61e0c2e1876149df26c64970 | /cal.py | 922ab0f65f531f1c5d5749bfb43c69c5da379a0d | [] | no_license | garethpaul/WillBeOut | 202e0ad7a12800c6008ec106c67ee7d23d256a07 | c8c40f2f71238c5a5ac6f5ce0cfb3a07e166b341 | refs/heads/master | 2016-09-05T14:02:15.648358 | 2013-01-16T17:26:43 | 2013-01-16T17:26:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,412 | py | import tornado.auth
import tornado.web
import base
import json
from cgi import escape
class CalHandler(base.BaseHandler):
    """Per-user time entries: POST stores one entry in ``willbeout_times``,
    GET returns the authenticated user's entries for a given week as JSON."""

    @tornado.web.authenticated
    def post(self):
        """Insert one time entry for the currently logged-in user.

        Expects form arguments: hour, day, d (date), month, week, string.
        """
        _user_id = self.get_current_user()['id']
        _user_name = self.get_current_user()['name']
        _hour = self.get_argument('hour')
        _day = self.get_argument('day')
        _date = self.get_argument('d')
        _month = self.get_argument('month')
        _week = self.get_argument('week')
        _string = self.get_argument('string')
        # check if vote exists
        # NOTE(review): despite the comment above, no duplicate check is done
        # here -- every POST inserts a new row; confirm that is intentional.
        # The %s placeholders are bound by the DB driver (parameterized
        # query), so user input is not interpolated into the SQL string.
        # NOTE(review): the execute() return value ``c`` is never used.
        c = self.db.execute(
            """INSERT INTO willbeout_times (user_id, user_name, hour, day, month, week, string, d) VALUES (%s,%s,%s,%s,%s,%s,%s,%s)""",
            int(_user_id), escape(_user_name), str(_hour), str(_day), int(
                _month), int(_week), str(_string), int(_date))
        self.write('OK')

    @tornado.web.authenticated
    def get(self):
        """Return this user's entries for week ``wk`` as a JSON array of
        {day, month, hour, date, string} objects."""
        _json = []
        _user_id = self.get_current_user()['id']
        _wk = self.get_argument('wk')
        for i in self.db.query(
                "SELECT * FROM willbeout_times WHERE user_id = %s AND week = %s",
                escape(_user_id), _wk):
            _json.append({'day': str(i.day), 'month': i.month, 'hour':
                i.hour, 'date': i.d, 'string': str(i.string)})
        self.write(json.dumps(_json))
| [
"gareth@garethpaul.com"
] | gareth@garethpaul.com |
714c50c40ca01e9b0f9ed55c904b6094b746454f | 8c8c56dfd72f3de4c2637050d113a58193ee848a | /scripts/create_span_concept_dict.py | b97a17717f886df57237336e76b318c773e027cd | [] | no_license | raosudha89/amr_emnlp | 9eb83d68e4c81cd257e2f0d0ed2ac29440563ca4 | 91b3ca6526c6872fed2cdfff59ff83342353ae07 | refs/heads/master | 2021-01-10T04:59:47.845470 | 2016-02-01T00:14:31 | 2016-02-01T00:14:31 | 50,605,235 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,052 | py | import sys
import cPickle as pickle
from collections import OrderedDict
argv = sys.argv[1:]
if len(argv) < 1:
print "usage: create_span_concept_dict.py <concept_training_dataset.p>"
sys.exit()
concept_training_dataset = pickle.load(open(argv[0], "rb"))
span_concept_dict = {}
for id, concept_training_data in concept_training_dataset.iteritems():
for [span, pos, concept, name, ner] in concept_training_data:
if span_concept_dict.has_key(span):
if span_concept_dict[span].has_key(concept):
span_concept_dict[span][concept] += 1
else:
span_concept_dict[span][concept] = 1
else:
span_concept_dict[span] = {concept:1}
#Sort the concepts for each span by their frequency
for span, concepts in span_concept_dict.iteritems():
span_concept_dict[span] = OrderedDict(sorted(concepts.items(), key=lambda concepts: concepts[1], reverse=True)).items()
print_to_file = 1
if print_to_file:
for span, concepts in span_concept_dict.iteritems():
print span, concepts
pickle.dump(span_concept_dict, open("span_concept_dict.p", "wb"))
| [
"raosudha@umiacs.umd.edu"
] | raosudha@umiacs.umd.edu |
7069511abb46a5cbe8b75a04015b09471f7bea14 | 9ac90488fa5ccd5100e2593e34031d4692664e28 | /data/input_files_new_philly_dta/DTA_interface.py | 0b53ff74280c6a95b274b48b9cc2ab396d8f0bfd | [
"MIT"
] | permissive | AlanPi1992/MAC-POSTS | 18a2c9f25a06d6512a96de986c4b2d38f9f9fa32 | 4e4ed3bb6faa5ebd0aa5059b2dfff103fe8f1961 | refs/heads/master | 2021-06-27T06:18:58.116114 | 2019-05-22T21:54:45 | 2019-05-22T21:54:45 | 105,393,332 | 0 | 1 | MIT | 2018-11-26T04:45:20 | 2017-09-30T18:57:17 | Jupyter Notebook | UTF-8 | Python | false | false | 4,077 | py | import numpy as np
import os
class Link:
    """One network link parsed from a line of the DTA link file.

    Fields are parsed positionally from a whitespace-separated record:
    ID, type, name, from-node, to-node, length, free-flow speed (FFS),
    capacity, jam density (RHOJ), number of lanes.

    Note: the ``np.float`` alias was removed in NumPy >= 1.24; the class
    defaults now use the built-in ``float`` (identical values).
    """

    from1 = None
    to1 = None
    ID = None
    linkType = None
    name = None
    length = 0.0
    FFS = 0.0
    cap = 0.0
    RHOJ = 0.0
    lane = None
    hasCounts = False
    hasSpeed = False
    volume = 0.0
    lambda_plus = 0.0
    lambda_minus = 0.0
    v = 0.0
    u = 0.0

    def __init__(self, re):
        """Parse the record string *re* into typed attributes."""
        words = re.split()
        self.ID = int(words[0])
        self.linkType = words[1]
        self.name = words[2]
        self.from1 = int(words[3])
        self.to1 = int(words[4])
        self.length = np.float32(words[5])
        self.FFS = np.float64(words[6])
        self.cap = np.float64(words[7])
        self.RHOJ = np.float64(words[8])
        self.lane = int(words[9])

    def isConnector(self):
        """1 if this link is a connector (sentinel jam density > 9999), else 0."""
        return int(self.RHOJ > 9999)
def read_output(total_inverval, path):
    """Read per-interval link volumes recorded by an MNM run.

    The file ``<path>record/MNM_output_record_interval_volume`` has one
    header line of link IDs followed by one line per interval, each with a
    volume per link (same column order as the header).

    :param total_inverval: expected number of interval rows (sic: name kept
        for backward compatibility with callers).
    :param path: directory prefix, expected to end with a path separator.
    :return: dict mapping link id -> np.ndarray of length ``total_inverval``.

    Fixes vs. the original: the Python-2-only ``file`` builtin and print
    statement are replaced with ``open``/``print()``, the handle is closed
    via ``with``, and the removed ``np.float`` alias is now ``float``.
    """
    output = dict()
    link_id_list = list()
    with open(path + "record/MNM_output_record_interval_volume", 'r') as f:
        # Header: one column per link id.
        line = f.readline()
        words = line.split()
        for str_link_id in words:
            link_id = int(str_link_id)
            output[link_id] = np.zeros(total_inverval)
            link_id_list.append(link_id)
        # Data rows: one per interval, columns aligned with the header.
        line = f.readline()
        counter = 0
        while line:
            words = line.split()
            for idx, str_link_volume in enumerate(words):
                output[link_id_list[idx]][counter] = float(str_link_volume)
            counter = counter + 1
            line = f.readline()
    if (counter != total_inverval):
        # Row count disagrees with the requested horizon -- warn, as before.
        print("Potential error")
    return output
def get_link_dic(path):
    """Parse ``<path>Philly.lin`` and return {link ID: Link}.

    The first (header) line is skipped and only physical "LWRLK" links are
    kept. ``with open(...)`` replaces the Python-2-only ``file`` builtin and
    closes the handle, which the original leaked.
    """
    linkDic = dict()
    with open(path + "Philly.lin", "r") as f:
        link_log = f.readlines()[1:]
    for line in link_log:
        e = Link(line)
        if e.linkType == "LWRLK":
            linkDic[e.ID] = e
    return linkDic
def get_matrix(link_dict, output_dict, total_inverval):
    """Assemble an occupancy matrix from link volumes.

    Each row corresponds to one link: column 0 holds the link ID, columns
    1..total_inverval hold volume normalized by the link's storage
    capacity ``RHOJ * lane * length``.

    :param link_dict: {link id: object with RHOJ, lane, length attributes}
    :param output_dict: {link id: array of per-interval volumes}
    :return: np.ndarray of shape (len(link_dict), total_inverval + 1)
    """
    output_matrix = np.zeros((len(link_dict), total_inverval + 1))
    for idx, link_id in enumerate(link_dict.keys()):
        output_matrix[idx][0] = link_id
        # ``float`` replaces the np.float alias removed in NumPy >= 1.24.
        output_matrix[idx, 1:total_inverval + 1] = output_dict[link_id] / (
            link_dict[link_id].RHOJ * float(link_dict[link_id].lane) * link_dict[link_id].length
        )
    return output_matrix
def read_results(total_inverval, path):
    """Convenience wrapper: parse the network under *path*, read the recorded
    interval volumes, and return the assembled occupancy matrix."""
    links = get_link_dic(path)
    volumes = read_output(total_inverval, path)
    return get_matrix(links, volumes, total_inverval)
def rewrite_conf(request, conf_name):
    """Write ``conf_name + "new"``: a copy of *conf_name* where every line
    starting with a key of *request* is replaced by ``key = value``.

    Lines matching no key are copied through verbatim; a line matching
    several keys is rewritten once per matching key (as before).

    Fixes vs. the original: the Python-2-only ``file`` builtin,
    ``dict.iteritems`` and print statement are replaced with ``open``,
    ``dict.items`` and ``print()``, and both handles are now closed via
    ``with`` -- the original never closed (or reliably flushed) the output.
    """
    with open(conf_name) as src:
        conf_log = src.readlines()
    with open(conf_name + "new", "w") as f:
        for line in conf_log:
            change_flag = False
            for trigger, value in request.items():
                if line.startswith(trigger):
                    print("changing: " + str(trigger))
                    change_flag = True
                    f.write(str(trigger) + " = " + str(value).strip("\n\t") + "\n")
            if not change_flag:
                f.write(line)
def replace_conf(conf_name):
    """Promote ``<conf_name>new`` over *conf_name* when it exists.

    The staged file replaces the original (remove + rename); if no staged
    file is present, this is a no-op.
    """
    candidate = conf_name + "new"
    if not os.path.exists(candidate):
        return
    os.remove(conf_name)
    os.rename(candidate, conf_name)
def modify_conf(request, path):
    """Apply the *request* key overrides to ``config.conf`` under *path*,
    staging the rewrite and then swapping it in place."""
    target = path + "config.conf"
    rewrite_conf(request, target)
    replace_conf(target)
def run_MNM(path):
    """Run the external ``dta_response`` simulator located under *path*."""
    # "." + path builds a relative executable path (path="/" -> "./dta_response").
    # NOTE(review): os.system's return code is discarded, so simulator
    # failures are silent -- confirm that is acceptable.
    os.system("." + path + "dta_response")
##################################################
####### main ##########
##################################################
def get_DNL_results(params):
    """Run one dynamic-network-loading pass and return the occupancy matrix.

    *params* is currently unused: the horizon (60 intervals) and the start
    assignment interval (20) are hard-coded, exactly as in the original.
    """
    total_inverval = 60
    start_interval = 20
    path = "/"
    # Overrides pushed into config.conf before launching the simulator.
    request = {
        "max_interval": total_inverval,
        "start_assign_interval": start_interval,
    }
    modify_conf(request, path)
    run_MNM(path)
    return read_results(total_inverval, path)
# Script entry: executing (or importing) this module immediately runs one
# full DNL pass as a side effect; get_DNL_results ignores params_local.
params_local = dict()
# # params_local["total_inverval"] = 167
# # params_local["start_assign_interval"] = 0
a = get_DNL_results(params_local)
# np.savetxt("a.txt", a)
# # linkDic = dict()
# match_file = file("match_file", "w")
# link_log = file("Philly.lin", "r").readlines()[1:]
# for line in link_log:
#     e = Link(line)
#     if e.linkType == "LWRLK":
#         linkDic[e.ID] = e
# for link in linkDic.itervalues():
#     print link.ID
#     match_file.write(" ".join([str(e) for e in [link.ID, link.name]]) + "\n")
| [
"lemma171@gmail.com"
] | lemma171@gmail.com |
2a60db08d08a074061f2c691d287318b4559dc11 | 7839d009f3ae0a0c1bc360b86756eba80fce284d | /build/rostest/catkin_generated/pkg.installspace.context.pc.py | 7235d889406793f9882e29d9c47b2428384ccc19 | [] | no_license | abhat91/ros_osx | b5022daea0b6fdaae3489a97fdb1793b669e64f5 | 39cd8a79788d437927a24fab05a0e8ac64b3fb33 | refs/heads/master | 2021-01-10T14:43:41.047439 | 2016-03-13T23:18:59 | 2016-03-13T23:18:59 | 53,812,264 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/opt/ros/jade/include;/usr/local/include".split(';') if "/opt/ros/jade/include;/usr/local/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-l:/usr/local/lib/libboost_system-mt.dylib;-l:/usr/local/lib/libboost_thread-mt.dylib;-l:/usr/local/lib/libboost_chrono-mt.dylib;-l:/usr/local/lib/libboost_date_time-mt.dylib;-l:/usr/local/lib/libboost_atomic-mt.dylib".split(';') if "-l:/usr/local/lib/libboost_system-mt.dylib;-l:/usr/local/lib/libboost_thread-mt.dylib;-l:/usr/local/lib/libboost_chrono-mt.dylib;-l:/usr/local/lib/libboost_date_time-mt.dylib;-l:/usr/local/lib/libboost_atomic-mt.dylib" != "" else []
PROJECT_NAME = "rostest"
PROJECT_SPACE_DIR = "/opt/ros/jade"
PROJECT_VERSION = "1.11.16"
| [
"abhat@wpi.edu"
] | abhat@wpi.edu |
3f1a432d893e46055583ae755aaef4aef96eaf57 | af67d7d0f56da5d8ac9a6fbd4b0aedcebf5a6434 | /buglab/representations/codereprs.py | 5180a587b54d6c055eb02f11cb9958ead312eccd | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | microsoft/neurips21-self-supervised-bug-detection-and-repair | 23ef751829dc90d83571cd68c8703e2c985e4521 | 4e51184a63aecd19174ee40fc6433260ab73d56e | refs/heads/main | 2023-05-23T12:23:41.870343 | 2022-01-19T12:16:19 | 2022-01-19T12:16:19 | 417,330,374 | 90 | 23 | MIT | 2022-08-30T11:54:55 | 2021-10-15T01:14:33 | Python | UTF-8 | Python | false | false | 9,586 | py | from collections import defaultdict
from pathlib import Path
from typing import Any, DefaultDict, Dict, List, Optional, Set, Tuple, Union
import libcst as cst
from libcst.metadata import CodeRange
from typing_extensions import Final
from buglab.representations.data import BugLabGraph
from buglab.utils.cstutils import PersistentMetadataWrapper, PositionFilter, subsumes_code_range
from buglab.utils.text import get_text_in_range
class DummyEntity:
    """A lightweight synthetic graph node that is not part of the parsed CST
    but still carries a display label and a source position."""

    __slots__ = ("value", "range")

    def __init__(self, value: str, node_range: CodeRange):
        self.value: Final = value  # display label of the entity
        self.range: Final = node_range  # source span the entity is anchored to
# A graph node is either a real CST node or a synthetic DummyEntity.
TNode = Union[cst.CSTNode, DummyEntity]
class PythonCodeRelations:
    """Accumulates named relations (edge sets) among the CST nodes of one
    Python source file and renders them either as a serializable BugLab
    graph (:meth:`as_serializable`) or as Graphviz dot output (:meth:`as_dot`).
    """

    def __init__(self, code_text: str, path: Path):
        self.__code_text = code_text
        self.__path: Final = path
        # Persistent wrapper: repeated .visit() calls reuse the same tree.
        self.__ast_with_metadata_wrapper = PersistentMetadataWrapper(cst.parse_module(code_text), unsafe_skip_copy=True)
        # relation kind -> from-node -> {(to-node, metadata), ...}
        self.__relations: DefaultDict[str, DefaultDict[TNode, Set[Tuple[TNode, Any]]]] = defaultdict(
            lambda: defaultdict(set)
        )

    @property
    def ast(self) -> cst.Module:
        """The parsed module CST."""
        return self.__ast_with_metadata_wrapper.module

    @property
    def ast_with_metadata_wrapper(self) -> cst.MetadataWrapper:
        """The metadata wrapper around the parsed CST."""
        return self.__ast_with_metadata_wrapper

    @property
    def code_text(self) -> str:
        """The raw source text this object was built from."""
        return self.__code_text

    @property
    def path(self) -> Path:
        """Path of the analyzed source file."""
        return self.__path

    def add_relation(
        self, relation_kind: str, from_node: TNode, to_node: Optional[TNode] = None, metadata: Any = None
    ) -> None:
        """Record one edge of kind *relation_kind* from *from_node*.

        At least one of *to_node* / *metadata* must be given: an edge may
        target a plain metadata value instead of a node.
        """
        assert from_node is not None
        node_relations = self.__relations[relation_kind][from_node]
        assert to_node is not None or metadata is not None
        node_relations.add((to_node, metadata))

    def __map_nodes_to_idx(self, target_range: Optional[CodeRange]) -> Dict[Any, int]:
        """Map all nodes within the target range to a unique id."""
        if target_range is not None:
            nodes_to_use = self.__get_nodes_within_range(target_range)
        else:
            nodes_to_use = None

        node_to_idx: Dict[Any, int] = {}

        def add_node(node):
            # Ids are assigned in first-seen order.
            if node not in node_to_idx:
                node_to_idx[node] = len(node_to_idx)

        for node_rels in self.__relations.values():
            for from_node, target_nodes in node_rels.items():
                # Skip sources outside the requested range: CST nodes by set
                # membership, dummy entities by code-range subsumption.
                if nodes_to_use is not None and isinstance(from_node, cst.CSTNode) and from_node not in nodes_to_use:
                    continue
                elif (
                    nodes_to_use is not None
                    and isinstance(from_node, DummyEntity)
                    and not subsumes_code_range(from_node.range, target_range)
                ):
                    continue
                add_node(from_node)
                for node, metadata in target_nodes:
                    if isinstance(node, cst.CSTNode) and (nodes_to_use is None or node in nodes_to_use):
                        add_node(node)
                    elif isinstance(node, DummyEntity) and (
                        target_range is None or subsumes_code_range(node.range, target_range)
                    ):
                        add_node(node)
                    elif node is None:
                        # Metadata-only target: the metadata value itself
                        # becomes a graph node (e.g. a symbol name string).
                        assert metadata is not None, "Both the node and the metadata is empty."
                        add_node(metadata)
        return node_to_idx

    def __get_nodes_within_range(self, range: CodeRange) -> Set[cst.CSTNode]:
        """Collect all CST nodes whose position lies inside *range*."""
        visitor = PositionFilter(range)
        self.__ast_with_metadata_wrapper.visit(visitor)
        return visitor.nodes_within_range

    def __node_to_label(self, node: Union[cst.CSTNode, DummyEntity]) -> str:
        """Human-readable label: literal/token text when the node carries one,
        otherwise the CST node's class name."""
        if isinstance(node, cst.Name):
            return node.value
        elif isinstance(node, cst.Integer):
            return str(node.value)
        elif isinstance(node, cst.Float):
            return str(node.value)
        elif isinstance(node, cst.SimpleString):
            return node.value
        elif isinstance(node, cst.Imaginary):
            return node.value
        elif isinstance(node, DummyEntity):
            return node.value
        elif hasattr(node, "_get_token"):
            # Operator/keyword nodes expose their concrete token privately.
            return node._get_token()
        return type(node).__name__

    def as_serializable(
        self,
        target_range: Optional[CodeRange] = None,
        reference_nodes: Optional[List[cst.CSTNode]] = None,
    ) -> Tuple[BugLabGraph, Dict[Any, int]]:
        """Serialize the relations as a BugLab graph dict plus the node->id map.

        NOTE(review): passing ``reference_nodes=None`` would crash in the
        comprehension below -- callers must always supply a list; consider
        defaulting to ``[]``.
        """
        # TODO: Refactor this out of this class to allow multiple different serializable representations.
        node_idxs = self.__map_nodes_to_idx(target_range)
        all_nodes = [None] * len(node_idxs)
        for node, idx in node_idxs.items():
            assert all_nodes[idx] is None
            if isinstance(node, (cst.CSTNode, DummyEntity)):
                node_lbl = self.__node_to_label(node)
                all_nodes[idx] = node_lbl
            else:
                # Non-node entries are symbol names (added via metadata).
                all_nodes[idx] = self.get_abbrv_symbol_name(node)
        edges = {}
        for rel_type, nodes in self.__relations.items():
            edges_for_rel = []
            edges[rel_type] = edges_for_rel
            for from_node, to_nodes in nodes.items():
                from_idx = node_idxs.get(from_node)
                if from_idx is None:
                    continue
                for node, metadata in to_nodes:
                    if node is not None and node not in node_idxs:
                        continue
                    to_idx = node_idxs[node if node is not None else metadata]
                    if metadata is None or node is None:
                        edges_for_rel.append((from_idx, to_idx))
                    else:
                        # Edges with both a target node and metadata keep the
                        # metadata as a third tuple element.
                        edges_for_rel.append((from_idx, to_idx, metadata))
        data = {
            "nodes": all_nodes,
            "edges": edges,
            "path": str(self.__path),
            "reference_nodes": [node_idxs.get(n) for n in reference_nodes],
            "text": get_text_in_range(self.__code_text, target_range),
        }
        if target_range is not None:
            data["code_range"] = (
                (target_range.start.line, target_range.start.column),
                (target_range.end.line, target_range.end.column),
            )
        return data, node_idxs

    def get_abbrv_symbol_name(self, node: str):
        """Strip any leading ``...<locals>.`` qualifier from a symbol name."""
        local_s = node.rfind("<locals>.")
        if local_s >= 0:
            node = node[local_s + len("<locals>.") :]
        return node

    def as_dot(
        self, filepath: Path, edge_colors: Optional[Dict[str, str]] = None, target_range: Optional[CodeRange] = None
    ):
        """Write the relations to *filepath* in Graphviz dot format.

        Token nodes (participants of the "NextToken" relation) are grouped
        into their own same-rank subgraph; symbol nodes are drawn orange.
        """
        node_idxs = self.__map_nodes_to_idx(target_range)

        def escape(string: str) -> str:
            # Escape double quotes for dot string literals.
            return string.replace('"', '\\"')

        if edge_colors is None:
            edge_colors = {}
        token_nodes = set()
        for from_node, to_nodes in self.__relations["NextToken"].items():
            token_nodes.update(t for t, m in to_nodes)
            token_nodes.add(from_node)
        with open(filepath, "w") as f:
            f.write("digraph {\n\tcompound=true;\n")
            # Non-token nodes first: CST/dummy nodes plain, symbols orange.
            for node, node_idx in node_idxs.items():
                if node in token_nodes:
                    continue
                if isinstance(node, (cst.CSTNode, DummyEntity)):
                    node_lbl = escape(self.__node_to_label(node))
                    f.write(f'\t node{node_idx}[shape="rectangle", label="{node_lbl}"];\n')
                else:
                    node_lbl = escape(node)
                    f.write(
                        f'\t node{node_idx}[shape="rectangle", label="{node_lbl}", style=filled, fillcolor="orange"];\n'
                    )
            f.write('\tsubgraph clusterNextToken {\n\tlabel="Tokens";\n\trank="same";\n')
            for token_node in token_nodes:
                if token_node not in node_idxs:
                    continue
                node_lbl = escape(self.__node_to_label(token_node))
                f.write(f'\t\tnode{node_idxs[token_node]}[shape="rectangle", label="{node_lbl}"];\n')
            edge_color = edge_colors.get("NextToken", "black")
            self.__create_dot_edges(edge_color, f, node_idxs, self.__relations["NextToken"], "NextToken")
            f.write("\t}\n")  # subgraph
            # All remaining relation kinds outside the token cluster.
            for rel_type, nodes in self.__relations.items():
                if rel_type == "NextToken":
                    continue
                edge_color = edge_colors.get(rel_type, "black")
                self.__create_dot_edges(edge_color, f, node_idxs, nodes, rel_type)
            f.write("}\n")  # graph

    def __create_dot_edges(self, edge_color, f, node_idxs, nodes, rel_type, indent="\t", weight=None):
        """Emit the dot edge statements of one relation kind to *f*."""
        for from_node, to_nodes in nodes.items():
            from_idx = node_idxs.get(from_node)
            if from_idx is None:
                continue
            edge_style = f'color="{edge_color}", splines=ortho'
            if weight:
                edge_style += f", weight={weight}"
            for node, metadata in to_nodes:
                if node is not None and node not in node_idxs:
                    continue
                to_idx = node_idxs[node if node is not None else metadata]
                if metadata is None or node is None:
                    f.write(f'{indent}node{from_idx} -> node{to_idx} [label="{rel_type}" {edge_style}];\n')
                else:
                    f.write(f'{indent}node{from_idx} -> node{to_idx} [label="{rel_type}.{metadata}" {edge_style}];\n')
| [
"miallama@microsoft.com"
] | miallama@microsoft.com |
d24d9b532639b9ddb03b9b8f313d705c1a0aa4d2 | 4e382ae46cf997ea2dbdfcfa463a57d3e0e9ad97 | /sols/find_the_difference.py | 8ac2f6c88a73e8e3e625f600b9cab2ad1558bb5c | [] | no_license | hayeonk/leetcode | 5136824838eb17ed2e4b7004301ba5bb1037082f | 6485f8f9b5aa198e96fbb800b058d9283a28e4e2 | refs/heads/master | 2020-04-28T03:37:16.800519 | 2019-06-01T14:34:45 | 2019-06-01T14:34:45 | 174,943,756 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 194 | py | class Solution(object):
def findTheDifference(self, s, t):
ret = 0
for c in s:
ret ^= ord(c)
for c in t:
ret ^= ord(c)
return chr(ret) | [
"31617695+hayeonk@users.noreply.github.com"
] | 31617695+hayeonk@users.noreply.github.com |
992b1d21729bf38534b92b9c117474340f6d1e7c | da0a0045d4e7b0de12a9459e5546332ccc7cce0e | /Design_Patterns/Structural/Adapter/example1/third_party_billing_system.py | 91d45cfb3c8a513e62f49f5e13c1c9b9a26457f8 | [] | no_license | git4rajesh/python-learnings | d64e7c76698b7f2255a77a8233a90774db78f030 | ee6b7f7844079e94801c19a1dd80921e1741e58e | refs/heads/master | 2022-11-01T08:47:45.097034 | 2019-04-28T02:14:34 | 2019-04-28T02:14:34 | 172,183,208 | 0 | 1 | null | 2022-10-12T08:35:43 | 2019-02-23T07:04:59 | Python | UTF-8 | Python | false | false | 432 | py | class Third_Party_Billing_System:
@staticmethod
def process_salary(lst_emp_obj):
for emp in lst_emp_obj:
if emp.designation == 'Mgr':
emp.salary = 1000
elif emp.designation == 'QA':
emp.salary = 2000
elif emp.designation == 'Engr':
emp.salary = 3000
else:
emp.salary = 5000
return lst_emp_obj
| [
"rvenkataraman"
] | rvenkataraman |
d7366b50353afa370e0f073c0930672676fc801a | 81efabfbef513ba9d45f28c2fce5e9ab5eb19eec | /Example_Buzzer.py | 0ad2adf9df635f5f17d95419322dbe1895b509b1 | [] | no_license | ncdcommunity/Raspberry_Pi_PCA9536_Input_Output_Module_Python_library | e219f89ab1e5de748f4db804250d9962a41c2cbf | e910ad2a8bbb4c492179bd593d3c7f31ef92d368 | refs/heads/master | 2021-03-24T13:21:48.969471 | 2018-02-08T07:01:09 | 2018-02-08T07:01:09 | 120,723,001 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# PCA9536_BZ
# This code is designed to work with the PCA9536_I2CBZ I2C Mini Module available from ControlEverything.com.
# https://shop.controleverything.com/products/digital-3-channel-input-output-with-buzzer?variant=25687479179#tabs-0-product_tabset-2
import time
from PCA9536 import PCA9536
# Instantiate the PCA9536 I2C I/O-expander driver (see vendor header above).
pca9536 = PCA9536()
# Poll forever: reconfigure the device pins, then read and print the state,
# sleeping 0.5 s between the configure and read steps.
# (Python 2 script -- note the print statement below.)
while True :
    pca9536.select_io()
    pca9536.select_pin()
    pca9536.input_output_config()
    time.sleep(0.5)
    pca9536.read_data()
    print " ******************************** "
    time.sleep(0.5)
"ryker1990@gmail.com"
] | ryker1990@gmail.com |
0871e550193f28e9c243723ca06cb964eb1e0256 | a98c455a318ab2d47b10ef1aa195b7dfd1b5449c | /codes/fashionmnist_tobf.py | 31f7ae4dc70bc7816376311fda79de25e325bbae | [] | no_license | WanliXue/BF_implemation | ddd463ed906e1f4ee0de492da48bc6de3574bfd0 | 211aa963f3be755858daf03fca5690d3c9532053 | refs/heads/main | 2022-12-26T07:04:05.280651 | 2020-10-13T02:08:55 | 2020-10-13T02:08:55 | 303,561,823 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,946 | py | import numpy as np
# import os
# print(os.getcwd())
import math
import time
from BF_TS import BF_TS
def conver_data_to_bf(data):
    """Encode every row of *data* into a Bloom-filter bit representation.

    :param data: 2-D array-like of shape (samples, features); each row is
        converted through ``BF_TS`` into a fixed-length (10000-bit) vector.
    :return: the stacked bit vectors produced by
        ``BF_TS.convert_bitarray_to_train_data``.
    """
    com_to_len = data.shape[1]
    compressed_data = data[:, :]
    # ---- Bloom-filter hyper-parameters ----
    length = 10000      # filter size in bits
    b = 5               # neighborhood half-width per feature
    num_hash = 10       # number of hash functions
    dis = float(5)      # quantization distance
    # Expected false-positive rate for g inserted elements.
    # (The original computed this twice; the first result was dead code
    # and has been removed.)
    g = (2 * b + 1) * com_to_len
    false_positive = math.pow(1 - math.exp(-float(num_hash * g) / length), num_hash)
    print('lenth:', length, 'num_hash:', num_hash, 'false_positive: ', false_positive)
    bf_ts = BF_TS(length, num_hash, b, dis / (2 * b), dis)
    print('BF filter')
    start_time = time.time()
    bf_train = bf_ts.convert_set_to_bf(compressed_data)  # the result it a list and hard to convert to np array
    print('BF filter done')
    cifar_batch = bf_ts.convert_bitarray_to_train_data(bf_train, len(bf_train), length)
    print('bf done using time: {} mins'.format((time.time() - start_time) / 60))
    return cifar_batch
# ---------------------
# Machine-specific input locations (absolute Dropbox paths).
train_path = '/Users/wanli/Dropbox/ppml_code_with_dataset/CIFAR_mnist/Fashion_train_random60_full.npy'
test_path = '/Users/wanli/Dropbox/ppml_code_with_dataset/CIFAR_mnist/Fashion_test_random60_full.npy'

# Encode the training split and save the Bloom-filtered features.
data = np.load(train_path) # (9000,300,3)
bfed = conver_data_to_bf(data)
save_path = '../data/fashion_bfed_train_random60.npy'
np.save(save_path, bfed)

# Same conversion for the test split.
data_test = np.load(test_path) # (9000,300,3)
bfed_test = conver_data_to_bf(data_test)
save_path = '../data/fashion_bfed_test_random60.npy'
np.save(save_path, bfed_test)
"xuewanli.lee@gmail.com"
] | xuewanli.lee@gmail.com |
b019c18b74461fd6a01e93019d9a39a9681330c1 | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/artificial/transf_Anscombe/trend_ConstantTrend/cycle_7/ar_12/test_artificial_1024_Anscombe_ConstantTrend_7_12_100.py | 3e21083297c74f56fbd8826c3dc7a03e1537c962 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 275 | py | import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 1024 , FREQ = 'D', seed = 0, trendtype = "ConstantTrend", cycle_length = 7, transform = "Anscombe", sigma = 0.0, exog_count = 100, ar_order = 12); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
654de6277b4e8c96c01f3c4013478da037b626d2 | 36ac195ecceb868e78372bc8e976066cc9ff0fae | /torch_glow/tests/nodes/batchnorm3d_test.py | 6fed650f00e7de8b34a67493a89a151fbdc61b73 | [
"Apache-2.0"
] | permissive | jeff60907/glow | d283d65bc67e0cc9836854fa7e4e270b77023fff | 34214caa999e4428edbd08783243d29a4454133f | refs/heads/master | 2021-09-23T07:30:29.459957 | 2021-09-14T01:47:06 | 2021-09-14T01:48:00 | 216,199,454 | 0 | 0 | Apache-2.0 | 2019-10-19T12:00:31 | 2019-10-19T12:00:31 | null | UTF-8 | Python | false | false | 2,270 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
import torch.nn as nn
from tests import utils
class TestBatchNorm3D(utils.TorchGlowTestCase):
    """Tests lowering of the PyTorch 3D batchnorm node (aten::batch_norm)
    onto Glow, with and without affine weight/bias."""

    @staticmethod
    def _make_model(num_channels, running_mean, running_var, weight=None, bias=None):
        """Build an eval-mode module wrapping ``nn.BatchNorm3d`` with the
        given running statistics and, optionally, affine weight/bias.

        Shared by both tests below so the model class is defined only once.
        """

        class SimpleBatchNorm(nn.Module):
            def __init__(self):
                super(SimpleBatchNorm, self).__init__()
                self.batchnorm = nn.BatchNorm3d(num_channels)
                if weight is not None:
                    self.batchnorm.weight = torch.nn.Parameter(weight)
                if bias is not None:
                    self.batchnorm.bias = torch.nn.Parameter(bias)
                self.batchnorm.running_mean = running_mean
                self.batchnorm.running_var = running_var

            def forward(self, x):
                return self.batchnorm(x)

        model = SimpleBatchNorm()
        model.eval()
        return model

    def test_batchnorm_basic(self):
        """
        Basic test of the PyTorch 3D batchnorm Node on Glow.
        """
        num_channels = 4
        model = self._make_model(
            num_channels, torch.rand(num_channels), torch.rand(num_channels)
        )
        inputs = torch.randn(1, num_channels, 4, 5, 5)
        utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})

    def test_batchnorm_with_weights(self):
        """
        Test of the PyTorch 3D batchnorm Node with weights and biases on Glow.
        """
        num_channels = 4
        model = self._make_model(
            num_channels,
            torch.rand(num_channels),
            torch.ones(num_channels),
            weight=torch.rand(num_channels),
            bias=torch.rand(num_channels),
        )
        inputs = torch.randn(1, num_channels, 4, 5, 5)
        utils.compare_tracing_methods(model, inputs, fusible_ops={"aten::batch_norm"})
| [
"facebook-github-bot@users.noreply.github.com"
] | facebook-github-bot@users.noreply.github.com |
59d1905f4c830c16a12da935f6d105de60ee67ef | 15dfa10d195fb6187765aa1d6a42f6152e8cd4e1 | /sof_parser/sof_parser/pipelines.py | 6a2cdf0a24fac33c26e82f57d0c644bf88554a77 | [] | no_license | didoogan/sof_parser | d790e12d5290dd110ddc1511a74a02876dba607b | c3b9064425e74ebb67e34319a462b5401732990c | refs/heads/master | 2020-12-29T02:06:59.956070 | 2016-09-21T14:45:55 | 2016-09-21T14:45:55 | 68,790,532 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
class SofParserPipeline(object):
    """No-op Scrapy item pipeline: every scraped item passes through as-is."""

    def process_item(self, item, spider):
        """Return *item* unchanged; *spider* is unused."""
        return item
| [
"tzd0409@gmail.com"
] | tzd0409@gmail.com |
a9ada2f135bdc88b9aaf30c65ca4588856fbeb5f | 84b05857cbe74d190bdbee18d442d0c720b1b84d | /Coderbyte_algorithms/Easy/MovingMedian/MovingMedian.py | be21ca848c07cd1ea8955337db4131d680066937 | [] | no_license | JakubKazimierski/PythonPortfolio | 1c8c7e7b0f1358fc42a2295b807d0afafd8e88a3 | 3aa62ad36c3b06b2a3b05f1f8e2a9e21d68b371f | refs/heads/master | 2023-06-01T01:16:22.897097 | 2023-05-15T01:05:22 | 2023-05-15T01:05:22 | 311,473,524 | 9 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,443 | py | '''
Moving Median from Coderbyte
December 2020 Jakub Kazimierski
'''
import statistics
def MovingMedian(arr):
'''
Have the function MovingMedian(arr)
read the array of numbers stored in arr
which will contain a sliding window size, N,
as the first element in the array and the rest
will be a list of numbers. Your program should
return the Moving Median for each element based
on the element and its N-1 predecessors, where N
is the sliding window size. The final output should
be a string with the moving median corresponding
to each entry in the original array separated by commas.
Note that for the first few elements
(until the window size is reached), the median is computed
on a smaller number of entries.
For example: if arr is [3, 1, 3, 5, 10, 6, 4, 3, 1]
then your program should output "1,2,3,5,6,6,4,3"
'''
try:
median_list = []
for i in range(1, len(arr)):
if i < arr[0]:
median_list.append(int(statistics.median(arr[1:i+1])))
else:
# n=arr[0], (n-1)th element before i, starts from index i+1-n
start = i+1-arr[0]
median_list.append(int(statistics.median(arr[start:i+1])))
return ",".join(str(median) for median in median_list)
except(TypeError):
return -1 | [
"j.m.kazimierski@gmail.com"
] | j.m.kazimierski@gmail.com |
302781dcebe0e1f90f184cdf719806f48bc0785d | f199898334653e32d6a13922063e98f6cc477db5 | /tests/test_air.py | 8f734e633fc83fe54da3b9d9da5639cd14145903 | [
"MIT"
] | permissive | wptree/akshare | c31bb822d806974be951c3b2258312abdec09a6e | 7697506d277f14d1719e60c3d19e73ff7d69e6af | refs/heads/master | 2021-02-07T22:30:00.957989 | 2020-02-29T10:29:27 | 2020-02-29T10:29:27 | 244,083,276 | 1 | 0 | MIT | 2020-03-01T03:59:01 | 2020-03-01T03:59:00 | null | UTF-8 | Python | false | false | 1,314 | py | # -*- coding:utf-8 -*-
# /usr/bin/env python
"""
Author: Albert King
date: 2019/12/12 18:16
contact: jindaxiang@163.com
desc: To test intention, just write test code here!
"""
# from akshare.air.aqi_study import air_hourly
from akshare.index.index_weibo import weibo_index
from akshare.event.franchise import franchise_china
# from akshare.fortune.fortune_500 import fortune_rank
def test_franchise_china():
franchise_china_df = franchise_china()
assert franchise_china_df.shape[0] > 0
# def test_air_hourly():
# """
# test air_hourly interface
# :return: air_hourly_df
# :rtype: pandas.DataFrame
# """
# air_hourly_df = air_hourly("成都", "2019-12-10")
# assert air_hourly_df.shape[0] > 0
def test_weibo_index():
"""
test weibo_index interface
:return: weibo_index_df
:rtype: pandas.DataFrame
"""
weibo_index_df = weibo_index(word="口罩", time_type="3month")
assert weibo_index_df.shape[0] > 0
# def test_fortune():
# """
# test fortune_rank interface
# :return: fortune_rank_df
# :rtype: pandas.DataFrame
# """
# fortune_rank_df = fortune_rank(year=2011) # 2010 不一样
# assert fortune_rank_df.shape[0] > 0
if __name__ == "__main__":
# test_air_hourly()
test_weibo_index()
# test_fortune()
| [
"jindaxiang@163.com"
] | jindaxiang@163.com |
6f90146523ad83c22bd917cb55bcb3196765cb28 | d3f680630426ff3a63d564e78bb1480863a7f0f6 | /services/web__aps_dz.py | 4c103effdaa88ee80facaa3ba0bd5fd20edee578 | [] | no_license | JesseWeinstein/NewsGrabber | 09f9d567449e99ba211e4ba61b42c53276de235b | b431dc5f313d4718c6328aaaa97da1bc8e136023 | refs/heads/master | 2020-12-31T02:32:48.359448 | 2016-01-23T14:20:26 | 2016-01-23T14:20:26 | 48,966,133 | 1 | 0 | null | 2016-01-04T00:33:41 | 2016-01-04T00:33:41 | null | UTF-8 | Python | false | false | 456 | py | refresh = 5
version = 20160122.01
urls = ['http://www.aps.dz/algerie?format=feed',
'http://www.aps.dz/economie?format=feed',
'http://www.aps.dz/sport?format=feed',
'http://www.aps.dz/monde?format=feed',
'http://www.aps.dz/societe?format=feed',
'http://www.aps.dz/regions?format=feed',
'http://www.aps.dz/culture?format=feed',
'http://www.aps.dz/sante-sciences-tech?format=feed']
regex = [r'^https?:\/\/[^\/]*aps\.dz']
videoregex = []
liveregex = [] | [
"Arkiver@hotmail.com"
] | Arkiver@hotmail.com |
86ab4c0936eb638ea5a4cdc51d104fe7ae8991a0 | bf92a619b9b850678bb691915e45c39cd740fa63 | /apps/freeway/main.py | de0fdd6c91d2e76ff7289f891c78993f55fdaea0 | [] | no_license | jrecuero/jc2cli | a045f1efa431f53351dfac968852fd82e8c963b6 | c97615828880021b3965756aed939e39bac949b6 | refs/heads/master | 2021-05-10T10:16:34.698398 | 2018-11-06T17:43:53 | 2018-11-06T17:43:53 | 118,377,662 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,198 | py | import ehandler
import race
import freeway
import section
import device
from cursor import Cursor
if __name__ == '__main__':
Cursor.print(Cursor.clear_entire_screen())
Cursor.print(Cursor.move_upper_left(0))
_ehdlr = ehandler.EHandler()
_race = race.Race()
_freeway = freeway.Freeway()
_freeway.add_section(section.Section(100, 1, section.Spec.Straight))
_freeway.add_section(section.Section(50, 1, section.Spec.Turn))
_freeway.add_section(section.Section(100, 1, section.Spec.Straight))
_freeway.add_section(section.Section(50, 1, section.Spec.Turn))
_race.freeway = _freeway
_devices = [device.Device('dev-80', 'dev-class', 'dev-sub', 80),
device.Device('dev-50', 'dev-class', 'dev-sub', 50),
device.Device('dev-90', 'dev-class', 'dev-sub', 90),
device.Device('dev-60', 'dev-class', 'dev-sub', 60),
device.Device('dev-70', 'dev-class', 'dev-sub', 70), ]
# _devices = [device.Device('dev-80', 'dev-class', 'dev-sub', 80), ]
for dev in _devices:
_race.add_device(dev)
_race.laps = 5
_ehdlr.race = _race
_ehdlr.setup()
_ehdlr.delay = 100
_ehdlr.start()
| [
"jose.recuero@gmail.com"
] | jose.recuero@gmail.com |
b15f080badda883ba9ec4368b1e34032afe7f2a8 | 06fec21ab6be610d7e491eaa55f776587ed6fadd | /hubapp/migrations/0007_add_price.py | 988a6501030a2dbba177e0b2c561af43c2b411c3 | [] | no_license | rcoffie/ehub | 6e6e493d252b4d8a5360616ea64dd85fdc3b15f8 | 28213469c612088acb3a62ca9bf1f3c2a0dd5756 | refs/heads/master | 2022-09-29T09:05:49.705294 | 2020-06-07T09:26:16 | 2020-06-07T09:26:16 | 263,087,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | # Generated by Django 3.0.6 on 2020-06-06 13:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hubapp', '0006_auto_20200606_1302'),
]
operations = [
migrations.AddField(
model_name='add',
name='price',
field=models.DecimalField(decimal_places=2, default=True, max_digits=6),
),
]
| [
"rcoffie22@yahoo.com"
] | rcoffie22@yahoo.com |
8ceae620a6ca4aa42c19252edc4912bc71ab0105 | 9afbcb367de9bf055d531d285bc299a9ca3040fe | /django_session/django_session/settings.py | 8d2cce87371602bcd47e87324c303b6c4fded580 | [] | no_license | mysqlplus163/aboutPython | a41a5bc2efd43b53d4acf96e7477e80c022cf657 | fa7c3e6f123158011d8726b28bfcd0dee02fa853 | refs/heads/master | 2020-03-21T05:06:19.949902 | 2018-03-14T16:04:54 | 2018-03-14T16:04:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,177 | py | """
Django settings for django_session project.
Generated by 'django-admin startproject' using Django 1.10.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'ga+l=(++6huhfamtls@f_qt-^mufus0ios8074=38ttx=)js7c'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'django_session.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_session.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| [
"liwenzhou7@gmail.com"
] | liwenzhou7@gmail.com |
378d8d6243e4eb15707ac59dc6e7bf93f80452e7 | 67572ef7c6ac88a335dd884ac19dd8f5519145fa | /4_Recursion/recursive_sierspinski.py | 29161fc863bc4e448ba111480df36cefa2bc8fd9 | [
"MIT"
] | permissive | ZoroOP/Problem-Solving-With-Algorithms-And-Data-Structures | ccb2eb306229097dd8c930523e20ed7115a1e8ef | be29b46b9f4e579644ca2d44675c0ce7dcb29b3b | refs/heads/master | 2021-10-17T00:59:53.654643 | 2019-02-13T05:40:27 | 2019-02-13T05:40:27 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,419 | py | """
Draw a sierpinski triangle recursively with the turtle module.
"""
import turtle
def drawTriangle(points,color,myTurtle):
myTurtle.fillcolor(color)
myTurtle.up()
myTurtle.goto(points[0][0],points[0][1])
myTurtle.down()
myTurtle.begin_fill()
myTurtle.goto(points[1][0],points[1][1])
myTurtle.goto(points[2][0],points[2][1])
myTurtle.goto(points[0][0],points[0][1])
myTurtle.end_fill()
def getMid(p1,p2):
return ( (p1[0]+p2[0]) / 2, (p1[1] + p2[1]) / 2)
def sierpinski(points,degree,myTurtle):
colormap = ['blue','red','green','white','yellow',
'violet','orange']
drawTriangle(points,colormap[degree],myTurtle)
if degree > 0:
sierpinski([points[0],
getMid(points[0], points[1]),
getMid(points[0], points[2])],
degree-1, myTurtle)
sierpinski([points[1],
getMid(points[0], points[1]),
getMid(points[1], points[2])],
degree-1, myTurtle)
sierpinski([points[2],
getMid(points[2], points[1]),
getMid(points[0], points[2])],
degree-1, myTurtle)
def main():
myTurtle = turtle.Turtle()
myWin = turtle.Screen()
myPoints = [[-100,-50],[0,100],[100,-50]]
sierpinski(myPoints,3,myTurtle)
myWin.exitonclick()
main()
| [
"anthony.r.chao@gmail.com"
] | anthony.r.chao@gmail.com |
7988c3051318b94f342323a66c7309ca285ec7a3 | e8215b98dcf46417e720cc6ef4a0329474ae9b82 | /PHYS210/Project 2-Animation - Backup/ising.py | 17cd108c5c818627b3c2906932314eaf85891cf0 | [] | no_license | rgkaufmann/PythonCodes | 2d47bab84ec851fc962598f613b1e666a14c8efd | a5d5cd993beabdb79897a05b35420ad82f438f51 | refs/heads/master | 2021-06-13T23:19:09.109162 | 2021-03-03T06:00:04 | 2021-03-03T06:00:04 | 162,771,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,839 | py | # PHYS 210 - Project 2 - The Ising Model Ryan Kaufmann
# Bonus: Add a second animation showing the individual spins of each electron
# In each of the desired temperatures (0.1K, 2.5K, 100K)
import numpy as np # Importing all important function, numpy
import matplotlib.pyplot as plt # for arrays, plt for plotting, anim for
import matplotlib.animation as anim # animations, warnings to clear warnings
import warnings # and sys to increase recursion limit
import cProfile, pstats, io
import sys
warnings.simplefilter(action='ignore', category=UserWarning) # Ignores warning
plt.rcParams['animation.ffmpeg_path']='C:/Users/ryank/Downloads/ffmpeg-20171102-d5995c5-win64-static/bin/ffmpeg.exe'
# When using the MovieWriter in the animation saving
plt.rcParams['image.cmap'] = 'Paired'
pr2 = cProfile.Profile()
pr2.enable()
# Declaring all variables used in the program and increases recursion limit
sys.setrecursionlimit(4000)
SigmaSpins = np.random.choice((-1, 1), (50, 50))
Zeros = np.zeros((50, 50))
Temperatures = np.array([0.01, 0.1, 1, 2, 2.5, 3, 4, 5, 10, 100])
SpinDataTemp001 = [SigmaSpins]
SpinDataTemp025 = [SigmaSpins]
SpinDataTemp100 = [SigmaSpins]
# Convergence calculates the electron spin configuration that the original
# configuration reaches after 600000 iterations. It takes an original state
# the current iteration of recusion, and the temperature of the system
def Convergence(sigma, count, temperature):
for indx in range(500):
# For loop calculates a random coordinate on the configuration
# Then calculates the energy difference between the original state
# And the new state (Derivation at bottom)
coors = np.random.randint(-1, 49, 2)
Energydif = (sigma[(coors[0]+1), coors[1]] +
sigma[coors[0]-1, coors[1]] +
sigma[coors[0], (coors[1]+1)] +
sigma[coors[0], coors[1]-1])
Energydif = -2*sigma[coors[0], coors[1]]*Energydif
# Finally find whether or not the electron spin should be switched
# And switches it or not. If the probability needs to be calculated,
# It is compared to a random number and determined to be switched
if Energydif >= 0:
sigma[coors[0], coors[1]] = -1*sigma[coors[0], coors[1]]
else:
probability = np.exp(Energydif/temperature)
if np.random.random() < probability:
sigma[coors[0], coors[1]] = -1*sigma[coors[0], coors[1]]
# After 500 iterations, it checks if it has been 1000 iterations since the
# Last recording of a electron spin. If it has been 1000 iterations, it
# Records it to be used with the animation segment.
if temperature == 0.1:
global SpinDataTemp001
SpinDataTemp001.append(sigma.tolist())
elif temperature == 2.5:
global SpinDataTemp025
SpinDataTemp025.append(sigma.tolist())
elif temperature == 100:
global SpinDataTemp100
SpinDataTemp100.append(sigma.tolist())
# Then it decides if it should be through another iteration or returned
if count >= 1199:
return sigma
else:
return Convergence(sigma, count+1, temperature)
# ConvergenceSet goes through a set of spin configurations and gets the
# Magnetic moment for each using the same temperature. It adds them
# To one array and then returns the complete array
def ConvergenceSet(setsigmaspins, temperature):
if setsigmaspins.size == SigmaSpins.size:
return np.sum(Convergence(setsigmaspins[:, :, 0], 0, temperature))
else:
return np.append(np.sum(Convergence(setsigmaspins[:, :, 0],
0, temperature)),
ConvergenceSet(setsigmaspins[:, :, 1:], temperature))
# TemperatureSet goes through a set of temperatures and gets five magnetic
# Moments for each using each temperature. It then adds them to one
# Array and then returns the complete array.
def TemperatureSet(temperatureset):
FiveTimesSigmaSpins = np.repeat(SigmaSpins[:, :, np.newaxis], 5, axis=2)
if temperatureset.size == 1:
return ConvergenceSet(FiveTimesSigmaSpins,
temperatureset[0])[:, np.newaxis]
else:
return np.append(ConvergenceSet(FiveTimesSigmaSpins,
temperatureset[0])[:, np.newaxis],
TemperatureSet(temperatureset[1:]),
axis=1)
# UpdateHeat replaces the data in the heat map with a 'newer' data set
def updateHeat(num, spins):
Heat.set_data(spins[num])
# UpdateQuiver replaces the data in the vector field with a 'newer' data set
def updateQuiver(num, spins):
Color = np.arctan2(Zeros, spins[num])
Quiver.set_UVC(Zeros, spins[num], Color)
# Animate takes in various parameters to construct a figure and form the
# Animation. Then it saves the animation to a file.
def Animate(Temp, File, Type, SpinData):
fig = plt.figure()
fig.suptitle('Electron Spins at {}K'.format(Temp))
if Type == 'Heat':
global Heat
Heat = plt.imshow(SigmaSpins, cmap='inferno')
animation = anim.FuncAnimation(fig, updateHeat, frames=1200,
repeat=False, fargs=(SpinData, ))
animation.save(File, fps=20)
elif Type == 'Quiver':
global Quiver
Quiver = plt.quiver(Zeros, SigmaSpins, np.arctan2(Zeros, SigmaSpins),
pivot='middle')
animation = anim.FuncAnimation(fig, updateQuiver, frames=1200,
repeat=False, fargs=(SpinData, ))
animation.save(File, fps=20)
# Gathers data on the convergence configurations given initial spin
MagMoments = TemperatureSet(Temperatures).transpose()
MaxMagMoments = np.amax(np.abs(MagMoments), axis=1)
# Constructs the plot for the magnetic moments versus the temperature
title = 'Magnetic Moment Against Temperature'
title = title + ' As Calculated by the Ising Model'
plt.semilogx(Temperatures, MaxMagMoments)
plt.title(title)
plt.xlabel('Temp (K)')
plt.ylabel('Magnetic Moment')
plt.savefig('Tcurie.pdf')
# Animates each of the required temperatures using both Heat and Quiver funcs
Animate(0.1, 'temp_0.1.mp4', 'Heat', SpinDataTemp001)
Animate(0.1, 'temp_0.1Quiver.mp4', 'Quiver', SpinDataTemp001)
Animate(2.5, 'temp_2.5.mp4', 'Heat', SpinDataTemp025)
Animate(2.5, 'temp_2.5Quiver.mp4', 'Quiver', SpinDataTemp025)
Animate(100, 'temp_100.mp4', 'Heat', SpinDataTemp100)
Animate(100, 'temp_100Quiver.mp4', 'Quiver', SpinDataTemp100)
pr2.disable()
file = open('FullIsingStats.txt', 'w')
s = io.StringIO()
sortby = 'tottime'
ps = pstats.Stats(pr2, stream=s).sort_stats(sortby)
ps.print_stats()
file.write(s.getvalue())
file.close() | [
"ryankaufmannprof@gmail.com"
] | ryankaufmannprof@gmail.com |
182188564993884d7a326aefc604b6929c92cfa3 | b95ec100e1864954eb4fa88096ffc6414105263c | /CookieTTS/_5_infer/t2s_server/text2speech.py | 17ed38a920f5c5daf33975186513408293fe51a1 | [] | no_license | Harishgeth/cookietts | d07a772bf449ac40c4556fb9864ed163e6c22ec3 | 147bbe94c9afcd26db6fb40d745b493922ca5bc7 | refs/heads/master | 2022-12-07T06:10:28.131351 | 2020-07-06T09:06:35 | 2020-07-06T09:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 43,523 | py | import sys
import os
import numpy as np
import random
import time
import argparse
import torch
import matplotlib.pyplot as plt
from scipy.io.wavfile import write
import json
import re
import difflib
from glob import glob
from unidecode import unidecode
import nltk # sentence spliting
from nltk import sent_tokenize
sys.path.append('../')
from model import Tacotron2
from train import load_model
from text import text_to_sequence
from denoiser import Denoiser
from utils import load_filepaths_and_text
def get_mask_from_lengths(lengths, max_len=None):
    """Build a [B, max_len] boolean padding mask: True where position < length.

    Args:
        lengths (torch.LongTensor): [B] length of each sequence in the batch.
        max_len (int, optional): width of the mask. Defaults to max(lengths).

    Returns:
        torch.BoolTensor: [B, max_len] mask (True = valid position).
    """
    if max_len is None:  # `is None` (not truthiness) so an explicit max_len=0 is respected
        max_len = torch.max(lengths).long()
    ids = torch.arange(0, max_len, device=lengths.device, dtype=torch.int64)
    return ids < lengths.unsqueeze(1)
#@torch.jit.script # should work and be even faster, but makes it harder to debug and it's already fast enough right now
def alignment_metric(alignments, input_lengths=None, output_lengths=None, average_across_batch=False):
    """Compute diagnostic metrics over Tacotron attention alignments.

    Args:
        alignments (FloatTensor): [B, dec_T, enc_T] attention weights.
        input_lengths (Tensor, optional): [B] encoder (text) lengths.
            Defaults to enc_T - 1 for every batch item.
        output_lengths (Tensor, optional): [B] decoder (mel) lengths.
            Defaults to dec_T - 1 for every batch item.
        average_across_batch (bool): if True, reduce every metric to its batch mean.

    Returns:
        5-tuple of CPU tensors:
            diagonality       - argmax-path length / optimal (straight diagonal) length,
            avg_prob          - mean max attention weight per (non-padded) decoder step,
            encoder_max_focus - largest total attention any encoder step received,
            encoder_min_focus - smallest total attention any encoder step received,
            encoder_avg_focus - mean total attention per (non-padded) encoder step.
    """
    alignments = alignments.transpose(1,2) # [B, dec, enc] -> [B, enc, dec]
    # `is None` instead of `== None`: the original compared possible Tensors with
    # `==`, which relies on Python's identity fallback and is fragile/ambiguous.
    if input_lengths is None:
        input_lengths = torch.ones(alignments.size(0), device=alignments.device)*(alignments.shape[1]-1) # [B]
    if output_lengths is None:
        output_lengths = torch.ones(alignments.size(0), device=alignments.device)*(alignments.shape[2]-1) # [B]
    batch_size = alignments.size(0)
    # optimal path length = straight diagonal from corner to corner
    optimums = torch.sqrt(input_lengths.double().pow(2) + output_lengths.double().pow(2)).view(batch_size)
    # [B, enc, dec] -> [B, dec], [B, dec]
    values, cur_idxs = torch.max(alignments, 1) # get max value in column and location of max value
    cur_idxs = cur_idxs.float()
    prev_indx = torch.cat((cur_idxs[:,0][:,None], cur_idxs[:,:-1]), dim=1) # shift entire tensor by one.
    dist = ((prev_indx - cur_idxs).pow(2) + 1).pow(0.5) # [B, dec] per-step euclidean travel along the argmax track
    dist.masked_fill_(~get_mask_from_lengths(output_lengths, max_len=dist.size(1)), 0.0) # set dist of padded to zero
    dist = dist.sum(dim=(1)) # get total dist for each B
    diagonalitys = (dist + 1.4142135)/optimums # dist / optimal dist (sqrt(2) offsets the first step)
    # zero out padded decoder frames before accumulating per-encoder-step totals
    alignments.masked_fill_(~get_mask_from_lengths(output_lengths, max_len=alignments.size(2))[:,None,:], 0.0)
    attm_enc_total = torch.sum(alignments, dim=2)# [B, enc, dec] -> [B, enc]
    # calc max (with padding ignored)
    attm_enc_total.masked_fill_(~get_mask_from_lengths(input_lengths, max_len=attm_enc_total.size(1)), 0.0)
    encoder_max_focus = attm_enc_total.max(dim=1)[0] # [B, enc] -> [B]
    # calc mean (with padding ignored; rescale because mean included padded slots)
    encoder_avg_focus = attm_enc_total.mean(dim=1) # [B, enc] -> [B]
    encoder_avg_focus *= (attm_enc_total.size(1)/input_lengths.float())
    # calc min (with padding ignored; padded slots set to 1.0 so they never win the min)
    attm_enc_total.masked_fill_(~get_mask_from_lengths(input_lengths, max_len=attm_enc_total.size(1)), 1.0)
    encoder_min_focus = attm_enc_total.min(dim=1)[0] # [B, enc] -> [B]
    # calc average max attention (with padding ignored)
    values.masked_fill_(~get_mask_from_lengths(output_lengths, max_len=values.size(1)), 0.0) # because padding
    avg_prob = values.mean(dim=1)
    avg_prob *= (alignments.size(2)/output_lengths.float()) # because padding
    if average_across_batch:
        diagonalitys = diagonalitys.mean()
        encoder_max_focus = encoder_max_focus.mean()
        encoder_min_focus = encoder_min_focus.mean()
        encoder_avg_focus = encoder_avg_focus.mean()
        avg_prob = avg_prob.mean()
    return diagonalitys.cpu(), avg_prob.cpu(), encoder_max_focus.cpu(), encoder_min_focus.cpu(), encoder_avg_focus.cpu()
def chunks(lst, n):
    """Yield successive n-sized slices of *lst*; the final slice may be shorter.

    Example: list(chunks([0,1,2,3,4], 2)) -> [[0, 1], [2, 3], [4]]
    """
    offsets = range(0, len(lst), n)
    for off in offsets:
        yield lst[off:off + n]
# Split raw text into speakable segments (returns a list, not a generator).
def parse_text_into_segments(texts, split_at_quotes=True, target_segment_length=200):
    """Split raw text into TTS-sized segments, starting a new segment at every quote swap.

    Quoted spans keep literal quote marks around them so later stages can tell
    dialogue from narration (informational, not faithful to the original text).

    Args:
        texts (str): raw input text (may contain newlines/unicode).
        split_at_quotes (bool): start a new segment whenever quote-mode flips.
        target_segment_length (int): soft maximum characters per merged segment.

    Returns:
        list[str]: ordered text segments.
    """
    # split text by quotes; odd-indexed pieces of a '"'-split were inside quotes
    quo ='"' # nested quotes in list comprehension are hard to work with
    texts = [f'"{text.replace(quo,"").strip()}"' if i%2 else text.replace(quo,"").strip() for i, text in enumerate(unidecode(texts).split('"'))]

    # clean up and remove empty texts
    def clean_text(text):
        text = text.strip()
        # BUGFIX: the double-space collapse was written as a no-op replace(" "," ")
        text = text.replace("\n"," ").replace("  "," ").replace("> --------------------------------------------------------------------------","").replace("------------------------------------","")
        return text
    texts = [clean_text(text) for text in texts if len(text.strip().replace('"','').strip()) or len(clean_text(text))]
    assert len(texts)

    # split each piece into sentences, re-attaching quote marks where the piece was quoted
    def quotify(seg, text):
        if '"' in text:
            if seg[0] != '"': seg='"'+seg
            if seg[-1] != '"': seg+='"'
        return seg
    sentences = []
    for text in texts:  # plain loop: the original abused a list comprehension for its .extend() side effect
        sentences.extend(quotify(x.strip(), text) for x in sent_tokenize(text) if len(x.replace('"','').strip()))
    texts = sentences
    assert len(texts)

    # merge neighbouring sentences into segments
    quote_mode = False
    texts_output = []
    texts_segmented = ''
    for text in texts:
        # split segment if quote swap.
        # NOTE: precedence makes this (split_at_quotes and A) or B; with quote_mode
        # starting False, B only fires after a quote has toggled the mode.
        if split_at_quotes and ('"' in text and quote_mode == False) or (not '"' in text and quote_mode == True):
            # BUGFIX: replace() result was previously discarded (strings are immutable)
            texts_output.append(texts_segmented.replace('""',''))
            texts_segmented=text
            quote_mode = not quote_mode
        # split segment if max length reached
        elif len(texts_segmented+text) > target_segment_length:
            texts_output.append(texts_segmented.replace('""',''))
            texts_segmented=text
        else: # continue adding to segment
            texts_segmented+= f' {text}'
    # add any remaining stuff.
    if len(texts_segmented):
        texts_output.append(texts_segmented)
    assert len(texts_output)
    return texts_output
def get_first_over_thresh(x, threshold):
    """Takes [B, T] and outputs first T over threshold for each B (output.shape = [B]).

    How it works: every value above `threshold` is clamped down to exactly
    `threshold`, and the last frame is forced to `threshold` (so a row with no
    over-threshold value still resolves to the final frame). The flip+argmax
    then locates an occurrence of that shared maximum from the end.
    NOTE(review): the result depends on torch.argmax's tie-breaking order among
    equal maxima, which is not guaranteed across devices/versions — the CPU
    round-trip below exists precisely because the GPU path breaks ties
    differently. Confirm behavior on the target torch build.
    """
    device = x.device
    # work on a float CPU copy; see NOTE above about GPU argmax tie-breaking
    x = x.clone().cpu().float() # GPU implementation of argmax() splits tensor into 32 elem chunks, each chunk is parsed forward then the outputs are collected together... backwards
    x[:,-1] = threshold # set last to threshold just incase the output didn't finish generating.
    x[x>threshold] = threshold  # clamp so the first over-threshold frame ties with the max
    return ( (x.size(1)-1)-(x.flip(dims=(1,)).argmax(dim=1)) ).to(device).int()
class T2S:
def __init__(self, conf):
self.conf = conf
# load Tacotron2
self.ttm_current = self.conf['TTM']['default_model']
assert self.ttm_current in self.conf['TTM']['models'].keys(), "Tacotron default model not found in config models"
tacotron_path = self.conf['TTM']['models'][self.ttm_current]['modelpath'] # get first available Tacotron
self.tacotron, self.ttm_hparams, self.ttm_sp_name_lookup, self.ttm_sp_id_lookup = self.load_tacotron2(tacotron_path)
# load WaveGlow
self.MTW_current = self.conf['MTW']['default_model']
assert self.MTW_current in self.conf['MTW']['models'].keys(), "WaveGlow default model not found in config models"
vocoder_path = self.conf['MTW']['models'][self.MTW_current]['modelpath'] # get first available waveglow
vocoder_confpath = self.conf['MTW']['models'][self.MTW_current]['configpath']
self.waveglow, self.MTW_denoiser, self.MTW_train_sigma, self.MTW_sp_id_lookup = self.load_waveglow(vocoder_path, vocoder_confpath)
# load torchMoji
if self.ttm_hparams.torchMoji_linear: # if Tacotron includes a torchMoji layer
self.tm_sentence_tokenizer, self.tm_torchmoji = self.load_torchmoji()
# override since my checkpoints are still missing speaker names
if self.conf['TTM']['use_speaker_ids_file_override']:
speaker_ids_fpath = self.conf['TTM']['speaker_ids_file']
self.ttm_sp_name_lookup = {name: self.ttm_sp_id_lookup[int(ext_id)] for _, name, ext_id in load_filepaths_and_text(speaker_ids_fpath)}
# load arpabet/pronounciation dictionary
dict_path = self.conf['dict_path']
self.load_arpabet_dict(dict_path)
# download nltk package for splitting text into sentences
nltk.download('punkt')
print("T2S Initialized and Ready!")
def load_arpabet_dict(self, dict_path):
print("Loading ARPAbet Dictionary... ", end="")
self.arpadict = {}
for line in reversed((open(dict_path, "r").read()).splitlines()):
self.arpadict[(line.split(" ", 1))[0]] = (line.split(" ", 1))[1].strip()
print("Done!")
def ARPA(self, text, punc=r"!?,.;:#-_'\"()[]"):
text = text.replace("\n"," ")
out = ''
for word in text.split(" "):
end_chars = ''; start_chars = ''
while any(elem in word for elem in punc) and len(word) > 1:
if word[-1] in punc: end_chars = word[-1] + end_chars; word = word[:-1]
elif word[0] in punc: start_chars = start_chars + word[0]; word = word[1:]
else: break
if word.upper() in self.arpadict.keys():
word = "{" + str(self.arpadict[word.upper()]) + "}"
out = (out + " " + start_chars + word + end_chars).strip()
return out
def load_torchmoji(self):
""" Use torchMoji to score texts for emoji distribution.
The resulting emoji ids (0-63) correspond to the mapping
in emoji_overview.png file at the root of the torchMoji repo.
Writes the result to a csv file.
"""
import json
import numpy as np
import os
from torchmoji.sentence_tokenizer import SentenceTokenizer
from torchmoji.model_def import torchmoji_feature_encoding
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH
print('Tokenizing using dictionary from {}'.format(VOCAB_PATH))
with open(VOCAB_PATH, 'r') as f:
vocabulary = json.load(f)
maxlen = 130
texts = ["Testing!",]
with torch.no_grad():
# init model
st = SentenceTokenizer(vocabulary, maxlen, ignore_sentences_with_only_custom=True)
torchmoji = torchmoji_feature_encoding(PRETRAINED_PATH)
return st, torchmoji
def get_torchmoji_hidden(self, texts):
with torch.no_grad():
tokenized, _, _ = self.tm_sentence_tokenizer.tokenize_sentences(texts) # input array [B] e.g: ["Test?","2nd Sentence!"]
embedding = self.tm_torchmoji(tokenized) # returns np array [B, Embed]
return embedding
def is_ax(self, config):
"""Quickly check if a model uses the Ax WaveGlow core by what's available in the config file."""
return True if 'upsample_first' in config.keys() else False
    def load_waveglow(self, vocoder_path, config_fpath):
        """Load a WaveGlow vocoder checkpoint onto the GPU in half precision.

        Args:
            vocoder_path: path to the WaveGlow checkpoint file.
            config_fpath: path to the matching JSON training config.
        Returns:
            (waveglow model, Denoiser, training sigma, speaker-id lookup dict)
        """
        # Load config file
        with open(config_fpath) as f:
            data = f.read()
        config = json.loads(data)
        train_config = config["train_config"]
        data_config = config["data_config"]
        dist_config = config["dist_config"]
        # model kwargs = waveglow section + STFT params from the data section
        vocoder_config = {
            **config["waveglow_config"],
            'win_length': data_config['win_length'],
            'hop_length': data_config['hop_length']
        }
        print(vocoder_config)
        print(f"Config File from '{config_fpath}' successfully loaded.")
        # import the correct model core (chosen from what the config exposes)
        if self.is_ax(vocoder_config):
            from efficient_model_ax import WaveGlow
        else:
            if vocoder_config["yoyo"]:
                from efficient_model import WaveGlow
            else:
                from glow import WaveGlow
        # initialize model
        print(f"intializing WaveGlow model... ", end="")
        waveglow = WaveGlow(**vocoder_config).cuda()
        print(f"Done!")
        # load checkpoint from file
        print(f"loading WaveGlow checkpoint... ", end="")
        checkpoint = torch.load(vocoder_path)
        waveglow.load_state_dict(checkpoint['model']) # and overwrite initialized weights with checkpointed weights
        waveglow.cuda().eval().half() # move to GPU and convert to half precision
        print(f"Done!")
        print(f"initializing Denoiser... ", end="")
        denoiser = Denoiser(waveglow)
        print(f"Done!")
        vocoder_iters = checkpoint['iteration']
        print(f"WaveGlow trained for {vocoder_iters} iterations")
        speaker_lookup = checkpoint['speaker_lookup'] # ids lookup
        training_sigma = train_config['sigma']  # sigma used at train time, reused for inference
        return waveglow, denoiser, training_sigma, speaker_lookup
def update_wg(self, vocoder_name):
self.waveglow, self.MTW_denoiser, self.MTW_train_sigma, self.MTW_sp_id_lookup = self.load_waveglow(self.conf['MTW']['models'][vocoder_name]['modelpath'], self.conf['MTW']['models'][vocoder_name]['configpath'])
self.MTW_current = vocoder_name
def load_tacotron2(self, tacotron_path):
"""Loads tacotron2,
Returns:
- model
- hparams
- speaker_lookup
"""
checkpoint = torch.load(tacotron_path) # load file into memory
print("Loading Tacotron... ", end="")
checkpoint_hparams = checkpoint['hparams'] # get hparams
checkpoint_dict = checkpoint['state_dict'] # get state_dict
model = load_model(checkpoint_hparams) # initialize the model
model.load_state_dict(checkpoint_dict) # load pretrained weights
_ = model.cuda().eval().half()
print("Done")
tacotron_speaker_name_lookup = checkpoint['speaker_name_lookup'] # save speaker name lookup
tacotron_speaker_id_lookup = checkpoint['speaker_id_lookup'] # save speaker_id lookup
print(f"This Tacotron model has been trained for {checkpoint['iteration']} Iterations.")
return model, checkpoint_hparams, tacotron_speaker_name_lookup, tacotron_speaker_id_lookup
def update_tt(self, tacotron_name):
self.model, self.ttm_hparams, self.ttm_sp_name_lookup, self.ttm_sp_id_lookup = self.load_tacotron2(self.conf['TTM']['models'][tacotron_name]['modelpath'])
self.ttm_current = tacotron_name
if self.conf['TTM']['use_speaker_ids_file_override']:# (optional) override
self.ttm_sp_name_lookup = {name: self.ttm_sp_id_lookup[int(ext_id)] for _, name, ext_id in load_filepaths_and_text(self.conf['TTM']['speaker_ids_file'])}
def get_MTW_sp_id_from_ttm_sp_names(self, names):
"""Get WaveGlow speaker ids from Tacotron2 named speaker lookup. (This should function should be removed once WaveGlow has named speaker support)."""
ttm_model_ids = [self.ttm_sp_name_lookup[name] for name in names]
reversed_lookup = {v: k for k, v in self.ttm_sp_id_lookup.items()}
ttm_ext_ids = [reversed_lookup[int(speaker_id)] for speaker_id in ttm_model_ids]
wv_model_ids = [self.MTW_sp_id_lookup[int(speaker_id)] for speaker_id in ttm_ext_ids]
return wv_model_ids
def get_closest_names(self, names):
possible_names = list(self.ttm_sp_name_lookup.keys())
validated_names = [difflib.get_close_matches(name, possible_names, n=2, cutoff=0.01)[0] for name in names] # change all names in input to the closest valid name
return validated_names
    def infer(self, text, speaker_names, style_mode, textseg_mode, batch_mode, max_attempts, max_duration_s, batch_size, dyna_max_duration_s, use_arpabet, target_score, speaker_mode, cat_silence_s, textseg_len_target, gate_delay=4, gate_threshold=0.6, filename_prefix=None, status_updates=False, show_time_to_gen=True, end_mode='thresh', absolute_maximum_tries=4096, absolutely_required_score=-1e3):
        """Synthesize speech for `text` and write merged audio file(s) to disk.

        Pipeline: split the text into segments, batch the segments through
        Tacotron2 (retrying each segment until `target_score` is reached or an
        attempt limit is hit), vocode the best spectrograms with WaveGlow,
        write per-segment wavs, and merge them with SoX into output file(s).

        Returns (out_name, time_to_gen, audio_seconds_generated, total_specs,
        n_passes, avg_score).

        PARAMS:
        ...
        gate_delay
            default: 4
            options: int ( 0 -> inf )
            info: a modifier for when spectrograms are cut off.
                  This would allow you to add silence to the end of a clip without an unnatural fade-out.
                  8 will give 0.1 seconds of delay before ending the clip.
                  If this param is set too high then the model will try to start speaking again
                  despite not having any text left to speak, therefore keeping it low is typical.
        gate_threshold
            default: 0.6
            options: float ( 0.0 -> 1.0 )
            info: used to control when Tacotron2 will stop generating new mel frames.
                  This will effect speed of generation as the model will generate
                  extra frames till it hits the threshold. This may be preferred if
                  you believe the model is stopping generation too early.
                  When end_mode == 'thresh', this param will also be used to decide
                  when the audio from the best spectrograms should be cut off.
        ...
        end_mode
            default: 'thresh'
            options: ['max','thresh']
            info: controls where the spectrograms are cut off.
                  'max' will cut the spectrograms off at the highest gate output,
                  'thresh' will cut off spectrograms at the first gate output over gate_threshold.
        """
        # validate stopping parameters before any heavy work
        assert end_mode in ['max','thresh'], f"end_mode of {end_mode} is not valid."
        assert gate_delay > -10, "gate_delay is negative."
        assert gate_threshold > 0.0, "gate_threshold less than 0.0"
        assert gate_threshold <= 1.0, "gate_threshold greater than 1.0"
        os.makedirs(self.conf["working_directory"], exist_ok=True)
        os.makedirs(self.conf["output_directory"], exist_ok=True)
        with torch.no_grad():
            # time to gen
            audio_len = 0
            start_time = time.time()
            # Score Metric
            scores = []
            # Score Parameters
            diagonality_weighting = 0.5 # 'pacing factor', a penalty for clips where the model pace changes often/rapidly. # this thing does NOT work well for Rarity.
            max_focus_weighting = 1.0 # 'stuck factor', a penalty for clips that spend execisve time on the same letter.
            min_focus_weighting = 1.0 # 'miniskip factor', a penalty for skipping/ignoring single letters in the input text.
            avg_focus_weighting = 1.0 # 'skip factor', a penalty for skipping very large parts of the input text
            # add a filename prefix to keep multiple requests seperate
            if not filename_prefix:
                filename_prefix = str(time.time())
            # add output filename
            output_filename = f"{filename_prefix}_output"
            # split the text into chunks (if applicable)
            if textseg_mode == 'no_segmentation':
                texts = [text,]
            elif textseg_mode == 'segment_by_line':
                texts = text.split("\n")
            elif textseg_mode == 'segment_by_sentence':
                texts = parse_text_into_segments(text, split_at_quotes=False, target_segment_length=textseg_len_target)
            elif textseg_mode == 'segment_by_sentencequote':
                texts = parse_text_into_segments(text, split_at_quotes=True, target_segment_length=textseg_len_target)
            else:
                raise NotImplementedError(f"textseg_mode of {textseg_mode} is invalid.")
            del text
            # cleanup for empty inputs.
            texts = [x.strip() for x in texts if len(x.strip())]
            total_len = len(texts)
            # update Tacotron stopping params
            frames_per_second = float(self.ttm_hparams.sampling_rate/self.ttm_hparams.hop_length)
            self.tacotron.decoder.gate_delay = int(gate_delay)
            # cap decoder length by both a per-character and an absolute duration limit
            self.tacotron.decoder.max_decoder_steps = int(min(max([len(t) for t in texts]) * float(dyna_max_duration_s)*frames_per_second, float(max_duration_s)*frames_per_second))
            self.tacotron.decoder.gate_threshold = float(gate_threshold)
            # find closest valid name(s)
            speaker_names = self.get_closest_names(speaker_names)
            # pick how the batch will be handled
            batch_size = int(batch_size)
            if batch_mode == "scaleup":
                simultaneous_texts = total_len
                batch_size_per_text = batch_size
            elif batch_mode == "nochange":
                simultaneous_texts = max(batch_size//max_attempts, 1)
                batch_size_per_text = min(batch_size, max_attempts)
            elif batch_mode == "scaledown":
                simultaneous_texts = total_len
                batch_size_per_text = -(-batch_size//total_len)
            else:
                raise NotImplementedError(f"batch_mode of {batch_mode} is invalid.")
            # for size merging
            running_fsize = 0
            fpaths = []
            out_count = 0
            # keeping track of stats for html/terminal
            show_inference_progress_start = time.time()
            continue_from = 0
            counter = 0
            total_specs = 0
            n_passes = 0
            text_batch_in_progress = []
            # ---- main synthesis loop: accumulate texts into batches ----
            for text_index, text in enumerate(texts):
                if text_index < continue_from: print(f"Skipping {text_index}.\t",end=""); counter+=1; continue
                last_text = (text_index == (total_len-1)) # true if final text input
                # setup the text batches
                text_batch_in_progress.append(text)
                if (len(text_batch_in_progress) == simultaneous_texts) or last_text: # if text batch ready or final input
                    text_batch = text_batch_in_progress
                    text_batch_in_progress = []
                else:
                    continue # if batch not ready, add another text
                # re-derive decoder cap for this batch's longest text
                self.tacotron.decoder.max_decoder_steps = int(min(max([len(t) for t in text_batch]) * float(dyna_max_duration_s)*frames_per_second, float(max_duration_s)*frames_per_second))
                # assign a speaker to each text in the batch
                if speaker_mode == "not_interleaved": # non-interleaved
                    batch_speaker_names = speaker_names * -(-simultaneous_texts//len(speaker_names))
                    batch_speaker_names = batch_speaker_names[:simultaneous_texts]
                elif speaker_mode == "interleaved": # interleaved
                    repeats = -(-simultaneous_texts//len(speaker_names))
                    batch_speaker_names = [i for i in speaker_names for _ in range(repeats)][:simultaneous_texts]
                elif speaker_mode == "random": # random
                    batch_speaker_names = [random.choice(speaker_names),] * simultaneous_texts
                elif speaker_mode == "cycle_next": # use next speaker for each text input
                    def shuffle_and_return():
                        first_speaker = speaker_names[0]
                        speaker_names.append(speaker_names.pop(0))
                        return first_speaker
                    batch_speaker_names = [shuffle_and_return() for i in range(simultaneous_texts)]
                else:
                    raise NotImplementedError
                if 0:# (optional) use different speaker list for text inside quotes
                    speaker_ids = [random.choice(speakers).split("|")[2] if ('"' in text) else random.choice(narrators).split("|")[2] for text in text_batch] # pick speaker if quotemark in text, else narrator
                    text_batch = [text.replace('"',"") for text in text_batch] # remove quotes from text
                # final (possibly short) batch: trim speaker list to match
                if len(batch_speaker_names) > len(text_batch):
                    batch_speaker_names = batch_speaker_names[:len(text_batch)]
                    simultaneous_texts = len(text_batch)
                # get speaker_ids (tacotron)
                tacotron_speaker_ids = [self.ttm_sp_name_lookup[speaker] for speaker in batch_speaker_names]
                tacotron_speaker_ids = torch.LongTensor(tacotron_speaker_ids).cuda().repeat_interleave(batch_size_per_text)
                # get speaker_ids (waveglow)
                vocoder_speaker_ids = self.get_MTW_sp_id_from_ttm_sp_names(batch_speaker_names)
                vocoder_speaker_ids = [self.MTW_sp_id_lookup[int(speaker_id)] for speaker_id in vocoder_speaker_ids]
                vocoder_speaker_ids = torch.LongTensor(vocoder_speaker_ids).cuda()
                # get style input
                if style_mode == 'mel':
                    # NOTE(review): `audio_path` is not defined anywhere in this
                    # method — the 'mel' branch looks broken; confirm before use.
                    mel = load_mel(audio_path.replace(".npy",".wav")).cuda().half()
                    style_input = mel
                elif style_mode == 'token':
                    # NOTE(review): unimplemented — leaves style_input unbound.
                    pass
                    #style_input =
                elif style_mode == 'zeros':
                    # NOTE(review): None will crash at style_input.size(0) below.
                    style_input = None
                elif style_mode == 'torchmoji_hidden':
                    try:
                        tokenized, _, _ = self.tm_sentence_tokenizer.tokenize_sentences(text_batch) # input array [B] e.g: ["Test?","2nd Sentence!"]
                    except:
                        raise Exception(f"TorchMoji failed to tokenize text:\n{text_batch}")
                    try:
                        embedding = self.tm_torchmoji(tokenized) # returns np array [B, Embed]
                    except Exception as ex:
                        # NOTE(review): execution continues with `embedding`
                        # unbound after this handler — would raise NameError.
                        print(f'Exception: {ex}')
                        print(f"TorchMoji failed to process text:\n{text_batch}")
                        #raise Exception(f"text\n{text}\nfailed to process.")
                    style_input = torch.from_numpy(embedding).cuda().half().repeat_interleave(batch_size_per_text, dim=0)
                elif style_mode == 'torchmoji_string':
                    style_input = text_batch
                    raise NotImplementedError
                else:
                    raise NotImplementedError
                # tile style rows until there is one per (text, attempt) pair
                if style_input.size(0) < (simultaneous_texts*batch_size_per_text):
                    diff = -(-(simultaneous_texts*batch_size_per_text) // style_input.size(0))
                    style_input = style_input.repeat(diff, 1)[:simultaneous_texts*batch_size_per_text]
                # check punctuation and add '.' if missing
                valid_last_char = '-,.?!;:' # valid final characters in texts
                text_batch = [text+'.' if (text[-1] not in valid_last_char) else text for text in text_batch]
                # parse text
                text_batch = [unidecode(text.replace("...",". ").replace(" "," ").strip()) for text in text_batch] # remove eclipses, double spaces, unicode and spaces before/after the text.
                if use_arpabet: # convert texts to ARPAbet (phonetic) versions.
                    text_batch = [self.ARPA(text) for text in text_batch]
                # convert texts to number representation, pad where appropriate and move to GPU
                sequence_split = [torch.LongTensor(text_to_sequence(text, self.ttm_hparams.text_cleaners)) for text in text_batch] # convert texts to numpy representation
                text_lengths = torch.tensor([seq.size(0) for seq in sequence_split])
                max_len = text_lengths.max().item()
                sequence = torch.zeros(text_lengths.size(0), max_len).long() # create large tensor to move each text input into
                for i in range(text_lengths.size(0)): # move each text into padded input tensor
                    sequence[i, :sequence_split[i].size(0)] = sequence_split[i]
                sequence = sequence.cuda().long().repeat_interleave(batch_size_per_text, dim=0) # move to GPU and repeat text
                text_lengths = text_lengths.cuda().long() # move to GPU
                # debug # Looks like pytorch 1.5 doesn't run contiguous on some operations the previous versions did.
                text_lengths = text_lengths.clone()
                sequence = sequence.clone()
                print("sequence.shape[0] =",sequence.shape[0]) # debug
                # ---- retry loop: regenerate until every text meets target_score
                # (StopIteration is (ab)used as a multi-level `break`) ----
                try:
                    best_score = np.ones(simultaneous_texts) * -9e9
                    tries = np.zeros(simultaneous_texts)
                    best_generations = [0]*simultaneous_texts
                    best_score_str = ['']*simultaneous_texts
                    while np.amin(best_score) < target_score:
                        # run Tacotron
                        if status_updates: print("Running Tacotron2... ", end='')
                        mel_batch_outputs, mel_batch_outputs_postnet, gate_batch_outputs, alignments_batch = self.tacotron.inference(sequence, tacotron_speaker_ids, style_input=style_input, style_mode=style_mode, text_lengths=text_lengths.repeat_interleave(batch_size_per_text, dim=0))
                        # metric for html side
                        n_passes+=1 # metric for html
                        total_specs+=mel_batch_outputs.shape[0]
                        # get metrics for each item
                        if end_mode == 'thresh':
                            output_lengths = get_first_over_thresh(gate_batch_outputs, gate_threshold)
                        elif end_mode == 'max':
                            output_lengths = gate_batch_outputs.argmax(dim=1)
                        diagonality_batch, avg_prob_batch, enc_max_focus_batch, enc_min_focus_batch, enc_avg_focus_batch = alignment_metric(alignments_batch, input_lengths=text_lengths.repeat_interleave(batch_size_per_text, dim=0), output_lengths=output_lengths)
                        # split batch into items
                        batch = list(zip(
                            mel_batch_outputs.split(1,dim=0),
                            mel_batch_outputs_postnet.split(1,dim=0),
                            gate_batch_outputs.split(1,dim=0),
                            alignments_batch.split(1,dim=0),
                            diagonality_batch,
                            avg_prob_batch,
                            enc_max_focus_batch,
                            enc_min_focus_batch,
                            enc_avg_focus_batch,))
                        for j in range(simultaneous_texts): # process each set of text spectrograms seperately
                            start, end = (j*batch_size_per_text), ((j+1)*batch_size_per_text)
                            sametext_batch = batch[start:end] # seperate the full batch into pieces that use the same input text
                            # process all items related to the j'th text input
                            for k, (mel_outputs, mel_outputs_postnet, gate_outputs, alignments, diagonality, avg_prob, enc_max_focus, enc_min_focus, enc_avg_focus) in enumerate(sametext_batch):
                                # factors that make up score
                                weighted_score = avg_prob.item() # general alignment quality
                                diagonality_punishment = (max(diagonality.item(),1.20)-1.20) * 0.5 * diagonality_weighting # speaking each letter at a similar pace.
                                max_focus_punishment = max((enc_max_focus.item()-40), 0) * 0.005 * max_focus_weighting # getting stuck on same letter for 0.6s
                                min_focus_punishment = max(0.25-enc_min_focus.item(),0) * min_focus_weighting # skipping single enc outputs
                                avg_focus_punishment = max(2.5-enc_avg_focus.item(), 0) * avg_focus_weighting # skipping most enc outputs
                                weighted_score -= (diagonality_punishment + max_focus_punishment + min_focus_punishment + avg_focus_punishment)
                                score_str = f"{round(diagonality.item(),3)} {round(avg_prob.item()*100,2)}% {round(weighted_score,4)} {round(max_focus_punishment,2)} {round(min_focus_punishment,2)} {round(avg_focus_punishment,2)}|"
                                # keep the best-scoring attempt for this text
                                if weighted_score > best_score[j]:
                                    best_score[j] = weighted_score
                                    best_score_str[j] = score_str
                                    best_generations[j] = [mel_outputs, mel_outputs_postnet, gate_outputs, alignments]
                                tries[j]+=1
                        # stop retrying once attempt limits are reached
                        if np.amin(tries) >= max_attempts and np.amin(best_score) > (absolutely_required_score-1):
                            raise StopIteration
                        if np.amin(tries) >= absolute_maximum_tries:
                            print(f"Absolutely required score not achieved in {absolute_maximum_tries} attempts - ", end='')
                            raise StopIteration
                        if np.amin(tries) < (max_attempts-1):
                            print(f'Minimum score of {np.amin(best_score)} is less than Target score of {target_score}. Retrying.')
                        elif np.amin(best_score) < absolutely_required_score:
                            print(f"Minimum score of {np.amin(best_score)} is less than 'Absolutely Required score' of {absolutely_required_score}. Retrying.")
                except StopIteration:
                    del batch
                    if status_updates: print("Done")
                    pass
                # cleanup VRAM
                style_input = sequence = None
                # [[mel, melpost, gate, align], [mel, melpost, gate, align], [mel, melpost, gate, align]] -> [[mel, mel, mel], [melpost, melpost, melpost], [gate, gate, gate], [align, align, align]]
                mel_batch_outputs, mel_batch_outputs_postnet, gate_batch_outputs, alignments_batch = [x[0][0].T for x in best_generations], [x[1][0].T for x in best_generations], [x[2][0] for x in best_generations], [x[3][0] for x in best_generations]
                # pickup the best attempts from each input
                # stack best output arrays into tensors for WaveGlow
                gate_batch_outputs = torch.nn.utils.rnn.pad_sequence(gate_batch_outputs, batch_first=True, padding_value=0.0)
                # get duration(s)
                if end_mode == 'thresh':
                    max_lengths = get_first_over_thresh(gate_batch_outputs, gate_threshold)+gate_delay
                elif end_mode == 'max':
                    max_lengths = gate_batch_outputs.argmax(dim=1)+gate_delay
                max_length = torch.max(max_lengths)
                # pad to a common length, then trim everything to the longest clip
                mel_batch_outputs = torch.nn.utils.rnn.pad_sequence(mel_batch_outputs, batch_first=True, padding_value=-11.6).transpose(1,2)[:,:,:max_length]
                mel_batch_outputs_postnet = torch.nn.utils.rnn.pad_sequence(mel_batch_outputs_postnet, batch_first=True, padding_value=-11.6).transpose(1,2)[:,:,:max_length]
                alignments_batch = torch.nn.utils.rnn.pad_sequence(alignments_batch, batch_first=True, padding_value=0)[:,:max_length,:]
                if status_updates:
                    print("Running WaveGlow... ", end='')
                # Run WaveGlow
                audio_batch = self.waveglow.infer(mel_batch_outputs_postnet, speaker_ids=vocoder_speaker_ids, sigma=self.MTW_train_sigma*0.95)
                audio_denoised_batch = self.MTW_denoiser(audio_batch, strength=0.0001).squeeze(1)
                print("audio_denoised_batch.shape =", audio_denoised_batch.shape) # debug
                if status_updates:
                    print('Done')
                # write audio files and any stats
                audio_bs = audio_batch.size(0)
                for j, (audio, audio_denoised) in enumerate(zip(audio_batch.split(1, dim=0), audio_denoised_batch.split(1, dim=0))):
                    # remove WaveGlow padding
                    audio_end = max_lengths[j] * self.ttm_hparams.hop_length
                    audio = audio[:,:audio_end]
                    audio_denoised = audio_denoised[:,:audio_end]
                    # remove Tacotron2 padding
                    spec_end = max_lengths[j]
                    mel_outputs = mel_batch_outputs.split(1, dim=0)[j][:,:,:spec_end]
                    mel_outputs_postnet = mel_batch_outputs_postnet.split(1, dim=0)[j][:,:,:spec_end]
                    alignments = alignments_batch.split(1, dim=0)[j][:,:spec_end,:text_lengths[j]]
                    # save audio
                    filename = f"{filename_prefix}_{counter//300:04}_{counter:06}.wav"
                    save_path = os.path.join(self.conf['working_directory'], filename)
                    # add silence to clips (ignore last clip)
                    if cat_silence_s:
                        cat_silence_samples = int(cat_silence_s*self.ttm_hparams.sampling_rate)
                        audio = torch.nn.functional.pad(audio, (0, cat_silence_samples))
                    # scale audio for int16 output
                    audio = (audio * 2**15).squeeze().cpu().numpy().astype('int16')
                    # remove if already exists
                    if os.path.exists(save_path):
                        print(f"File already found at [{save_path}], overwriting.")
                        os.remove(save_path)
                    if status_updates: print(f"Saving clip to [{save_path}]... ", end="")
                    write(save_path, self.ttm_hparams.sampling_rate, audio)
                    if status_updates: print("Done")
                    counter+=1
                    audio_len+=audio_end
                    # ------ merge clips of 300 ------ #
                    last_item = (j == audio_bs-1)
                    if (counter % 300) == 0 or (last_text and last_item): # if 300th file or last item of last batch.
                        i = (counter- 1)//300
                        # merge batch of 300 files together
                        print(f"Merging audio files {i*300} to {((i+1)*300)-1}... ", end='')
                        fpath = os.path.join(self.conf['working_directory'], f"{filename_prefix}_concat_{i:04}.wav")
                        files_to_merge = os.path.join(self.conf["working_directory"], f"{filename_prefix}_{i:04}_*.wav")
                        os.system(f'sox "{files_to_merge}" -b 16 "{fpath}"')
                        assert os.path.exists(fpath), f"'{fpath}' failed to generate."
                        del files_to_merge
                        # delete the original 300 files
                        print("Cleaning up remaining temp files... ", end="")
                        tmp_files = [fp for fp in glob(os.path.join(self.conf['working_directory'], f"{filename_prefix}_{i:04}_*.wav")) if "output" not in fp]
                        _ = [os.remove(fp) for fp in tmp_files]
                        print("Done")
                        # add merged file to final output(s)
                        fsize = os.stat(fpath).st_size
                        running_fsize += fsize
                        fpaths += [fpath,]
                        if ( running_fsize/(1024**3) > self.conf['output_maxsize_gb'] ) or (len(fpaths) > 300) or (last_text and last_item): # if (total size of fpaths is > 2GB) or (more than 300 inputs) or (last item of last batch): save as output
                            fpath_str = '"'+'" "'.join(fpaths)+'"' # chain together fpaths in string for SoX input
                            output_extension = self.conf['sox_output_ext']
                            if output_extension[0] != '.':
                                output_extension = f".{output_extension}"
                            out_name = f"{output_filename}_{out_count:02}{output_extension}"
                            out_path = os.path.join(self.conf['output_directory'], out_name)
                            os.system(f'sox {fpath_str} -b 16 "{out_path}"') # merge the merged files into final outputs. bit depth of 16 useful to stay in the 32bit duration limit
                            if running_fsize >= (os.stat(out_path).st_size - 1024): # if output seems to have correctly generated.
                                print("Cleaning up merged temp files... ", end="") # delete the temp files and keep the output
                                _ = [os.remove(fp) for fp in fpaths]
                                print("Done")
                            running_fsize = 0
                            out_count+=1
                            fpaths = []
                    # ------ // merge clips of 300 // ------ #
                #end of writing loop
                if True:#self.conf['show_inference_alignment_scores']:
                    for k, bs in enumerate(best_score):
                        print(f"Input_Str {k}: '{text_batch[k]}'")
                        print(f"Best_Score {k}: {bs:0.4f}")
                        print(f"Score_Str {k}: {best_score_str[k]}\n")
                for score in best_score:
                    scores+=[score,]
                if True:#self.conf['show_inference_progress']:
                    time_elapsed = time.time()-show_inference_progress_start
                    time_per_clip = time_elapsed/(text_index+1)
                    remaining_files = (total_len-(text_index+1))
                    eta_finish = (remaining_files*time_per_clip)/60
                    print(f"{text_index}/{total_len}, {eta_finish:.2f}mins remaining.")
                    del time_per_clip, eta_finish, remaining_files, time_elapsed
                audio_seconds_generated = round(audio_len.item()/self.ttm_hparams.sampling_rate,3)
                time_to_gen = round(time.time()-start_time,3)
                if show_time_to_gen:
                    print(f"Generated {audio_seconds_generated}s of audio in {time_to_gen}s wall time - so far. (best of {tries.sum().astype('int')} tries this pass)")
                print("\n") # seperate each pass
        scores = np.stack(scores)
        avg_score = np.mean(scores)
        return out_name, time_to_gen, audio_seconds_generated, total_specs, n_passes, avg_score
"42448678+CookiePPP@users.noreply.github.com"
] | 42448678+CookiePPP@users.noreply.github.com |
dc3d0bbf003d9e703385315bc3b4b2710809e86f | 4c1da0c18482031ea650b32b1ee19cd8e16338fb | /exit_window_v0.py | 34b21c5da8185119ea480f48991bdc8f863aff28 | [] | no_license | adamjaro/irview | 03dbb4c4a3e2a1082e246552b104602108bfd44a | a47c6a49af5411bc167e35fdee961e27243b49f3 | refs/heads/master | 2021-06-19T22:52:27.242434 | 2021-06-17T22:11:32 | 2021-06-17T22:11:32 | 211,398,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,769 | py |
import math
import ROOT as rt
from ROOT import TVector2, TGraph, TText
#_____________________________________________________________________________
class exit_window_v0(object):
    #photon exit window, part of electron beam pipe, rear side
    #_____________________________________________________________________________
    def __init__(self, geom):
        #input points, in electron beamline frame: (z in m, x in cm)
        if geom == "flat":
            self.pos = [(-21.7, -5), (-21.7, 5)] # flat geometry
        elif geom == "tilt":
            self.pos = [(-18.8, -5), (-21.7, 5)] # tilted geometry
        else:
            # fail fast instead of dying later with AttributeError on self.pos
            raise ValueError("geom must be 'flat' or 'tilt', got %r" % (geom,))
        #print the geometry
        self.print_position()
        #points in z and x, both in m
        self.zx_pos = []
        for i in self.pos:
            # z at 0 and x at 1, converted to cm
            self.zx_pos.append( TVector2(i[0], 0.01*i[1]) )
        #angle of initial rotation
        self.rotate(-0.008)
    #_____________________________________________________________________________
    def rotate(self, theta):
        #rotate all window points by angle theta (rad) about the origin
        # ('xrange' from the original was Python 2 only; a list rebuild works
        # on both Python 2 and 3)
        self.zx_pos = [vec.Rotate(theta) for vec in self.zx_pos]
    #_____________________________________________________________________________
    def draw_2d(self):
        #draw the exit window as a line on the current canvas
        self.geom = TGraph(len(self.zx_pos))
        self.geom.SetLineColor(rt.kGreen+1)
        self.geom.SetLineWidth(4)
        for ipoint, point in enumerate(self.zx_pos):
            # x stored in m, drawn in cm
            self.geom.SetPoint(ipoint, point.X(), 100*point.Y())
        self.geom.Draw("lsame")
        #label, placed below the window mid-point
        zpos = (self.zx_pos[0].X() + self.zx_pos[1].X())/2.
        self.label = TText(zpos, (self.zx_pos[0].Y())*100-6, "Exit window")
        self.label.SetTextSize(0.03)
        #self.label.SetTextAngle(90)
        #self.label.SetTextAlign(32)
        self.label.SetTextAlign(23)
        #self.label.Draw("same")
    #_____________________________________________________________________________
    def print_position(self):
        #show position and angle of the exit window
        z1 = self.pos[0][0]*1e3 # m to mm
        z2 = self.pos[1][0]*1e3
        x1 = self.pos[0][1]*10. # cm to mm
        x2 = self.pos[1][1]*10.
        print("z_mid:", (z1 + z2)/2., "mm")
        print("x_mid:", (x1 + x2)/2., "mm")
        #length in x-z plane
        dl = math.sqrt((z1-z2)**2 + (x1-x2)**2)
        print("len:", dl, "mm")
        #angle in x-z plane
        dz = abs(z2-z1)
        dx = abs(x2-x1)
        #theta = math.atan( dx/dz )
        theta = math.asin( dx/dl )
        print("dz:", dz, "mm")
        print("dx:", dx, "mm")
        print("theta:", theta, "rad")
        print("pi/2 - theta:", math.pi/2. - theta, "rad")
| [
"jaroslav.adam@cern.ch"
] | jaroslav.adam@cern.ch |
1ee2a6377c7360321e648ff0ee8fd16d7f80d533 | ba0cbdae81c171bd4be7b12c0594de72bd6d625a | /MyToontown/py2/toontown/safezone/DLPlayground.pyc.py | 5e65850004a6b36058f664b5dfb7c98d89449186 | [] | no_license | sweep41/Toontown-2016 | 65985f198fa32a832e762fa9c59e59606d6a40a3 | 7732fb2c27001264e6dd652c057b3dc41f9c8a7d | refs/heads/master | 2021-01-23T16:04:45.264205 | 2017-06-04T02:47:34 | 2017-06-04T02:47:34 | 93,279,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | # 2013.08.22 22:24:38 Pacific Daylight Time
# Embedded file name: toontown.safezone.DLPlayground
from pandac.PandaModules import *
import Playground
import random
# Decompiled from DLPlayground.pyc — presumably Donald's Dreamland safezone
# (the "DL" prefix); confirm against the rest of toontown.safezone.
class DLPlayground(Playground.Playground):
    __module__ = __name__
    def __init__(self, loader, parentFSM, doneEvent):
        # All real setup lives in the generic Playground base class.
        Playground.Playground.__init__(self, loader, parentFSM, doneEvent)
    def showPaths(self):
        # Debug helper: visualize the classic-character walk paths for Donald.
        # Imports are local because the decompiled module keeps them here.
        from toontown.classicchars import CCharPaths
        from toontown.toonbase import TTLocalizer
        self.showPathPoints(CCharPaths.getPaths(TTLocalizer.Donald))
# okay decompyling C:\Users\Maverick\Documents\Visual Studio 2010\Projects\Unfreezer\py2\toontown\safezone\DLPlayground.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2013.08.22 22:24:38 Pacific Daylight Time
| [
"sweep14@gmail.com"
] | sweep14@gmail.com |
a2294c40c4792abccefe3426817b25c445dc8dca | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03385/s791898250.py | 480d80639c0ae6dfbadb01faa2974dae7aa402a4 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | s = set(input())
if len(s) == 3:
print("Yes")
else:
print("No") | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
8433d0ea23babf77d16a0856540607257f5e75b6 | abdab481e9c4f64fc3d2f84539c58b51fd66f198 | /numpy/core/arrayprint.pyi | b8c7b1604647f74d163548109b0a7e327c99aef6 | [] | no_license | depixusgenome/linting | aa068d9b5dd393c668429a1fed2e0dfc5d675125 | 4e3398fab98f873f77f8e8ab81eaeb7df215e7f7 | refs/heads/master | 2020-09-24T08:39:02.645467 | 2019-10-11T20:33:20 | 2019-10-11T20:33:20 | 225,716,480 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,249 | pyi | # Stubs for numpy.core.arrayprint (Python 3.5)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from typing import Any, Optional
from . import numerictypes as _nt
# Stub for numpy.set_printoptions; every option is an optional override.
def set_printoptions(precision: Optional[Any] = ..., threshold: Optional[Any] = ..., edgeitems: Optional[Any] = ..., linewidth: Optional[Any] = ..., suppress: Optional[Any] = ..., nanstr: Optional[Any] = ..., infstr: Optional[Any] = ..., formatter: Optional[Any] = ...): ...
# Stub for numpy.get_printoptions; returns the current print-option dict.
def get_printoptions(): ...
# Stub for numpy.array2string; renders array `a` using the given formatting.
def array2string(a, max_line_width: Optional[Any] = ..., precision: Optional[Any] = ..., suppress_small: Optional[Any] = ..., separator: str = ..., prefix: str = ..., style: Any = ..., formatter: Optional[Any] = ...): ...
# Stub: per-dtype formatter for float arrays; fillFormat scans the data to
# choose fixed vs exponential layout before __call__ formats single values.
class FloatFormat:
    precision = ... # type: Any
    suppress_small = ... # type: Any
    sign = ... # type: Any
    exp_format = ... # type: bool
    large_exponent = ... # type: bool
    max_str_len = ... # type: int
    def __init__(self, data, precision, suppress_small, sign: bool = ...) -> None: ...
    special_fmt = ... # type: Any
    format = ... # type: Any
    def fillFormat(self, data): ...
    def __call__(self, x, strip_zeros: bool = ...): ...
# Stub: formatter for integer arrays; width derived from the data's extremes.
class IntegerFormat:
    format = ... # type: Any
    def __init__(self, data) -> None: ...
    def __call__(self, x): ...
# Stub: formatter for long-double (extended precision) scalars.
class LongFloatFormat:
    precision = ... # type: Any
    sign = ... # type: Any
    def __init__(self, precision, sign: bool = ...) -> None: ...
    def __call__(self, x): ...
# Stub: formatter for long-double complex scalars (real + imag parts).
class LongComplexFormat:
    real_format = ... # type: Any
    imag_format = ... # type: Any
    def __init__(self, precision) -> None: ...
    def __call__(self, x): ...
# Stub: formatter for complex arrays; delegates to real/imag sub-formatters.
class ComplexFormat:
    real_format = ... # type: Any
    imag_format = ... # type: Any
    def __init__(self, x, precision, suppress_small) -> None: ...
    def __call__(self, x): ...
# Stub: formatter for datetime64 arrays (timezone, unit and casting options).
class DatetimeFormat:
    timezone = ... # type: Any
    unit = ... # type: Any
    casting = ... # type: Any
    def __init__(self, x, unit: Optional[Any] = ..., timezone: Optional[Any] = ..., casting: str = ...) -> None: ...
    def __call__(self, x): ...
# Stub: formatter for timedelta64 arrays.
class TimedeltaFormat:
    format = ... # type: Any
    def __init__(self, data) -> None: ...
    def __call__(self, x): ...
| [
"pol.davezac@depixus.com"
] | pol.davezac@depixus.com |
96d191f3d7de629e749b76de9c56cc154bb389f5 | 5b5aee20bf23dfe1a6314fb524c4cc31e140ee64 | /thuoclao/check/migrations/0001_initial.py | 5b551428b8fbec05aa632d32ed1e93ba3ac8071c | [] | no_license | locvx1234/ThuoclaoPing | 7f75aa658a7b97d4b8c8982b2477e93463861cf2 | 8c821c0adf50a93a17c29255905bcf32101fe171 | refs/heads/master | 2023-08-17T05:29:38.392255 | 2019-10-22T02:44:41 | 2019-10-22T02:44:41 | 128,077,290 | 0 | 1 | null | 2023-09-09T02:36:36 | 2018-04-04T14:50:33 | JavaScript | UTF-8 | Python | false | false | 4,370 | py | # Generated by Django 2.0.4 on 2018-07-20 02:13
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
# Auto-generated initial schema for the `check` app. Do not hand-edit the
# operations below — schema changes belong in new migration files.
class Migration(migrations.Migration):
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '0009_alter_user_last_name_max_length'),
    ]
    operations = [
        # Per-user alerting preferences; one-to-one with the auth User.
        migrations.CreateModel(
            name='Alert',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to=settings.AUTH_USER_MODEL)),
                ('email_alert', models.EmailField(blank=True, max_length=100)),
                ('telegram_id', models.CharField(blank=True, help_text='Telegram ID', max_length=10)),
                ('webhook', models.URLField(blank=True, help_text='URL to send message into Slack.')),
                ('delay_check', models.IntegerField(default=10, help_text='Interval time to check status host. - unit: second')),
            ],
            options={
                'ordering': ('user',),
            },
        ),
        # A named group of hosts with per-status counters (ok/warning/critical).
        migrations.CreateModel(
            name='Group',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group_name', models.CharField(max_length=45)),
                ('description', models.TextField(blank=True, null=True)),
                ('ok', models.IntegerField(blank=True, null=True)),
                ('warning', models.IntegerField(blank=True, null=True)),
                ('critical', models.IntegerField(blank=True, null=True)),
            ],
        ),
        # Typed key/value attributes attached to a Group.
        migrations.CreateModel(
            name='Group_attribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attribute_name', models.CharField(max_length=45)),
                ('value', models.CharField(max_length=100)),
                ('type_value', models.IntegerField(help_text='0: integer, 1: bool, 2: date, 3: string, 4: ip-domain, 5: URL', null=True)),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Group')),
            ],
        ),
        # A monitored host; status defaults to -1 (not yet checked).
        migrations.CreateModel(
            name='Host',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('hostname', models.CharField(max_length=45)),
                ('description', models.TextField(blank=True, null=True)),
                ('status', models.IntegerField(default=-1, help_text='0: ok, 1: warning, 2: critical')),
                ('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Group')),
            ],
            options={
                'ordering': ('hostname',),
            },
        ),
        # Typed key/value attributes attached to a Host.
        migrations.CreateModel(
            name='Host_attribute',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('attribute_name', models.CharField(max_length=45)),
                ('value', models.CharField(max_length=100)),
                ('type_value', models.IntegerField(help_text='0: integer, 1: bool, 2: date, 3: string, 4: ip-domain, 5: URL', null=True)),
                ('host', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Host')),
            ],
        ),
        # A check/service type that a Group monitors.
        migrations.CreateModel(
            name='Service',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('service_name', models.CharField(max_length=45)),
            ],
            options={
                'ordering': ('service_name',),
            },
        ),
        # Group foreign keys added after Service/User models exist.
        migrations.AddField(
            model_name='group',
            name='service',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='check.Service'),
        ),
        migrations.AddField(
            model_name='group',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"locvx1234@gmail.com"
] | locvx1234@gmail.com |
ba9213240622adab8cae4a44d407a97634aa1ed4 | 348c846c8c39bc7b3933e9417c98a581fdcf2140 | /test/test_location.py | e0708fbd8ebf330f6bb4244d227325bc9bca7b74 | [
"MIT"
] | permissive | danpf/python-ihm | 42d20e9bf2cd490ab324642a3874a3f9526d629d | cbe681ee56701202769a401ee6380ccab5fcae84 | refs/heads/master | 2022-12-08T01:08:37.104767 | 2020-07-10T19:18:27 | 2020-07-10T19:18:27 | 287,045,448 | 0 | 0 | MIT | 2020-08-12T15:09:28 | 2020-08-12T15:09:27 | null | UTF-8 | Python | false | false | 10,337 | py | import utils
import os
import unittest
TOPDIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
utils.set_search_paths(TOPDIR)
import ihm.location
def _make_test_file(fname):
with open(fname, 'w') as fh:
fh.write('contents')
class Tests(unittest.TestCase):
def test_database_location(self):
"""Test DatabaseLocation"""
dl1 = ihm.location.DatabaseLocation('mydb', 'abc', version=1)
dl2 = ihm.location.DatabaseLocation('mydb', 'abc', version=1)
self.assertEqual(dl1, dl2)
dl3 = ihm.location.DatabaseLocation('mydb', 'abc', version=2)
self.assertNotEqual(dl1, dl3)
# details can change without affecting equality
dl4 = ihm.location.DatabaseLocation('mydb', 'abc', version=1,
details='foo')
self.assertEqual(dl1, dl4)
self.assertEqual(dl1.db_name, 'mydb')
self.assertEqual(dl1.access_code, 'abc')
self.assertEqual(dl1.version, 1)
self.assertIsNone(dl1.details)
def test_pdb_location(self):
"""Test PDBLocation"""
l = ihm.location.PDBLocation('1abc', version='foo', details='bar')
self.assertEqual(l.db_name, 'PDB')
self.assertEqual(l.access_code, '1abc')
self.assertEqual(l.version, 'foo')
self.assertEqual(l.details, 'bar')
def test_pdb_dev_location(self):
"""Test PDBDevLocation"""
l = ihm.location.PDBDevLocation(
'PDBDEV_00000029', version='foo', details='bar')
self.assertEqual(l.db_name, 'PDB-Dev')
self.assertEqual(l.access_code, 'PDBDEV_00000029')
self.assertEqual(l.version, 'foo')
self.assertEqual(l.details, 'bar')
def test_bmrb_location(self):
"""Test BMRBLocation"""
l = ihm.location.BMRBLocation('27600', version='foo', details='bar')
self.assertEqual(l.db_name, 'BMRB')
self.assertEqual(l.access_code, '27600')
self.assertEqual(l.version, 'foo')
self.assertEqual(l.details, 'bar')
def test_emdb_location(self):
"""Test EMDBLocation"""
l = ihm.location.EMDBLocation('EMDB-123', version='foo', details='bar')
self.assertEqual(l.db_name, 'EMDB')
self.assertEqual(l.access_code, 'EMDB-123')
self.assertEqual(l.version, 'foo')
self.assertEqual(l.details, 'bar')
def test_massive_location(self):
"""Test MassIVELocation class"""
d = ihm.location.MassIVELocation('abc', version=1, details='foo')
self.assertEqual(d.db_name, 'MASSIVE')
self.assertEqual(d.access_code, 'abc')
self.assertEqual(d.version, 1)
self.assertEqual(d.details, 'foo')
def test_empiar_location(self):
"""Test EMPIARLocation class"""
d = ihm.location.EMPIARLocation('abc', version=1, details='foo')
self.assertEqual(d.db_name, 'EMPIAR')
self.assertEqual(d.access_code, 'abc')
self.assertEqual(d.version, 1)
self.assertEqual(d.details, 'foo')
def test_sasbdb_location(self):
"""Test SASBDBLocation class"""
d = ihm.location.SASBDBLocation('abc', version=1, details='foo')
self.assertEqual(d.db_name, 'SASBDB')
self.assertEqual(d.access_code, 'abc')
self.assertEqual(d.version, 1)
self.assertEqual(d.details, 'foo')
def test_pride_location(self):
"""Test PRIDELocation class"""
d = ihm.location.PRIDELocation('abc', version=1, details='foo')
self.assertEqual(d.db_name, 'PRIDE')
self.assertEqual(d.access_code, 'abc')
self.assertEqual(d.version, 1)
self.assertEqual(d.details, 'foo')
def test_biogrid_location(self):
"""Test BioGRIDLocation class"""
d = ihm.location.BioGRIDLocation('abc', version=1, details='foo')
self.assertEqual(d.db_name, 'BioGRID')
self.assertEqual(d.access_code, 'abc')
self.assertEqual(d.version, 1)
self.assertEqual(d.details, 'foo')
def test_location(self):
"""Test Location base class"""
l = ihm.location.Location(details='foo')
l._allow_duplicates = True
self.assertEqual(l._eq_vals(), id(l))
# Locations should never compare equal to None
self.assertNotEqual(l, None)
def test_file_location_local(self):
"""Test InputFileLocation with a local file"""
# Make tmpdir under current directory, as it's not always possible to
# get a relative path from cwd to /tmp (e.g. on Windows where they may
# be on different drives)
with utils.temporary_directory('.') as tmpdir:
fname = os.path.join(tmpdir, 'test.pdb')
_make_test_file(fname)
l = ihm.location.InputFileLocation(fname)
self.assertEqual(l.path, os.path.abspath(fname))
self.assertIsNone(l.repo)
self.assertEqual(l.file_size, 8)
def test_file_location_local_not_exist(self):
"""Test InputFileLocation with a local file that doesn't exist"""
with utils.temporary_directory() as tmpdir:
fname = os.path.join(tmpdir, 'test.pdb')
self.assertRaises(ValueError, ihm.location.InputFileLocation, fname)
def test_file_location_repo(self):
"""Test InputFileLocation with a file in a repository"""
r = ihm.location.Repository(doi='1.2.3.4')
l = ihm.location.InputFileLocation('foo/bar', repo=r)
self.assertEqual(l.path, 'foo/bar')
self.assertEqual(l.repo, r)
self.assertIsNone(l.file_size)
# locations should only compare equal if path and repo both match
l2 = ihm.location.InputFileLocation('foo/bar', repo=r)
self.assertEqual(l, l2)
l3 = ihm.location.InputFileLocation('otherpath', repo=r)
self.assertNotEqual(l, l3)
r2 = ihm.location.Repository(doi='5.6.7.8')
l4 = ihm.location.InputFileLocation('foo/bar', repo=r2)
self.assertNotEqual(l, l4)
l5 = ihm.location.InputFileLocation(None, repo=r)
self.assertNotEqual(l, l5)
l6 = ihm.location.InputFileLocation(None, repo=r2)
self.assertNotEqual(l, l6)
def test_repository_equality(self):
"""Test Repository equality"""
r1 = ihm.location.Repository(doi='foo')
r2 = ihm.location.Repository(doi='foo')
r3 = ihm.location.Repository(doi='foo', url='bar')
r4 = ihm.location.Repository(doi='bar')
self.assertEqual(r1, r2)
self.assertEqual(hash(r1), hash(r2))
self.assertNotEqual(r1, r3)
self.assertNotEqual(r1, r4)
def test_repository(self):
"""Test Repository"""
# Make tmpdir under current directory, as it's not always possible to
# get a relative path from cwd to /tmp (e.g. on Windows where they may
# be on different drives)
with utils.temporary_directory(os.getcwd()) as tmpdir:
subdir = os.path.join(tmpdir, 'subdir')
subdir2 = os.path.join(tmpdir, 'subdir2')
os.mkdir(subdir)
_make_test_file(os.path.join(subdir, 'bar'))
s = ihm.location.Repository(doi='10.5281/zenodo.46266',
root=os.path.relpath(tmpdir),
url='foo', top_directory='baz')
self.assertEqual(s._root, tmpdir)
self.assertEqual(s.url, 'foo')
self.assertEqual(s.top_directory, 'baz')
loc = ihm.location.InputFileLocation(
os.path.relpath(os.path.join(subdir, 'bar')))
self.assertIsNone(loc.repo)
ihm.location.Repository._update_in_repos(loc, [s])
self.assertEqual(loc.repo.doi, '10.5281/zenodo.46266')
self.assertEqual(loc.path, os.path.join('subdir', 'bar'))
# Shouldn't touch locations that are already in repos
loc = ihm.location.InputFileLocation(repo='foo', path='bar')
self.assertEqual(loc.repo, 'foo')
ihm.location.Repository._update_in_repos(loc, [s])
self.assertEqual(loc.repo, 'foo')
# Shortest match should win
loc = ihm.location.InputFileLocation(
os.path.relpath(os.path.join(subdir, 'bar')))
s2 = ihm.location.Repository(doi='10.5281/zenodo.46280',
root=os.path.relpath(subdir),
url='foo', top_directory='baz')
# Repositories that aren't above the file shouldn't count
s3 = ihm.location.Repository(doi='10.5281/zenodo.56280',
root=os.path.relpath(subdir2),
url='foo', top_directory='baz')
ihm.location.Repository._update_in_repos(loc, [s2, s3, s])
self.assertEqual(loc.repo.doi, '10.5281/zenodo.46280')
self.assertEqual(loc.path, 'bar')
def test_repository_no_checkout(self):
"""Test Repository with no checkout"""
r = ihm.location.Repository(doi='10.5281/zenodo.46266')
f = ihm.location.InputFileLocation(repo=r, path='foo')
self.assertEqual(f.repo.doi, '10.5281/zenodo.46266')
self.assertEqual(f.path, 'foo')
def test_repository_get_full_path(self):
"""Test Repository._get_full_path"""
r = ihm.location.Repository(doi='10.5281/zenodo.46266',
top_directory='/foo')
self.assertEqual(r._get_full_path('bar'), '/foo%sbar' % os.sep)
def test_file_locations(self):
"""Test FileLocation derived classes"""
r = ihm.location.Repository(doi='10.5281/zenodo.46266')
l = ihm.location.InputFileLocation(repo=r, path='foo')
self.assertEqual(l.content_type, 'Input data or restraints')
l = ihm.location.OutputFileLocation(repo=r, path='foo')
self.assertEqual(l.content_type, 'Modeling or post-processing output')
l = ihm.location.WorkflowFileLocation(repo=r, path='foo')
self.assertEqual(l.content_type, 'Modeling workflow or script')
l = ihm.location.VisualizationFileLocation(repo=r, path='foo')
self.assertEqual(l.content_type, 'Visualization script')
if __name__ == '__main__':
unittest.main()
| [
"ben@salilab.org"
] | ben@salilab.org |
0c4e1416b8bf147efb68a6aa8f6cd1d073d05606 | 64cd09628f599fe18bf38528309349f7ac0df71e | /Introduction/01_Introduction_python/10 Python functions/zip.py | 9752853642d820a77fb69fc2a92b8d95daa4c8cf | [] | no_license | JunyoungJang/Python | 958c057b2fd37c03876d3cf566ee27ee637bb020 | 76d4cd441deff8061e10608e0848360bc4f34490 | refs/heads/master | 2021-01-19T21:54:42.208469 | 2020-02-14T09:54:17 | 2020-02-14T09:54:17 | 83,768,220 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | x = [1, 2, 3]
y = [4, 5, 6]
print zip(x, y) # [(1, 4), (2, 5), (3, 6)]
x = [1, 2, 3]
y = [4, 5]
print zip(x, y) # [(1, 4), (2, 5)]
x = [1, 2]
y = [4, 5, 6]
print zip(x, y) # [(1, 4), (2, 5)]
| [
"lakino@yonsei.ac.kr"
] | lakino@yonsei.ac.kr |
7d39c4708554eb4bc2ae95510d145826c1b4b271 | 84c49683c22a17dcc43a81dc17eafe4ec81571a5 | /google-cloud-sdk/platform/gsutil/gslib/commands/ls.py | 844f790e27fcd1c99b924c06efc5405feba9195d | [
"BSD-2-Clause",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | edenpark/xin-666 | 2a80d012923e7c2b5f3880579d014a61f17b3f8b | d40ea542eb9cedcbe7872c3a5f2d3457ea804f3d | refs/heads/master | 2021-05-24T01:04:54.895766 | 2017-03-07T02:21:58 | 2017-03-07T02:21:58 | 65,078,186 | 0 | 1 | NOASSERTION | 2020-07-25T00:03:49 | 2016-08-06T10:47:05 | Python | UTF-8 | Python | false | false | 18,904 | py | # -*- coding: utf-8 -*-
# Copyright 2011 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of Unix-like ls command for cloud storage providers."""
from __future__ import absolute_import
import re
from gslib.boto_translation import S3_DELETE_MARKER_GUID
from gslib.cloud_api import NotFoundException
from gslib.command import Command
from gslib.command_argument import CommandArgument
from gslib.cs_api_map import ApiSelector
from gslib.exception import CommandException
from gslib.ls_helper import ENCRYPTED_FIELDS
from gslib.ls_helper import LsHelper
from gslib.ls_helper import UNENCRYPTED_FULL_LISTING_FIELDS
from gslib.storage_url import ContainsWildcard
from gslib.storage_url import StorageUrlFromString
from gslib.translation_helper import AclTranslation
from gslib.util import ListingStyle
from gslib.util import MakeHumanReadable
from gslib.util import NO_MAX
from gslib.util import PrintFullInfoAboutObject
from gslib.util import UTF8
# Regex that assists with converting JSON timestamp to ls-style output.
# This excludes timestamp fractional seconds, for example:
# 2013-07-03 20:32:53.048000+00:00
JSON_TIMESTAMP_RE = re.compile(r'([^\s]*)\s([^\.\+]*).*')
_SYNOPSIS = """
gsutil ls [-a] [-b] [-d] [-l] [-L] [-r] [-p proj_id] url...
"""
_DETAILED_HELP_TEXT = ("""
<B>SYNOPSIS</B>
""" + _SYNOPSIS + """
<B>LISTING PROVIDERS, BUCKETS, SUBDIRECTORIES, AND OBJECTS</B>
If you run gsutil ls without URLs, it lists all of the Google Cloud Storage
buckets under your default project ID:
gsutil ls
(For details about projects, see "gsutil help projects" and also the -p
option in the OPTIONS section below.)
If you specify one or more provider URLs, gsutil ls will list buckets at
each listed provider:
gsutil ls gs://
If you specify bucket URLs, gsutil ls will list objects at the top level of
each bucket, along with the names of each subdirectory. For example:
gsutil ls gs://bucket
might produce output like:
gs://bucket/obj1.htm
gs://bucket/obj2.htm
gs://bucket/images1/
gs://bucket/images2/
The "/" at the end of the last 2 URLs tells you they are subdirectories,
which you can list using:
gsutil ls gs://bucket/images*
If you specify object URLs, gsutil ls will list the specified objects. For
example:
gsutil ls gs://bucket/*.txt
will list all files whose name matches the above wildcard at the top level
of the bucket.
See "gsutil help wildcards" for more details on working with wildcards.
<B>DIRECTORY BY DIRECTORY, FLAT, and RECURSIVE LISTINGS</B>
Listing a bucket or subdirectory (as illustrated near the end of the previous
section) only shows the objects and names of subdirectories it contains. You
can list all objects in a bucket by using the -r option. For example:
gsutil ls -r gs://bucket
will list the top-level objects and buckets, then the objects and
buckets under gs://bucket/images1, then those under gs://bucket/images2, etc.
If you want to see all objects in the bucket in one "flat" listing use the
recursive ("**") wildcard, like:
gsutil ls -r gs://bucket/**
or, for a flat listing of a subdirectory:
gsutil ls -r gs://bucket/dir/**
If you want to see only the subdirectory itself, use the -d option:
gsutil ls -d gs://bucket/dir
<B>LISTING OBJECT DETAILS</B>
If you specify the -l option, gsutil will output additional information
about each matching provider, bucket, subdirectory, or object. For example:
gsutil ls -l gs://bucket/*.txt
will print the object size, creation time stamp, and name of each matching
object, along with the total count and sum of sizes of all matching objects:
2276224 2012-03-02T19:25:17Z gs://bucket/obj1
3914624 2012-03-02T19:30:27Z gs://bucket/obj2
TOTAL: 2 objects, 6190848 bytes (5.9 MiB)
Note that the total listed in parentheses above is in mebibytes (or gibibytes,
tebibytes, etc.), which corresponds to the unit of billing measurement for
Google Cloud Storage.
You can get a listing of all the objects in the top-level bucket directory
(along with the total count and sum of sizes) using a command like:
gsutil ls -l gs://bucket
To print additional detail about objects and buckets use the gsutil ls -L
option. For example:
gsutil ls -L gs://bucket/obj1
will print something like:
gs://bucket/obj1:
Creation Time: Fri, 02 Mar 2012 19:25:17 GMT
Size: 2276224
Cache-Control: private, max-age=0
Content-Type: application/x-executable
ETag: 5ca6796417570a586723b7344afffc81
Generation: 1378862725952000
Metageneration: 1
ACL:
[
{
"entity": "group-00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"entityId": "00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"role": "OWNER"
}
]
TOTAL: 1 objects, 2276224 bytes (2.17 MiB)
See also "gsutil help acl" for getting a more readable version of the ACL.
<B>LISTING BUCKET DETAILS</B>
If you want to see information about the bucket itself, use the -b
option. For example:
gsutil ls -L -b gs://bucket
will print something like:
gs://bucket/ :
StorageClass: STANDARD
LocationConstraint: US
Versioning enabled: True
Logging: None
WebsiteConfiguration: None
CORS configuration: Present
Lifecycle configuration: None
[
{
"entity": "group-00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"entityId": "00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"role": "OWNER"
}
]
Default ACL:
[
{
"entity": "group-00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"entityId": "00b4903a97163d99003117abe64d292561d2b4074fc90ce5c0e35ac45f66ad70",
"role": "OWNER"
}
]
<B>OPTIONS</B>
-l Prints long listing (owner, length).
-L Prints even more detail than -l. Note: If you use this option
with the (non-default) XML API it will generate an additional
request per object being listed, which makes the -L option run
much more slowly (and cost more) using the XML API than the
default JSON API.
-d List matching subdirectory names instead of contents, and do not
recurse into matching subdirectories even if the -R option is
specified.
-b Prints info about the bucket when used with a bucket URL.
-h When used with -l, prints object sizes in human readable format
(e.g., 1 KiB, 234 MiB, 2 GiB, etc.)
-p proj_id Specifies the project ID to use for listing buckets.
-R, -r Requests a recursive listing.
-a Includes non-current object versions / generations in the listing
(only useful with a versioning-enabled bucket). If combined with
-l option also prints metageneration for each listed object.
-e Include ETag in long listing (-l) output.
""")
class LsCommand(Command):
"""Implementation of gsutil ls command."""
# Command specification. See base class for documentation.
command_spec = Command.CreateCommandSpec(
'ls',
command_name_aliases=['dir', 'list'],
usage_synopsis=_SYNOPSIS,
min_args=0,
max_args=NO_MAX,
supported_sub_args='aebdlLhp:rR',
file_url_ok=False,
provider_url_ok=True,
urls_start_arg=0,
gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
gs_default_api=ApiSelector.JSON,
argparse_arguments=[
CommandArgument.MakeZeroOrMoreCloudURLsArgument()
]
)
# Help specification. See help_provider.py for documentation.
help_spec = Command.HelpSpec(
help_name='ls',
help_name_aliases=['dir', 'list'],
help_type='command_help',
help_one_line_summary='List providers, buckets, or objects',
help_text=_DETAILED_HELP_TEXT,
subcommand_help_text={},
)
def _PrintBucketInfo(self, bucket_blr, listing_style):
"""Print listing info for given bucket.
Args:
bucket_blr: BucketListingReference for the bucket being listed
listing_style: ListingStyle enum describing type of output desired.
Returns:
Tuple (total objects, total bytes) in the bucket.
"""
if (listing_style == ListingStyle.SHORT or
listing_style == ListingStyle.LONG):
print bucket_blr
return
# listing_style == ListingStyle.LONG_LONG:
# We're guaranteed by the caller that the root object is populated.
bucket = bucket_blr.root_object
location_constraint = bucket.location
storage_class = bucket.storageClass
fields = {'bucket': bucket_blr.url_string,
'storage_class': storage_class,
'location_constraint': location_constraint,
'acl': AclTranslation.JsonFromMessage(bucket.acl),
'default_acl': AclTranslation.JsonFromMessage(
bucket.defaultObjectAcl)}
fields['versioning'] = bucket.versioning and bucket.versioning.enabled
fields['website_config'] = 'Present' if bucket.website else 'None'
fields['logging_config'] = 'Present' if bucket.logging else 'None'
fields['cors_config'] = 'Present' if bucket.cors else 'None'
fields['lifecycle_config'] = 'Present' if bucket.lifecycle else 'None'
# For field values that are multiline, add indenting to make it look
# prettier.
for key in fields:
previous_value = fields[key]
if (not isinstance(previous_value, basestring) or
'\n' not in previous_value):
continue
new_value = previous_value.replace('\n', '\n\t ')
# Start multiline values on a new line if they aren't already.
if not new_value.startswith('\n'):
new_value = '\n\t ' + new_value
fields[key] = new_value
print('{bucket} :\n'
'\tStorage class:\t\t\t{storage_class}\n'
'\tLocation constraint:\t\t{location_constraint}\n'
'\tVersioning enabled:\t\t{versioning}\n'
'\tLogging configuration:\t\t{logging_config}\n'
'\tWebsite configuration:\t\t{website_config}\n'
'\tCORS configuration: \t\t{cors_config}\n'
'\tLifecycle configuration:\t{lifecycle_config}\n'
'\tACL:\t\t\t\t{acl}\n'
'\tDefault ACL:\t\t\t{default_acl}'.format(**fields))
if bucket_blr.storage_url.scheme == 's3':
print('Note: this is an S3 bucket so configuration values may be '
'blank. To retrieve bucket configuration values, use '
'individual configuration commands such as gsutil acl get '
'<bucket>.')
def _PrintLongListing(self, bucket_listing_ref):
"""Prints an object with ListingStyle.LONG."""
obj = bucket_listing_ref.root_object
url_str = bucket_listing_ref.url_string
if (obj.metadata and S3_DELETE_MARKER_GUID in
obj.metadata.additionalProperties):
size_string = '0'
num_bytes = 0
num_objs = 0
url_str += '<DeleteMarker>'
else:
size_string = (MakeHumanReadable(obj.size)
if self.human_readable else str(obj.size))
num_bytes = obj.size
num_objs = 1
timestamp = JSON_TIMESTAMP_RE.sub(
r'\1T\2Z', str(obj.updated).decode(UTF8).encode('ascii'))
printstr = '%(size)10s %(timestamp)s %(url)s'
encoded_etag = None
encoded_metagen = None
if self.all_versions:
printstr += ' metageneration=%(metageneration)s'
encoded_metagen = str(obj.metageneration).encode(UTF8)
if self.include_etag:
printstr += ' etag=%(etag)s'
encoded_etag = obj.etag.encode(UTF8)
format_args = {
'size': size_string,
'timestamp': timestamp,
'url': url_str.encode(UTF8),
'metageneration': encoded_metagen,
'etag': encoded_etag
}
print printstr % format_args
return (num_objs, num_bytes)
def RunCommand(self):
"""Command entry point for the ls command."""
got_nomatch_errors = False
got_bucket_nomatch_errors = False
listing_style = ListingStyle.SHORT
get_bucket_info = False
self.recursion_requested = False
self.all_versions = False
self.include_etag = False
self.human_readable = False
self.list_subdir_contents = True
if self.sub_opts:
for o, a in self.sub_opts:
if o == '-a':
self.all_versions = True
elif o == '-e':
self.include_etag = True
elif o == '-b':
get_bucket_info = True
elif o == '-h':
self.human_readable = True
elif o == '-l':
listing_style = ListingStyle.LONG
elif o == '-L':
listing_style = ListingStyle.LONG_LONG
elif o == '-p':
self.project_id = a
elif o == '-r' or o == '-R':
self.recursion_requested = True
elif o == '-d':
self.list_subdir_contents = False
if not self.args:
# default to listing all gs buckets
self.args = ['gs://']
total_objs = 0
total_bytes = 0
def MaybePrintBucketHeader(blr):
if len(self.args) > 1:
print '%s:' % blr.url_string.encode(UTF8)
print_bucket_header = MaybePrintBucketHeader
for url_str in self.args:
storage_url = StorageUrlFromString(url_str)
if storage_url.IsFileUrl():
raise CommandException('Only cloud URLs are supported for %s'
% self.command_name)
bucket_fields = None
if (listing_style == ListingStyle.SHORT or
listing_style == ListingStyle.LONG):
bucket_fields = ['id']
elif listing_style == ListingStyle.LONG_LONG:
bucket_fields = ['location', 'storageClass', 'versioning', 'acl',
'defaultObjectAcl', 'website', 'logging', 'cors',
'lifecycle']
if storage_url.IsProvider():
# Provider URL: use bucket wildcard to list buckets.
for blr in self.WildcardIterator(
'%s://*' % storage_url.scheme).IterBuckets(
bucket_fields=bucket_fields):
self._PrintBucketInfo(blr, listing_style)
elif storage_url.IsBucket() and get_bucket_info:
# ls -b bucket listing request: List info about bucket(s).
total_buckets = 0
for blr in self.WildcardIterator(url_str).IterBuckets(
bucket_fields=bucket_fields):
if not ContainsWildcard(url_str) and not blr.root_object:
# Iterator does not make an HTTP call for non-wildcarded
# listings with fields=='id'. Ensure the bucket exists by calling
# GetBucket.
self.gsutil_api.GetBucket(
blr.storage_url.bucket_name,
fields=['id'], provider=storage_url.scheme)
self._PrintBucketInfo(blr, listing_style)
total_buckets += 1
if not ContainsWildcard(url_str) and not total_buckets:
got_bucket_nomatch_errors = True
else:
# URL names a bucket, object, or object subdir ->
# list matching object(s) / subdirs.
def _PrintPrefixLong(blr):
print '%-33s%s' % ('', blr.url_string.encode(UTF8))
if listing_style == ListingStyle.SHORT:
# ls helper by default readies us for a short listing.
ls_helper = LsHelper(self.WildcardIterator, self.logger,
all_versions=self.all_versions,
print_bucket_header_func=print_bucket_header,
should_recurse=self.recursion_requested,
list_subdir_contents=self.list_subdir_contents)
elif listing_style == ListingStyle.LONG:
bucket_listing_fields = ['name', 'updated', 'size']
if self.all_versions:
bucket_listing_fields.extend(['generation', 'metageneration'])
if self.include_etag:
bucket_listing_fields.append('etag')
ls_helper = LsHelper(self.WildcardIterator, self.logger,
print_object_func=self._PrintLongListing,
print_dir_func=_PrintPrefixLong,
print_bucket_header_func=print_bucket_header,
all_versions=self.all_versions,
should_recurse=self.recursion_requested,
fields=bucket_listing_fields,
list_subdir_contents=self.list_subdir_contents)
elif listing_style == ListingStyle.LONG_LONG:
# List all fields
bucket_listing_fields = (UNENCRYPTED_FULL_LISTING_FIELDS +
ENCRYPTED_FIELDS)
ls_helper = LsHelper(self.WildcardIterator, self.logger,
print_object_func=PrintFullInfoAboutObject,
print_dir_func=_PrintPrefixLong,
print_bucket_header_func=print_bucket_header,
all_versions=self.all_versions,
should_recurse=self.recursion_requested,
fields=bucket_listing_fields,
list_subdir_contents=self.list_subdir_contents)
else:
raise CommandException('Unknown listing style: %s' % listing_style)
exp_dirs, exp_objs, exp_bytes = ls_helper.ExpandUrlAndPrint(storage_url)
if storage_url.IsObject() and exp_objs == 0 and exp_dirs == 0:
got_nomatch_errors = True
total_bytes += exp_bytes
total_objs += exp_objs
if total_objs and listing_style != ListingStyle.SHORT:
print ('TOTAL: %d objects, %d bytes (%s)' %
(total_objs, total_bytes, MakeHumanReadable(float(total_bytes))))
if got_nomatch_errors:
raise CommandException('One or more URLs matched no objects.')
if got_bucket_nomatch_errors:
raise NotFoundException('One or more bucket URLs matched no buckets.')
return 0
| [
"edenp@techday.com"
] | edenp@techday.com |
fd276b40e39dc6d6a2c51ea5ff00896c701319db | ded3109fc9a05b60c36da2c41017f799fb887f07 | /moose_nerp-1/moose_nerp/graph/plot_channel.py | c5f38765ede441d9f83cf23a57ba64812dc2569b | [] | no_license | ModelDBRepository/245563 | f5f0d48213a9deb9b07ea694136e008cbcfdeff0 | 97cd40113230c4ddadc77725bb5148fcc2f5b9a7 | refs/heads/master | 2020-04-24T12:26:02.905630 | 2019-02-21T22:23:39 | 2019-02-21T22:23:39 | 171,954,802 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,160 | py | import numpy as np
import matplotlib.pyplot as plt
import moose
def plot_gate_params(chan,plotpow, VMIN=-0.1, VMAX=0.05, CAMIN=0, CAMAX=1):
#print "PLOT POWER", plotpow, chan.path,chan.Xpower
"""Plot the gate parameters like m and h of the channel."""
if chan.className == 'HHChannel':
cols=1
#n=range(0,2,1)
if chan.Zpower!=0 and (chan.Xpower!=0 or chan.Ypower!=0) and chan.useConcentration == True:
fig,axes=plt.subplots(3,cols,sharex=False)
axes[1].set_xlabel('voltage')
axes[2].set_xlabel('Calcium')
else:
fig,axes=plt.subplots(2,cols,sharex=True)
axes[1].set_xlabel('voltage')
plt.suptitle(chan.name)
if chan.Xpower > 0:
gate=moose.element(chan.path + '/gateX')
ma = gate.tableA
mb = gate.tableB
varray = np.linspace(gate.min, gate.max, len(ma))
axes[0].plot(varray, 1e3 / mb, label='mtau ' + chan.name)
if plotpow:
label = '(minf)**{}'.format(chan.Xpower)
inf = (ma / mb) ** chan.Xpower
else:
label = 'minf'
inf = ma / mb
axes[1].plot(varray, inf, label=label)
axes[1].axis([gate.min, gate.max, 0, 1])
if chan.Ypower > 0:
gate=moose.element(chan.path + '/gateY')
ha = gate.tableA
hb = gate.tableB
varray = np.linspace(gate.min, gate.max, len(ha))
axes[0].plot(varray, 1e3 / hb, label='htau ' + chan.name)
axes[1].plot(varray, ha / hb, label='hinf ' + chan.name)
axes[1].axis([gate.min, gate.max, 0, 1])
#
if chan.Zpower!=0:
gate=moose.element(chan.path + '/gateZ')
za = gate.tableA
zb = gate.tableB
xarray=np.linspace(gate.min,gate.max,len(za))
if (chan.Xpower==0 and chan.Ypower==0) or chan.useConcentration == False:
axes[0].plot(xarray,1e3/zb,label='ztau ' + chan.name)
axes[1].plot(xarray, za / zb, label='zinf' + chan.name)
if chan.useConcentration == True:
axes[1].set_xlabel('Calcium')
else:
axes[2].set_xscale("log")
axes[2].set_ylabel('ss, tau (s)')
axes[2].plot(xarray,1/zb,label='ztau ' + chan.name)
axes[2].plot(xarray, za / zb, label='zinf ' + chan.name)
axes[2].legend(loc='best', fontsize=8)
axes[0].set_ylabel('tau, ms')
axes[1].set_ylabel('steady state')
axes[0].legend(loc='best', fontsize=8)
axes[1].legend(loc='best', fontsize=8)
else: #Must be two-D tab channel
plt.figure()
ma = moose.element(chan.path + '/gateX').tableA
mb = moose.element(chan.path + '/gateX').tableB
ma = np.array(ma)
mb = np.array(mb)
plt.subplot(211)
plt.title(chan.name+'/gateX top: tau (ms), bottom: ss')
plt.imshow(1e3/mb,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto',origin='lower')
plt.colorbar()
plt.subplot(212)
if plotpow:
inf = (ma/mb)**chan.Xpower
else:
inf = ma/mb
plt.imshow(inf,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto',origin='lower')
plt.xlabel('Ca [mM]')
plt.ylabel('Vm [V]')
plt.colorbar()
if chan.Ypower > 0:
ha = moose.element(chan.path + '/gateY').tableA
hb = moose.element(chan.path + '/gateY').tableB
ha = np.array(ha)
hb = np.array(hb)
plt.figure()
plt.subplot(211)
plt.suptitle(chan.name+'/gateY tau')
plt.imshow(1e3/hb,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto')
plt.colorbar()
plt.subplot(212)
if plotpow:
inf = (ha/hb)**chan.Ypower
else:
inf = ha/hb
plt.imshow(inf,extent=[CAMIN,CAMAX,VMIN,VMAX],aspect='auto')
plt.xlabel('Ca [nM]')
plt.ylabel('Vm [V]')
plt.colorbar()
return
| [
"tom.morse@yale.edu"
] | tom.morse@yale.edu |
cfc0d9237456019b83e1e17feea2e776ea65aa18 | 209a7a4023a9a79693ec1f6e8045646496d1ea71 | /COMP0016_2020_21_Team12-datasetsExperimentsAna/pwa/FADapp/pythonScripts/venv/Lib/site-packages/pandas/tests/groupby/aggregate/test_other.py | d28fbb9f2ce9714f5c0abb1f9befb6b90a4ae255 | [
"MIT"
] | permissive | anzhao920/MicrosoftProject15_Invictus | 5e2347015411bbffbdf0ceb059df854661fb240c | 15f44eebb09561acbbe7b6730dfadf141e4c166d | refs/heads/main | 2023-04-16T13:24:39.332492 | 2021-04-27T00:47:13 | 2021-04-27T00:47:13 | 361,913,170 | 0 | 0 | MIT | 2021-04-26T22:41:56 | 2021-04-26T22:41:55 | null | UTF-8 | Python | false | false | 19,753 | py | """
test all other .agg behavior
"""
import datetime as dt
from functools import partial
import numpy as np
import pytest
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
PeriodIndex,
Series,
date_range,
period_range,
)
import pandas._testing as tm
from pandas.core.base import SpecificationError
from pandas.io.formats.printing import pprint_thing
def test_agg_api():
# GH 6337
# https://stackoverflow.com/questions/21706030/pandas-groupby-agg-function-column-dtype-error
# different api for agg when passed custom function with mixed frame
df = DataFrame(
{
"data1": np.random.randn(5),
"data2": np.random.randn(5),
"key1": ["a", "a", "b", "b", "a"],
"key2": ["one", "two", "one", "two", "one"],
}
)
grouped = df.groupby("key1")
def peak_to_peak(arr):
return arr.max() - arr.min()
expected = grouped.agg([peak_to_peak])
expected.columns = ["data1", "data2"]
result = grouped.agg(peak_to_peak)
tm.assert_frame_equal(result, expected)
def test_agg_datetimes_mixed():
data = [[1, "2012-01-01", 1.0], [2, "2012-01-02", 2.0], [3, None, 3.0]]
df1 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
data = [
[
row[0],
(dt.datetime.strptime(row[1], "%Y-%m-%d").date() if row[1] else None),
row[2],
]
for row in data
]
df2 = DataFrame(
{
"key": [x[0] for x in data],
"date": [x[1] for x in data],
"value": [x[2] for x in data],
}
)
df1["weights"] = df1["value"] / df1["value"].sum()
gb1 = df1.groupby("date").aggregate(np.sum)
df2["weights"] = df1["value"] / df1["value"].sum()
gb2 = df2.groupby("date").aggregate(np.sum)
assert len(gb1) == len(gb2)
def test_agg_period_index():
prng = period_range("2012-1-1", freq="M", periods=3)
df = DataFrame(np.random.randn(3, 2), index=prng)
rs = df.groupby(level=0).sum()
assert isinstance(rs.index, PeriodIndex)
# GH 3579
index = period_range(start="1999-01", periods=5, freq="M")
s1 = Series(np.random.rand(len(index)), index=index)
s2 = Series(np.random.rand(len(index)), index=index)
df = DataFrame.from_dict({"s1": s1, "s2": s2})
grouped = df.groupby(df.index.month)
list(grouped)
def test_agg_dict_parameter_cast_result_dtypes():
    """GH 12821: first/last/count via ``agg`` must preserve datetime dtypes."""
    # GH 12821
    df = DataFrame(
        {
            "class": ["A", "A", "B", "B", "C", "C", "D", "D"],
            "time": date_range("1/1/2011", periods=8, freq="H"),
        }
    )
    # Knock out the first value of groups A, B and C so first/last differ.
    df.loc[[0, 1, 2, 5], "time"] = None

    # test for `first` function
    exp = df.loc[[0, 3, 4, 6]].set_index("class")
    grouped = df.groupby("class")
    tm.assert_frame_equal(grouped.first(), exp)
    tm.assert_frame_equal(grouped.agg("first"), exp)
    tm.assert_frame_equal(grouped.agg({"time": "first"}), exp)
    tm.assert_series_equal(grouped.time.first(), exp["time"])
    tm.assert_series_equal(grouped.time.agg("first"), exp["time"])

    # test for `last` function
    exp = df.loc[[0, 3, 4, 7]].set_index("class")
    grouped = df.groupby("class")
    tm.assert_frame_equal(grouped.last(), exp)
    tm.assert_frame_equal(grouped.agg("last"), exp)
    tm.assert_frame_equal(grouped.agg({"time": "last"}), exp)
    tm.assert_series_equal(grouped.time.last(), exp["time"])
    tm.assert_series_equal(grouped.time.agg("last"), exp["time"])

    # count: ``len``/``size`` include the NaT rows, ``count`` does not.
    exp = Series([2, 2, 2, 2], index=Index(list("ABCD"), name="class"), name="time")
    tm.assert_series_equal(grouped.time.agg(len), exp)
    tm.assert_series_equal(grouped.time.size(), exp)

    exp = Series([0, 1, 1, 2], index=Index(list("ABCD"), name="class"), name="time")
    tm.assert_series_equal(grouped.time.count(), exp)
def test_agg_cast_results_dtypes():
    # similar to GH12821, xref #11444: ``agg(len)`` must match ``count()``.
    dates = [dt.datetime(2015, month, 1) for month in range(1, 13)]
    labels = list("aaabbbbbbccd")
    df = DataFrame({"X": labels, "Y": dates})

    result = df.groupby("X")["Y"].agg(len)
    expected = df.groupby("X")["Y"].count()
    tm.assert_series_equal(result, expected)
def test_aggregate_float64_no_int64():
    # see gh-11199: the mean of integer columns must come back as float64.
    df = DataFrame({"a": [1, 2, 3, 4, 5], "b": [1, 2, 2, 4, 5], "c": [1, 2, 3, 4, 5]})

    expected = DataFrame({"a": [1, 2.5, 4, 5]}, index=Index([1, 2, 4, 5], name="b"))
    result = df.groupby("b")[["a"]].mean()
    tm.assert_frame_equal(result, expected)

    expected = DataFrame(
        {"a": [1, 2.5, 4, 5], "c": [1, 2.5, 4, 5]},
        index=Index([1, 2, 4, 5], name="b"),
    )
    result = df.groupby("b")[["a", "c"]].mean()
    tm.assert_frame_equal(result, expected)
def test_aggregate_api_consistency():
    """GH 9052: list-of-funcs and dict-of-funcs forms of ``agg`` must agree."""
    # GH 9052
    # make sure that the aggregates via dict
    # are consistent
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": np.random.randn(8) + 1.0,
            "D": np.arange(8),
        }
    )

    grouped = df.groupby(["A", "B"])
    # Reference single-function results, used to compose expectations below.
    c_mean = grouped["C"].mean()
    c_sum = grouped["C"].sum()
    d_mean = grouped["D"].mean()
    d_sum = grouped["D"].sum()

    # list of function names on a selected column
    result = grouped["D"].agg(["sum", "mean"])
    expected = pd.concat([d_sum, d_mean], axis=1)
    expected.columns = ["sum", "mean"]
    tm.assert_frame_equal(result, expected, check_like=True)

    # list of callables on the whole frame -> MultiIndex columns
    result = grouped.agg([np.sum, np.mean])
    expected = pd.concat([c_sum, c_mean, d_sum, d_mean], axis=1)
    expected.columns = MultiIndex.from_product([["C", "D"], ["sum", "mean"]])
    tm.assert_frame_equal(result, expected, check_like=True)

    # column selection order must be preserved in the output
    result = grouped[["D", "C"]].agg([np.sum, np.mean])
    expected = pd.concat([d_sum, d_mean, c_sum, c_mean], axis=1)
    expected.columns = MultiIndex.from_product([["D", "C"], ["sum", "mean"]])
    tm.assert_frame_equal(result, expected, check_like=True)

    # dict of column -> single function
    result = grouped.agg({"C": "mean", "D": "sum"})
    expected = pd.concat([d_sum, c_mean], axis=1)
    tm.assert_frame_equal(result, expected, check_like=True)

    # dict of column -> list of functions
    result = grouped.agg({"C": ["mean", "sum"], "D": ["mean", "sum"]})
    expected = pd.concat([c_mean, c_sum, d_mean, d_sum], axis=1)
    expected.columns = MultiIndex.from_product([["C", "D"], ["mean", "sum"]])
    # NOTE(review): this last ``expected`` is never asserted against
    # ``result`` — looks like a missed ``assert_frame_equal`` in the original.

    # renaming keys that are not existing columns must raise
    msg = r"Column\(s\) \['r', 'r2'\] do not exist"
    with pytest.raises(SpecificationError, match=msg):
        grouped[["D", "C"]].agg({"r": np.sum, "r2": np.mean})
def test_agg_dict_renaming_deprecation():
    """GH 15931: dict-of-dicts renaming through ``agg`` is not supported."""
    # 15931
    df = DataFrame({"A": [1, 1, 1, 2, 2], "B": range(5), "C": range(5)})

    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        df.groupby("A").agg(
            {"B": {"foo": ["sum", "max"]}, "C": {"bar": ["count", "min"]}}
        )

    # dict keys must name existing columns
    msg = r"Column\(s\) \['ma'\] do not exist"
    with pytest.raises(SpecificationError, match=msg):
        df.groupby("A")[["B", "C"]].agg({"ma": "max"})

    # renaming via a dict on a SeriesGroupBy is also a nested renamer
    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        df.groupby("A").B.agg({"foo": "count"})
def test_agg_compat():
    """GH 12334: dict keys that rename a selected column must raise."""
    # GH 12334
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": np.random.randn(8) + 1.0,
            "D": np.arange(8),
        }
    )

    g = df.groupby(["A", "B"])

    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        g["D"].agg({"C": ["sum", "std"]})

    with pytest.raises(SpecificationError, match=msg):
        g["D"].agg({"C": "sum", "D": "std"})
def test_agg_nested_dicts():
    """Nested renaming dicts are disallowed in every ``agg`` spelling."""
    # API change for disallowing these types of nested dicts
    df = DataFrame(
        {
            "A": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
            "B": ["one", "one", "two", "two", "two", "two", "one", "two"],
            "C": np.random.randn(8) + 1.0,
            "D": np.arange(8),
        }
    )

    g = df.groupby(["A", "B"])

    msg = r"nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        g.aggregate({"r1": {"C": ["mean", "sum"]}, "r2": {"D": ["mean", "sum"]}})

    with pytest.raises(SpecificationError, match=msg):
        g.agg({"C": {"ra": ["mean", "std"]}, "D": {"rb": ["mean", "std"]}})

    # same name as the original column
    # GH9052
    with pytest.raises(SpecificationError, match=msg):
        g["D"].agg({"result1": np.sum, "result2": np.mean})

    with pytest.raises(SpecificationError, match=msg):
        g["D"].agg({"D": np.sum, "result2": np.mean})
def test_agg_item_by_item_raise_typeerror():
    """A TypeError raised by a UDF must propagate out of ``agg``."""
    df = DataFrame(np.random.randint(10, size=(20, 10)))

    def raiseException(df):
        # Debug output so the offending frame is visible in the test log.
        pprint_thing("----------------------------------------")
        pprint_thing(df.to_string())
        raise TypeError("test")

    with pytest.raises(TypeError, match="test"):
        df.groupby(0).agg(raiseException)
def test_series_agg_multikey():
    # ``agg(np.sum)`` over multiple grouping keys must equal ``sum()``.
    ts = tm.makeTimeSeries()
    gb = ts.groupby([lambda x: x.year, lambda x: x.month])

    result = gb.agg(np.sum)
    expected = gb.sum()
    tm.assert_series_equal(result, expected)
def test_series_agg_multi_pure_python():
    """A pure-Python UDF must see real data blocks and match a constant lambda."""
    data = DataFrame(
        {
            "A": [
                "foo",
                "foo",
                "foo",
                "foo",
                "bar",
                "bar",
                "bar",
                "bar",
                "foo",
                "foo",
                "foo",
            ],
            "B": [
                "one",
                "one",
                "one",
                "two",
                "one",
                "one",
                "one",
                "two",
                "two",
                "two",
                "one",
            ],
            "C": [
                "dull",
                "dull",
                "shiny",
                "dull",
                "dull",
                "shiny",
                "shiny",
                "dull",
                "shiny",
                "shiny",
                "shiny",
            ],
            "D": np.random.randn(11),
            "E": np.random.randn(11),
            "F": np.random.randn(11),
        }
    )

    def bad(x):
        # ``x.values.base`` being non-empty shows the UDF receives a view
        # into the underlying data rather than an empty/detached array.
        assert len(x.values.base) > 0
        return "foo"

    result = data.groupby(["A", "B"]).agg(bad)
    expected = data.groupby(["A", "B"]).agg(lambda x: "foo")
    tm.assert_frame_equal(result, expected)
def test_agg_consistency():
    # GH 6715: ``agg([f])`` and ``agg(f)`` disagreed for a single function.
    def P1(a):
        return np.percentile(a.dropna(), q=1)

    df = DataFrame(
        {
            "col1": [1, 2, 3, 4],
            "col2": [10, 25, 26, 31],
            "date": [
                dt.date(2013, 2, 10),
                dt.date(2013, 2, 10),
                dt.date(2013, 2, 11),
                dt.date(2013, 2, 11),
            ],
        }
    )
    gb = df.groupby("date")

    expected = gb.agg([P1])
    # Collapse the (column, func) MultiIndex down to the column names.
    expected.columns = expected.columns.levels[0]

    result = gb.agg(P1)
    tm.assert_frame_equal(result, expected)
def test_agg_callables():
    # GH 7929: every callable flavour of "sum" must aggregate identically.
    df = DataFrame({"foo": [1, 2], "bar": [3, 4]}).astype(np.int64)

    class SumCallable:
        def __call__(self, x):
            return sum(x)

    equiv_callables = (
        sum,
        np.sum,
        lambda x: sum(x),
        lambda x: x.sum(),
        partial(sum),
        SumCallable(),
    )

    expected = df.groupby("foo").agg(sum)
    for func in equiv_callables:
        result = df.groupby("foo").agg(func)
        tm.assert_frame_equal(result, expected)
def test_agg_over_numpy_arrays():
    # GH 3788: object columns holding ndarrays must sum element-wise.
    df = DataFrame(
        {
            "category": [1, 1, 2],
            "arraydata": [
                np.array([10, 20, 30]),
                np.array([40, 50, 60]),
                np.array([20, 30, 40]),
            ],
        },
        columns=["category", "arraydata"],
    )

    result = df.groupby("category").agg(sum)

    expected = DataFrame(
        [[np.array([50, 70, 90])], [np.array([20, 30, 40])]],
        index=Index([1, 2], name="category"),
        columns=["arraydata"],
    )
    tm.assert_frame_equal(result, expected)
def test_agg_tzaware_non_datetime_result():
    # discussed in GH#29589, fixed in GH#29641: lambdas over tz-aware values
    # may return non-datetime results and must not be coerced back.
    dti = pd.date_range("2012-01-01", periods=4, tz="UTC")
    gb = DataFrame({"a": [0, 0, 1, 1], "b": dti}).groupby("a")

    def check(func, expected_values):
        result = gb["b"].agg(func)
        expected = Series(expected_values, name="b")
        expected.index.name = "a"
        tm.assert_series_equal(result, expected)

    # Case that _does_ preserve the dtype
    check(lambda x: x.iloc[0], dti[::2])

    # Cases that do _not_ preserve the dtype
    check(lambda x: x.iloc[0].year, [2012, 2012])
    check(
        lambda x: x.iloc[-1] - x.iloc[0],
        [pd.Timedelta(days=1), pd.Timedelta(days=1)],
    )
def test_agg_timezone_round_trip():
    """GH 15426: tz-aware Timestamps must survive min/first/last/nth intact."""
    # GH 15426
    ts = pd.Timestamp("2016-01-01 12:00:00", tz="US/Pacific")
    df = DataFrame({"a": 1, "b": [ts + dt.timedelta(minutes=nn) for nn in range(10)]})

    result1 = df.groupby("a")["b"].agg(np.min).iloc[0]
    result2 = df.groupby("a")["b"].agg(lambda x: np.min(x)).iloc[0]
    result3 = df.groupby("a")["b"].min().iloc[0]

    assert result1 == ts
    assert result2 == ts
    assert result3 == ts

    dates = [
        pd.Timestamp(f"2016-01-0{i:d} 12:00:00", tz="US/Pacific") for i in range(1, 5)
    ]
    df = DataFrame({"A": ["a", "b"] * 2, "B": dates})
    grouped = df.groupby("A")

    # First row of group "a":
    ts = df["B"].iloc[0]
    assert ts == grouped.nth(0)["B"].iloc[0]
    assert ts == grouped.head(1)["B"].iloc[0]
    assert ts == grouped.first()["B"].iloc[0]

    # GH#27110 applying iloc should return a DataFrame
    assert ts == grouped.apply(lambda x: x.iloc[0]).iloc[0, 1]

    # Last row of group "a":
    ts = df["B"].iloc[2]
    assert ts == grouped.last()["B"].iloc[0]

    # GH#27110 applying iloc should return a DataFrame
    assert ts == grouped.apply(lambda x: x.iloc[-1]).iloc[0, 1]
def test_sum_uint64_overflow():
    """GH 14758: values near the int64 max must be summed as uint64."""
    # see gh-14758
    # Convert to uint64 and don't overflow
    df = DataFrame([[1, 2], [3, 4], [5, 6]], dtype=object)
    # Shift every value next to the int64 maximum (2**63 - 1).
    df = df + 9223372036854775807

    index = Index(
        [9223372036854775808, 9223372036854775810, 9223372036854775812], dtype=np.uint64
    )
    expected = DataFrame(
        {1: [9223372036854775809, 9223372036854775811, 9223372036854775813]},
        index=index,
    )
    expected.index.name = 0

    result = df.groupby(0).sum()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "structure, expected",
    [
        (tuple, DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}})),
        (list, DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}})),
        (
            lambda x: tuple(x),
            DataFrame({"C": {(1, 1): (1, 1, 1), (3, 4): (3, 4, 4)}}),
        ),
        (
            lambda x: list(x),
            DataFrame({"C": {(1, 1): [1, 1, 1], (3, 4): [3, 4, 4]}}),
        ),
    ],
)
def test_agg_structs_dataframe(structure, expected):
    """Aggregating each group into a container (by type or by lambda) must
    keep the container intact in the resulting frame."""
    df = DataFrame(
        {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
    )

    result = df.groupby(["A", "B"]).aggregate(structure)
    expected.index.names = ["A", "B"]
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "structure, expected",
    [
        (tuple, Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
        (list, Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
        (lambda x: tuple(x), Series([(1, 1, 1), (3, 4, 4)], index=[1, 3], name="C")),
        (lambda x: list(x), Series([[1, 1, 1], [3, 4, 4]], index=[1, 3], name="C")),
    ],
)
def test_agg_structs_series(structure, expected):
    """GH 18079: Series ``aggregate`` must keep container results intact."""
    # Issue #18079
    df = DataFrame(
        {"A": [1, 1, 1, 3, 3, 3], "B": [1, 1, 1, 4, 4, 4], "C": [1, 1, 1, 3, 4, 4]}
    )

    result = df.groupby("A")["C"].aggregate(structure)
    expected.index.name = "A"
    tm.assert_series_equal(result, expected)
def test_agg_category_nansum(observed):
    """``np.nansum`` over a categorical grouper honors the ``observed`` flag.

    ``observed`` is a pytest fixture toggling categorical group handling.
    """
    categories = ["a", "b", "c"]
    df = DataFrame(
        {"A": pd.Categorical(["a", "a", "b"], categories=categories), "B": [1, 2, 3]}
    )
    result = df.groupby("A", observed=observed).B.agg(np.nansum)
    expected = Series(
        [3, 3, 0],
        index=pd.CategoricalIndex(["a", "b", "c"], categories=categories, name="A"),
        name="B",
    )
    if observed:
        # With observed=True the unused category "c" (nansum 0) is dropped.
        expected = expected[expected != 0]
    tm.assert_series_equal(result, expected)
def test_agg_list_like_func():
    # GH 18473: a list-returning lambda in an ``agg`` dict must produce one
    # list per group, keeping the original (unsorted) group order.
    values = [str(i) for i in range(3)]
    df = DataFrame({"A": values, "B": values})

    grouped = df.groupby("A", as_index=False, sort=False)
    result = grouped.agg({"B": lambda x: list(x)})

    expected = DataFrame({"A": values, "B": [[v] for v in values]})
    tm.assert_frame_equal(result, expected)
def test_agg_lambda_with_timezone():
    # GH 23683: tz-aware timestamps must survive a lambda aggregation.
    df = DataFrame(
        {
            "tag": [1, 1],
            "date": [
                pd.Timestamp("2018-01-01", tz="UTC"),
                pd.Timestamp("2018-01-02", tz="UTC"),
            ],
        }
    )

    result = df.groupby("tag").agg({"date": lambda e: e.head(1)})

    expected = DataFrame(
        [pd.Timestamp("2018-01-01", tz="UTC")],
        index=Index([1], name="tag"),
        columns=["date"],
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "err_cls",
    [
        NotImplementedError,
        RuntimeError,
        KeyError,
        IndexError,
        OSError,
        ValueError,
        ArithmeticError,
        AttributeError,
    ],
)
def test_groupby_agg_err_catching(err_cls):
    """UDF exceptions other than TypeError/AssertionError must be suppressed."""
    # make sure we suppress anything other than TypeError or AssertionError
    # in _python_agg_general

    # Use a non-standard EA to make sure we don't go down ndarray paths
    from pandas.tests.extension.decimal.array import DecimalArray, make_data, to_decimal

    data = make_data()[:5]
    df = DataFrame(
        {"id1": [0, 0, 0, 1, 1], "id2": [0, 1, 0, 1, 1], "decimals": DecimalArray(data)}
    )
    expected = Series(to_decimal([data[0], data[3]]))

    def weird_func(x):
        # weird function that raise something other than TypeError or IndexError
        # in _python_agg_general
        if len(x) == 0:
            raise err_cls
        return x.iloc[0]

    result = df["decimals"].groupby(df["id1"]).agg(weird_func)
    tm.assert_series_equal(result, expected, check_names=False)
| [
"ana.kapros@yahoo.ro"
] | ana.kapros@yahoo.ro |
def main():
    """Read 121 daily temperatures and report the minimum, the maximum,
    the average, and how many days were below the average."""
    temperaturas = [0] * 121

    # Fill the vector with one temperature per input line.
    for i in range(len(temperaturas)):
        temperaturas[i] = float(input())

    # Minimum search. (Bug fix: the original reset ``menor`` to
    # ``temperaturas[0]`` on every iteration, so only the last element was
    # ever compared.)
    menor = temperaturas[0]
    for temperatura in temperaturas:
        if temperatura < menor:
            menor = temperatura
    print("A menor temperatura neste periodo foi de %.2f graus" % menor)

    # Maximum search. (Bug fix: the original used ``<`` instead of ``>`` and
    # printed ``menor`` in the "maior" message.)
    maior = temperaturas[0]
    for temperatura in temperaturas:
        if temperatura > maior:
            maior = temperatura
    print("A maior temperatura neste periodo foi de %.2f graus" % maior)

    # Reduce: average of all readings.
    media = sum(temperaturas) / len(temperaturas)
    print("A media das temperaturas é de %.2f" % media)

    # Count the days strictly below the average.
    dias = 0
    for temperatura in temperaturas:
        if temperatura < media:
            dias += 1
    print("Em %d dias a temperatura foi abaixo da media." % dias)


if __name__ == '__main__':
    main()
"vinicius.c.mascarenhas@hotmail.com"
] | vinicius.c.mascarenhas@hotmail.com |
166470769a122a0a59d76a1b0de5948e91f65b00 | 06b2a4f8dc27b5177bfd782386b59b47eafe6556 | /Django-level-3/project_exercise/users/admin.py | d703d0074246d644276dd261368797c5c0376557 | [] | no_license | mishrakeshav/Django-Tutorials | d81eb736f3d21f207147e1dd7c5132a54523feca | 3757c40e975fa782996f4968752d0b41da1cc96c | refs/heads/master | 2023-07-07T08:45:46.264661 | 2021-03-17T15:09:08 | 2021-03-17T15:09:08 | 275,316,311 | 1 | 0 | null | 2021-08-09T21:02:45 | 2020-06-27T06:49:23 | JavaScript | UTF-8 | Python | false | false | 129 | py | from django.contrib import admin
# Register your models here.
from users.models import User

# Expose the project's custom User model in the Django admin interface.
admin.site.register(User)
| [
"keshav.sm@somaiya.edu"
] | keshav.sm@somaiya.edu |
eed1bf04dd38dc87166f710aeaa9a078af06b58e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03565/s696347515.py | d570956493fddbee98c2576a04e3d91b8bd5bc84 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | import copy
S_dash = input()
T = input()
len_S = len(S_dash)
len_T = len(T)
candidate0 = list(S_dash)
ans = "z"*51
if len_S >= len_T:
for s in range(len_S):
cha_S = S_dash[s]
if cha_S != "?" and cha_S in list(T):
for t in range(len_T):
cha_T = T[t]
if cha_S == cha_T and 0<= s-t < len_S and 0< s-t+len_T <= len_S:
S_cut = S_dash[s-t:s-t+len_T]
candidate1 = copy.deepcopy(candidate0)
for x in range(len_T):
if S_cut[x] == "?":
candidate1[s-t+x] = T[x]
elif S_cut[x] != T[x]:
break
else:
if "".join(candidate1[s-t:s-t+len_T]) == T:
if ans > "".join(candidate1).replace('?', 'a'):
ans = "".join(candidate1).replace('?', 'a')
for u in range(len_S-len_T+1):
cut_S = S_dash[u:u+len_T]
if cut_S.count("?") == len_T:
candidate1 = copy.deepcopy(candidate0)
for t in range(len_T):
candidate1[u+t] = T[t]
if ans > "".join(candidate1).replace('?', 'a'):
ans = "".join(candidate1).replace('?', 'a')
if ans == "z"*51:
ans = "UNRESTORABLE"
print(ans) | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
afb650659264e114dd31253efb373c7806e49a31 | 68728961294d360d26e8149e7e0a4816adf20842 | /utils/file_utils.py | 6754798f0e5a5d92d67229d23ed3d1bf4d5cd4e1 | [] | no_license | Dawn-Flying/text_summarization | d334fe884aa3a6341dd7bc381b03c1ab3e2c057e | ab68555c6f455c4f14fead5fc1c49420cdef8dc4 | refs/heads/master | 2023-07-17T07:49:21.995004 | 2021-08-26T15:46:19 | 2021-08-26T15:46:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | # coding=utf-8
import time
import os
from utils.config import results_dir
def get_result_filename(batch_size, epochs, max_length_inp, embedding_dim, commit=''):
    """Build a timestamped CSV file name encoding the run hyperparameters.

    Args:
        batch_size: training batch size.
        epochs: number of training epochs.
        max_length_inp: maximum input sequence length.
        embedding_dim: embedding dimensionality.
        commit: optional suffix inserted just before the ``.csv`` extension.

    Returns:
        str: e.g. ``2024_01_01_12_00_00_batch_size_32_epochs_5_
        max_length_inp_200_embedding_dim_300.csv`` (timestamp is the
        current local time).
    """
    now_time = time.strftime('%Y_%m_%d_%H_%M_%S')
    filename = now_time + (
        f'_batch_size_{batch_size}_epochs_{epochs}'
        f'_max_length_inp_{max_length_inp}_embedding_dim_{embedding_dim}{commit}.csv'
    )
    return filename
| [
"184419810@qq.com"
] | 184419810@qq.com |
7fbababfe191ed000fffba22a9dbb5a9a39a966b | 4f923bec0048a74177b31ed5261f1e1df0c98c73 | /pytorch_example.py | 9b5bc3aa4755c3d49d8cbf1bd2c7ee35eece8986 | [] | no_license | EngineerKhan/Equi-RC | 33c6c16873ba87620e861d1af7bd3ee3e380976c | e2228c8946b1f0f10639f593dc5b0c074744cacb | refs/heads/main | 2023-06-23T19:27:07.991533 | 2021-07-16T10:19:19 | 2021-07-16T10:19:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,934 | py | import torch
import torch.nn as nn
from pytorch_rclayers import RegToRegConv, RegToIrrepConv, IrrepToIrrepConv, IrrepActivationLayer, \
IrrepConcatLayer, IrrepBatchNorm, RegBatchNorm, RegConcatLayer, ToKmerLayer
class CustomRCPS(nn.Module):
    """Reverse-complement parameter sharing (RCPS) binary classifier.

    Takes one-hot encoded windows of 1000 base pairs, shape
    ``(batch, 4, 1000)``, and outputs a sigmoid probability. The
    architecture follows Shrikumar et al.'s RCPS paper, rebuilt from
    equivariant regular-representation layers, with an optional k-mer
    encoding front end.
    """

    def __init__(self,
                 filters=(16, 16, 16),
                 kernel_sizes=(15, 14, 14),
                 pool_size=40,
                 pool_strides=20,
                 out_size=1,
                 placeholder_bn=False,
                 kmers=1):
        """
        :param filters: regular-representation width of each conv layer.
        :param kernel_sizes: one kernel size per conv layer.
        :param pool_size: max-pooling window.
        :param pool_strides: max-pooling stride.
        :param out_size: number of output units (1 for binary).
        :param placeholder_bn: if True, batch-norm layers act as pass-throughs.
        :param kmers: k of the k-mer input encoding (1 keeps the raw one-hot).
        """
        super(CustomRCPS, self).__init__()
        self.kmers = int(kmers)
        self.to_kmer = ToKmerLayer(k=self.kmers)
        # Each regular representation pairs two strand-swapped features,
        # hence half of the k-mer feature count.
        reg_in = self.to_kmer.features // 2
        filters = [reg_in] + list(filters)

        # Trunk: a sequence of (conv, batch-norm, activation) triples.
        self.reg_layers = nn.ModuleList()
        self.bn_layers = nn.ModuleList()
        self.activation_layers = nn.ModuleList()
        for i in range(len(filters) - 1):
            prev_reg = filters[i]
            next_reg = filters[i + 1]
            self.reg_layers.append(RegToRegConv(
                reg_in=prev_reg,
                reg_out=next_reg,
                kernel_size=kernel_sizes[i],
            ))
            self.bn_layers.append(RegBatchNorm(reg_dim=next_reg, placeholder=placeholder_bn))
            # NOTE(review): the original computed an unused "is last layer"
            # flag here (dead code, removed) yet still appended a ReLU to
            # every layer; that behavior is preserved.
            self.activation_layers.append(nn.ReLU())

        self.concat = RegConcatLayer(reg=filters[-1])
        self.pool = nn.MaxPool1d(kernel_size=pool_size, stride=pool_strides)
        self.flattener = nn.Flatten()
        # in_features=752 is hard-wired to the default 1000-bp input with the
        # default filters/pooling; other settings need a different value.
        self.dense = nn.Linear(in_features=752, out_features=out_size)

    def forward(self, inputs):
        x = self.to_kmer(inputs)
        for reg_layer, bn_layer, activation_layer in zip(self.reg_layers, self.bn_layers, self.activation_layers):
            x = reg_layer(x)
            x = bn_layer(x)
            x = activation_layer(x)
        # Merge the two strands' feature maps, pool, flatten and classify.
        x = self.concat(x)
        x = self.pool(x)
        x = self.flattener(x)
        x = self.dense(x)
        outputs = torch.sigmoid(x)
        return outputs
class EquiNetBinary(nn.Module):
    """Equivariant binary classifier over irreducible-representation features.

    Takes one-hot encoded windows of 1000 base pairs, shape
    ``(batch, 4, 1000)``, and outputs a sigmoid probability. The first
    convolution maps the regular (strand-paired) representation into an
    irrep (a/b) feature space; subsequent layers stay irrep-to-irrep.
    """

    def __init__(self,
                 filters=((16, 16), (16, 16), (16, 16)),
                 kernel_sizes=(15, 14, 14),
                 pool_size=40,
                 pool_length=20,
                 out_size=1,
                 placeholder_bn=False,
                 kmers=1):
        """
        :param filters: per conv layer, the (a, b) irrep dimensions.
        :param kernel_sizes: one kernel size per conv layer.
        :param pool_size: max-pooling window.
        :param pool_length: max-pooling stride.
        :param out_size: number of output units (1 for binary).
        :param placeholder_bn: if True, batch-norm layers act as pass-throughs.
        :param kmers: k of the k-mer input encoding (1 keeps the raw one-hot).
        """
        super(EquiNetBinary, self).__init__()
        self.kmers = int(kmers)
        self.to_kmer = ToKmerLayer(k=self.kmers)

        # First mapping goes from the input to an irrep feature space.
        # Each regular representation pairs two strand-swapped features,
        # hence half of the k-mer feature count.
        reg_in = self.to_kmer.features // 2
        first_kernel_size = kernel_sizes[0]
        first_a, first_b = filters[0]
        self.last_a, self.last_b = filters[-1]
        self.reg_irrep = RegToIrrepConv(reg_in=reg_in,
                                        a_out=first_a,
                                        b_out=first_b,
                                        kernel_size=first_kernel_size)
        self.first_bn = IrrepBatchNorm(a=first_a, b=first_b, placeholder=placeholder_bn)
        self.first_act = IrrepActivationLayer(a=first_a, b=first_b)

        # Intermediate trunk: a sequence of (conv, BN, activation) triples.
        self.irrep_layers = nn.ModuleList()
        self.bn_layers = nn.ModuleList()
        self.activation_layers = nn.ModuleList()
        for i in range(1, len(filters)):
            prev_a, prev_b = filters[i - 1]
            next_a, next_b = filters[i]
            self.irrep_layers.append(IrrepToIrrepConv(
                a_in=prev_a,
                b_in=prev_b,
                a_out=next_a,
                b_out=next_b,
                kernel_size=kernel_sizes[i],
            ))
            self.bn_layers.append(IrrepBatchNorm(a=next_a, b=next_b, placeholder=placeholder_bn))
            self.activation_layers.append(IrrepActivationLayer(a=next_a,
                                                               b=next_b))

        self.concat = IrrepConcatLayer(a=self.last_a, b=self.last_b)
        self.pool = nn.MaxPool1d(kernel_size=pool_size, stride=pool_length)
        self.flattener = nn.Flatten()
        # NOTE(review): in_features=1472 appears hard-wired to the default
        # 1000-bp input with the default filters/pooling — confirm before
        # changing any of those defaults.
        self.dense = nn.Linear(in_features=1472, out_features=out_size)
        self.final_activation = nn.Sigmoid()

    def forward(self, inputs):
        x = self.to_kmer(inputs)
        x = self.reg_irrep(x)
        x = self.first_bn(x)
        x = self.first_act(x)

        for irrep_layer, bn_layer, activation_layer in zip(self.irrep_layers, self.bn_layers, self.activation_layers):
            x = irrep_layer(x)
            x = bn_layer(x)
            x = activation_layer(x)

        # NOTE(review): cast to float32 before the pooling/dense head —
        # presumably because earlier layers run in double precision; confirm.
        x = x.float()
        # Merge the two strands' feature maps, pool, flatten and classify.
        x = self.concat(x)
        x = self.pool(x)
        x = self.flattener(x)
        x = self.dense(x)
        outputs = self.final_activation(x)
        return outputs
if __name__ == '__main__':
    # Smoke test: push a dummy one-hot batch of shape
    # (batch=1, channels=4, length=1000) through the k-mer equivariant model.
    inputs = torch.ones(size=(1, 4, 1000)).double()
    model = EquiNetBinary(kmers=2, filters=((24, 8), (24, 8), (24, 8)))
    outputs = model(inputs)
| [
"vincent.mallet96@gmail.com"
] | vincent.mallet96@gmail.com |
dde2cf28a3ea139ec0d626a79fe3807ac95e2aa5 | fa346a2d5886420e22707a7be03599e634b230a9 | /temboo/Library/GitHub/GistsAPI/Comments/DeleteComment.py | 605046638edf1fa9b4c894ae8181868343c2aca5 | [] | no_license | elihuvillaraus/entity-resolution | cebf937499ed270c3436b1dd25ab4aef687adc11 | 71dd49118a6e11b236861289dcf36436d31f06bc | refs/heads/master | 2021-12-02T17:29:11.864065 | 2014-01-08T04:29:30 | 2014-01-08T04:29:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# DeleteComment
# Deletes a specified comment.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class DeleteComment(Choreography):
    """Temboo Choreo wrapper for GitHub's "delete a gist comment" endpoint."""

    def __init__(self, temboo_session):
        """
        Create a new instance of the DeleteComment Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        Choreography.__init__(self, temboo_session, '/Library/GitHub/GistsAPI/Comments/DeleteComment')

    def new_input_set(self):
        # Factory for the Choreo-specific input container.
        return DeleteCommentInputSet()

    def _make_result_set(self, result, path):
        # Factory for the Choreo-specific result container.
        return DeleteCommentResultSet(result, path)

    def _make_execution(self, session, exec_id, path):
        # Factory for an asynchronous execution handle.
        return DeleteCommentChoreographyExecution(session, exec_id, path)
class DeleteCommentInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the DeleteComment
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """

    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved during the OAuth process.)
        """
        InputSet._set_input(self, 'AccessToken', value)

    def set_ID(self, value):
        """
        Set the value of the ID input for this Choreo. ((required, string) The id of the comment to delete.)
        """
        InputSet._set_input(self, 'ID', value)
class DeleteCommentResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the DeleteComment Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """

    def getJSONFromString(self, str):
        # NOTE(review): the parameter shadows the builtin ``str``; renaming it
        # would change the keyword-call interface, so it is left as-is.
        return json.loads(str)

    def get_Limit(self):
        """
        Retrieve the value for the "Limit" output from this Choreo execution. ((integer) The available rate limit for your account. This is returned in the GitHub response header.)
        """
        return self._output.get('Limit', None)

    def get_Remaining(self):
        """
        Retrieve the value for the "Remaining" output from this Choreo execution. ((integer) The remaining number of API requests available to you. This is returned in the GitHub response header.)
        """
        return self._output.get('Remaining', None)

    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from GitHub.)
        """
        return self._output.get('Response', None)
class DeleteCommentChoreographyExecution(ChoreographyExecution):
    """Asynchronous execution handle that yields DeleteComment result sets."""

    def _make_result_set(self, response, path):
        return DeleteCommentResultSet(response, path)
| [
"cedric.warny@gmail.com"
] | cedric.warny@gmail.com |
8909c7579436ee650bba7657c438776d878afd26 | e2fb6865a02573709d26e8d8b4c52f2cd687da10 | /utils/__init__.py | 91bd60a419e4d2c09e9b69adff0b69d0b86ee136 | [] | no_license | tbarbugli/saleor | 0af6e21c540ce0af64b06a44821b82373c96d028 | caf9b245c35611c34094f59443da51a4e9657bfd | refs/heads/master | 2020-12-25T03:20:45.574256 | 2013-06-02T20:43:06 | 2013-06-02T20:43:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 511 | py | from compressor.templatetags.compress import CompressorNode
from django.template.base import Template
def seizaki_compress(context, data, name):
    """Run template content through django-compressor's {% compress %} machinery.

    Args:
        context: the active template rendering context.
        data: the raw template string (e.g. the list of <script>/<link> tags
            collected by sekizai).
        name: the sekizai namespace, either 'js' or 'css'.

    Returns:
        The rendered, compressed markup for the given block.
    """
    # Equivalent to wrapping ``data`` in {% compress name %} ... {% endcompress %}:
    # parse it into a nodelist and hand it to CompressorNode in 'file' mode.
    # (A stray Python-2 debug ``print data`` statement was removed here; it
    # broke importing under Python 3 and leaked output on every call.)
    return CompressorNode(
        nodelist=Template(data).nodelist, kind=name, mode='file'
    ).render(context=context)
"tbarbugli@gmail.com"
] | tbarbugli@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.