Dataset schema (column name, dtype, and observed range or class count):

column                 dtype           range / classes
blob_id                stringlengths   40 .. 40
directory_id           stringlengths   40 .. 40
path                   stringlengths   2 .. 616
content_id             stringlengths   40 .. 40
detected_licenses      listlengths     0 .. 69
license_type           stringclasses   2 values
repo_name              stringlengths   5 .. 118
snapshot_id            stringlengths   40 .. 40
revision_id            stringlengths   40 .. 40
branch_name            stringlengths   4 .. 63
visit_date             timestamp[us]
revision_date          timestamp[us]
committer_date         timestamp[us]
github_id              int64           2.91k .. 686M
star_events_count      int64           0 .. 209k
fork_events_count      int64           0 .. 110k
gha_license_id         stringclasses   23 values
gha_event_created_at   timestamp[us]
gha_created_at         timestamp[us]
gha_language           stringclasses   220 values
src_encoding           stringclasses   30 values
language               stringclasses   1 value
is_vendor              bool            2 classes
is_generated           bool            2 classes
length_bytes           int64           2 .. 10.3M
extension              stringclasses   257 values
content                stringlengths   2 .. 10.3M
authors                listlengths     1 .. 1
author_id              stringlengths   0 .. 212
e0d5509edde2bc597a60a52985623e184213d1fb
8925916f67b9b77290020c932d97314a284d0595
/contrib/spendfrom/spendfrom.py
6765e86936b351c633817fea871d6398e2e214b0
[ "MIT" ]
permissive
btcnode/btcnode
57c44726c6e289b893d07fde9146457c984324ab
c5005ee73e5e640e3a24a9c5648d20a30671652b
refs/heads/master
2023-04-05T04:11:07.691316
2021-03-19T22:12:37
2021-03-19T22:12:37
349,072,293
0
0
null
null
null
null
UTF-8
Python
false
false
10,032
py
#!/usr/bin/env python
#
# Use the raw transactions API to spend BTNs received on particular addresses,
# and send any change back to that same address.
#
# Example usage:
#  spendfrom.py  # Lists available funds
#  spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00
#
# Assumes it will talk to a btcnoded or btcnode-Qt running
# on localhost.
#
# Depends on jsonrpc
#

from decimal import *
import getpass
import math
import os
import os.path
import platform
import sys
import time
from jsonrpc import ServiceProxy, json

BASE_FEE=Decimal("0.001")

def check_json_precision():
    """Make sure json library being used does not lose precision converting BTC values"""
    n = Decimal("20000000.00000003")
    satoshis = int(json.loads(json.dumps(float(n)))*1.0e8)
    if satoshis != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")

def determine_db_dir():
    """Return the default location of the btcnode data directory"""
    if platform.system() == "Darwin":
        return os.path.expanduser("~/Library/Application Support/Btcnode/")
    elif platform.system() == "Windows":
        return os.path.join(os.environ['APPDATA'], "Btcnode")
    return os.path.expanduser("~/.btcnode")

def read_bitcoin_config(dbdir):
    """Read the btcnode.conf file from dbdir, returns dictionary of settings"""
    from ConfigParser import SafeConfigParser

    class FakeSecHead(object):
        def __init__(self, fp):
            self.fp = fp
            self.sechead = '[all]\n'
        def readline(self):
            if self.sechead:
                try:
                    return self.sechead
                finally:
                    self.sechead = None
            else:
                s = self.fp.readline()
                if s.find('#') != -1:
                    s = s[0:s.find('#')].strip() +"\n"
                return s

    config_parser = SafeConfigParser()
    config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "btcnode.conf"))))
    return dict(config_parser.items("all"))

def connect_JSON(config):
    """Connect to a btcnode JSON-RPC server"""
    testnet = config.get('testnet', '0')
    testnet = (int(testnet) > 0)  # 0/1 in config file, convert to True/False
    if not 'rpcport' in config:
        config['rpcport'] = 40743 if testnet else 30743
    connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport'])
    try:
        result = ServiceProxy(connect)
        # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors,
        # but also make sure the btcnoded we're talking to is/isn't testnet:
        if result.getmininginfo()['testnet'] != testnet:
            sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n")
            sys.exit(1)
        return result
    except:
        sys.stderr.write("Error connecting to RPC server at "+connect+"\n")
        sys.exit(1)

def unlock_wallet(btcnoded):
    info = btcnoded.getinfo()
    if 'unlocked_until' not in info:
        return True  # wallet is not encrypted
    t = int(info['unlocked_until'])
    if t <= time.time():
        try:
            passphrase = getpass.getpass("Wallet is locked; enter passphrase: ")
            btcnoded.walletpassphrase(passphrase, 5)
        except:
            sys.stderr.write("Wrong passphrase\n")

    info = btcnoded.getinfo()
    return int(info['unlocked_until']) > time.time()

def list_available(btcnoded):
    address_summary = dict()

    address_to_account = dict()
    for info in btcnoded.listreceivedbyaddress(0):
        address_to_account[info["address"]] = info["account"]

    unspent = btcnoded.listunspent(0)
    for output in unspent:
        # listunspent doesn't give addresses, so:
        rawtx = btcnoded.getrawtransaction(output['txid'], 1)
        vout = rawtx["vout"][output['vout']]
        pk = vout["scriptPubKey"]

        # This code only deals with ordinary pay-to-btcnode-address
        # or pay-to-script-hash outputs right now; anything exotic is ignored.
        if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash":
            continue

        address = pk["addresses"][0]
        if address in address_summary:
            address_summary[address]["total"] += vout["value"]
            address_summary[address]["outputs"].append(output)
        else:
            address_summary[address] = {
                "total" : vout["value"],
                "outputs" : [output],
                "account" : address_to_account.get(address, "")
                }
    return address_summary

def select_coins(needed, inputs):
    # Feel free to improve this, this is good enough for my simple needs:
    outputs = []
    have = Decimal("0.0")
    n = 0
    while have < needed and n < len(inputs):
        outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]})
        have += inputs[n]["amount"]
        n += 1
    return (outputs, have-needed)

def create_tx(btcnoded, fromaddresses, toaddress, amount, fee):
    all_coins = list_available(btcnoded)

    total_available = Decimal("0.0")
    needed = amount+fee
    potential_inputs = []
    for addr in fromaddresses:
        if addr not in all_coins:
            continue
        potential_inputs.extend(all_coins[addr]["outputs"])
        total_available += all_coins[addr]["total"]

    if total_available < needed:
        sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed));
        sys.exit(1)

    #
    # Note:
    # Python's json/jsonrpc modules have inconsistent support for Decimal numbers.
    # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode
    # Decimals, I'm casting amounts to float before sending them to btcnoded.
    #
    outputs = { toaddress : float(amount) }
    (inputs, change_amount) = select_coins(needed, potential_inputs)
    if change_amount > BASE_FEE:  # don't bother with zero or tiny change
        change_address = fromaddresses[-1]
        if change_address in outputs:
            outputs[change_address] += float(change_amount)
        else:
            outputs[change_address] = float(change_amount)

    rawtx = btcnoded.createrawtransaction(inputs, outputs)
    signed_rawtx = btcnoded.signrawtransaction(rawtx)
    if not signed_rawtx["complete"]:
        sys.stderr.write("signrawtransaction failed\n")
        sys.exit(1)
    txdata = signed_rawtx["hex"]

    return txdata

def compute_amount_in(btcnoded, txinfo):
    result = Decimal("0.0")
    for vin in txinfo['vin']:
        in_info = btcnoded.getrawtransaction(vin['txid'], 1)
        vout = in_info['vout'][vin['vout']]
        result = result + vout['value']
    return result

def compute_amount_out(txinfo):
    result = Decimal("0.0")
    for vout in txinfo['vout']:
        result = result + vout['value']
    return result

def sanity_test_fee(btcnoded, txdata_hex, max_fee):
    class FeeError(RuntimeError):
        pass
    try:
        txinfo = btcnoded.decoderawtransaction(txdata_hex)
        total_in = compute_amount_in(btcnoded, txinfo)
        total_out = compute_amount_out(txinfo)
        if total_in-total_out > max_fee:
            raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out))

        tx_size = len(txdata_hex)/2
        kb = tx_size/1000  # integer division rounds down
        actual_fee = total_in-total_out  # fee actually paid by this transaction
        if kb > 1 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes")
        if total_in < 0.01 and actual_fee < BASE_FEE:
            raise FeeError("Rejecting no-fee, tiny-amount transaction")
        # Exercise for the reader: compute transaction priority, and
        # warn if this is a very-low-priority transaction

    except FeeError as err:
        sys.stderr.write((str(err)+"\n"))
        sys.exit(1)

def main():
    import optparse

    parser = optparse.OptionParser(usage="%prog [options]")
    parser.add_option("--from", dest="fromaddresses", default=None,
                      help="addresses to get BTNs from")
    parser.add_option("--to", dest="to", default=None,
                      help="address to send BTNs to")
    parser.add_option("--amount", dest="amount", default=None,
                      help="amount to send")
    parser.add_option("--fee", dest="fee", default="0.0",
                      help="fee to include")
    parser.add_option("--datadir", dest="datadir", default=determine_db_dir(),
                      help="location of btcnode.conf file with RPC username/password (default: %default)")
    parser.add_option("--testnet", dest="testnet", default=False, action="store_true",
                      help="Use the test network")
    parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true",
                      help="Don't broadcast the transaction, just create and print the transaction data")

    (options, args) = parser.parse_args()

    check_json_precision()
    config = read_bitcoin_config(options.datadir)
    if options.testnet: config['testnet'] = True
    btcnoded = connect_JSON(config)

    if options.amount is None:
        address_summary = list_available(btcnoded)
        for address,info in address_summary.iteritems():
            n_transactions = len(info['outputs'])
            if n_transactions > 1:
                print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions))
            else:
                print("%s %.8f %s"%(address, info['total'], info['account']))
    else:
        fee = Decimal(options.fee)
        amount = Decimal(options.amount)
        while unlock_wallet(btcnoded) == False:
            pass  # Keep asking for passphrase until they get it right
        txdata = create_tx(btcnoded, options.fromaddresses.split(","), options.to, amount, fee)
        sanity_test_fee(btcnoded, txdata, amount*Decimal("0.01"))
        if options.dry_run:
            print(txdata)
        else:
            txid = btcnoded.sendrawtransaction(txdata)
            print(txid)

if __name__ == '__main__':
    main()
[ "root@vmi546684.contaboserver.net" ]
root@vmi546684.contaboserver.net
376e21623d3fc2c5d5c28e523d7aebd69a3d0cb9
54869fe38624f1c4338f8dc5dd5f0d89aa17c9e4
/p02.py
09a6e54ab7e48ccdcf3c8f292f157dc5bbaa7bcc
[]
no_license
qoqosz/Advent-of-Code-2020
d3b3512eb3b59b7404189ad094a2cc3b8ddb07be
e27928482e8fc9f30aea3fed21e11f8d8743431d
refs/heads/master
2023-02-05T22:17:45.767109
2020-12-24T21:20:57
2020-12-24T21:21:06
318,346,507
0
0
null
null
null
null
UTF-8
Python
false
false
472
py
from collections import Counter

p1_count, p2_count = 0, 0

with open('p02.txt') as f:
    for line in f:
        rng, char, text = line.split(' ')
        min_, max_ = map(int, rng.split('-'))
        char = char.strip(':')
        counter = Counter(text)

        if min_ <= counter[char] <= max_:
            p1_count += 1
        if (char == text[min_ - 1]) ^ (char == text[max_ - 1]):
            p2_count += 1

print('Part 1:', p1_count)
print('Part 2:', p2_count)
[ "lukasz@bednarski.me" ]
lukasz@bednarski.me
0846c0ba23ee639e01b60dbedf18499542be341e
0156fd64d89df94c1f7fdb6003a11272a24e987e
/divineai1/manage.py
c3d5d325963809a3971f1249582a433cdcf6c1dc
[]
no_license
Vengers-Ritam/divineplatform
96b7f762188417ccf69586711abb1b6fbf3533b5
b25fdb3fc4e96bbe328b9d3c7f5ef390202ed8c6
refs/heads/master
2023-04-24T05:00:08.594565
2021-05-03T11:23:43
2021-05-03T11:23:43
363,904,149
0
0
null
null
null
null
UTF-8
Python
false
false
629
py
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys


def main():
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'divineai1.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
[ "ritammoharana32@gmail.com" ]
ritammoharana32@gmail.com
8ab3356264d79c13893cbd6fbacaabfc53946f25
55be4a49ed1cd1b8b7b0ac2e6fa55aa58c180e15
/ICS 32/Project 4/test_project4.py
490730e4bd9f5d19726a62c27289657fd2313e99
[]
no_license
rvcervan/ICS-32-Projects-Python
bc64abda6ea20f63542bd121f1161d31d23a2d42
ec01343708028fbe07fc95dc229bd111c12c3836
refs/heads/main
2023-07-01T16:41:48.506241
2021-08-09T05:13:24
2021-08-09T05:13:24
394,155,974
0
0
null
null
null
null
UTF-8
Python
false
false
7,133
py
import mechanics
import unittest


class GameTest(unittest.TestCase):
    def setUp(self):
        self._game = mechanics.Game([[' ', ' ', ' '],
                                     [' ', ' ', ' '],
                                     [' ', ' ', ' '],
                                     [' ', ' ', ' '],
                                     [' ', ' ', ' '],
                                     [' ', ' ', ' ']], 4, 3)

    def test_faller_rotates(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', ' '],
                             [' ', ' ', '[S]'],
                             [' ', ' ', '[T]'],
                             [' ', ' ', '[V]'],
                             [' ', ' ', ' ']]
        self.assertEqual(self._game.rotate(), [[' ', ' ', ' '],
                                               [' ', ' ', ' '],
                                               [' ', ' ', '[V]'],
                                               [' ', ' ', '[S]'],
                                               [' ', ' ', '[T]'],
                                               [' ', ' ', ' ']])

    def test_faller_moves_left(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', ' '],
                             [' ', ' ', '[S]'],
                             [' ', ' ', '[T]'],
                             [' ', ' ', '[V]'],
                             [' ', ' ', ' ']]
        self.assertEqual(self._game.move_left(), [[' ', ' ', ' '],
                                                  [' ', ' ', ' '],
                                                  [' ', '[S]', ' '],
                                                  [' ', '[T]', ' '],
                                                  [' ', '[V]', ' '],
                                                  [' ', ' ', ' ']])
        self.assertEqual(self._game.move_left(), [[' ', ' ', ' '],
                                                  [' ', ' ', ' '],
                                                  ['[S]', ' ', ' '],
                                                  ['[T]', ' ', ' '],
                                                  ['[V]', ' ', ' '],
                                                  [' ', ' ', ' ']])

    def test_faller_moves_right(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', ' '],
                             ['[S]', ' ', ' '],
                             ['[T]', ' ', ' '],
                             ['[V]', ' ', ' '],
                             [' ', ' ', ' ']]
        self.assertEqual(self._game.move_right(), [[' ', ' ', ' '],
                                                   [' ', ' ', ' '],
                                                   [' ', '[S]', ' '],
                                                   [' ', '[T]', ' '],
                                                   [' ', '[V]', ' '],
                                                   [' ', ' ', ' ']])
        self.assertEqual(self._game.move_right(), [[' ', ' ', ' '],
                                                   [' ', ' ', ' '],
                                                   [' ', ' ', '[S]'],
                                                   [' ', ' ', '[T]'],
                                                   [' ', ' ', '[V]'],
                                                   [' ', ' ', ' ']])

    def test_pieces_match(self):
        self._game._board = [[' T ', ' ', ' '],
                             [' ', ' T ', ' '],
                             [' W ', ' ', ' T '],
                             [' W ', ' ', ' S '],
                             [' W ', ' S ', ' '],
                             [' S ', ' S ', ' S ']]
        self.assertEqual(self._game.replace_matching(), [['*T*', ' ', ' '],
                                                         [' ', '*T*', ' '],
                                                         ['*W*', ' ', '*T*'],
                                                         ['*W*', ' ', '*S*'],
                                                         ['*W*', '*S*', ' '],
                                                         ['*S*', '*S*', '*S*']])

    def test_pieces_fill_empty_space(self):
        self._game._board = [[' S ', ' ', ' V '],
                             [' ', ' ', ' '],
                             [' T ', ' ', ' '],
                             [' ', ' Y ', ' '],
                             [' W ', ' X ', ' '],
                             [' ', ' ', ' V ']]
        self.assertEqual(self._game.fill_empty_space(), [[' ', ' ', ' '],
                                                         [' ', ' ', ' '],
                                                         [' ', ' ', ' '],
                                                         [' S ', ' ', ' '],
                                                         [' T ', ' Y ', ' V '],
                                                         [' W ', ' X ', ' V ']])

    def test_freeze_faller_if_landed(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', ' '],
                             [' ', ' ', '[Z]'],
                             ['[S]', ' ', '[Y]'],
                             ['[T]', ' ', '[X]'],
                             ['[V]', ' ', ' W ']]
        self.assertEqual(self._game.freeze_faller(), [[' ', ' ', ' '],
                                                      [' ', ' ', ' '],
                                                      [' ', ' ', '|Z|'],
                                                      ['|S|', ' ', '|Y|'],
                                                      ['|T|', ' ', '|X|'],
                                                      ['|V|', ' ', ' W ']])

    def test_unfreeze_faller_if_moved(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', ' '],
                             [' ', ' ', '|Z|'],
                             [' ', ' ', '|Y|'],
                             [' ', ' ', '|X|'],
                             [' ', ' ', ' W ']]
        self.assertEqual(self._game.move_left(), self._game.unfreeze_faller(),
                         [[' ', ' ', ' '],
                          [' ', ' ', ' '],
                          [' ', '[Z]', ' '],
                          [' ', '[Y]', ' '],
                          [' ', '[X]', ' '],
                          [' ', ' ', ' W ']])

    def test_if_faller_is_falling(self):
        self._game._board = [[' ', ' ', ' '],
                             [' ', ' ', '[S]'],
                             [' ', ' ', '[T]'],
                             [' ', ' ', '[V]'],
                             [' ', ' ', ' '],
                             [' ', ' ', ' ']]
        self.assertEqual(self._game.falling(), [[' ', ' ', ' '],
                                                [' ', ' ', ' '],
                                                [' ', ' ', '[S]'],
                                                [' ', ' ', '[T]'],
                                                [' ', ' ', '[V]'],
                                                [' ', ' ', ' ']])


if __name__ == '__main__':
    unittest.main()
[ "noreply@github.com" ]
rvcervan.noreply@github.com
3890719b1de619a46527dd653f3b42ca89a5dcb1
430cfece27c54180baf29b3199a67f79fe7d155c
/pygmt/tests/test_grdimage.py
5ad3913c5bef4b786a5d1de55c30b647ec31619e
[ "BSD-3-Clause" ]
permissive
JamieJQuinn/pygmt
139f25a3f4280b2d2d43c3fa63179437a9227d31
9269fbcb2fc7fca2d5c412acdb794be375c260ab
refs/heads/main
2023-08-24T16:19:27.673739
2021-10-29T09:51:44
2021-10-29T09:51:44
384,119,354
0
0
BSD-3-Clause
2021-07-08T12:37:21
2021-07-08T12:37:21
null
UTF-8
Python
false
false
7,213
py
""" Test Figure.grdimage. """ import numpy as np import pytest import xarray as xr from pygmt import Figure from pygmt.datasets import load_earth_relief from pygmt.exceptions import GMTInvalidInput from pygmt.helpers.testing import check_figures_equal @pytest.fixture(scope="module", name="grid") def fixture_grid(): """ Load the grid data from the sample earth_relief file. """ return load_earth_relief(registration="gridline") @pytest.fixture(scope="module", name="grid_360") def fixture_grid_360(grid): """ Earth relief grid with longitude range from 0 to 360 (instead of -180 to 180). """ _grid = grid.copy() # get a copy of original earth_relief grid _grid.encoding.pop("source") # unlink earth_relief NetCDF source _grid["lon"] = np.arange(0, 361, 1) # convert longitude from -180:180 to 0:360 return _grid @pytest.fixture(scope="module", name="xrgrid") def fixture_xrgrid(): """ Create a sample xarray.DataArray grid for testing. """ longitude = np.arange(0, 360, 1) latitude = np.arange(-89, 90, 1) x = np.sin(np.deg2rad(longitude)) y = np.linspace(start=0, stop=1, num=179) data = y[:, np.newaxis] * x return xr.DataArray( data, coords=[ ("latitude", latitude, {"units": "degrees_north"}), ("longitude", longitude, {"units": "degrees_east"}), ], attrs={"actual_range": [-1, 1]}, ) @pytest.mark.mpl_image_compare def test_grdimage(grid): """ Plot an image using an xarray grid. """ fig = Figure() fig.grdimage(grid, cmap="earth", projection="W0/6i") return fig @pytest.mark.mpl_image_compare def test_grdimage_slice(grid): """ Plot an image using an xarray grid that has been sliced. """ grid_ = grid.sel(lat=slice(-30, 30)) fig = Figure() fig.grdimage(grid_, cmap="earth", projection="M6i") return fig @pytest.mark.mpl_image_compare def test_grdimage_file(): """ Plot an image using file input. """ fig = Figure() fig.grdimage( "@earth_relief_01d_g", cmap="ocean", region=[-180, 180, -70, 70], projection="W0/10i", shading=True, ) return fig @check_figures_equal() @pytest.mark.parametrize( "shading", [True, 0.5, "+a30+nt0.8", "@earth_relief_01d_g+d", "@earth_relief_01d_g+a60+nt0.8"], ) def test_grdimage_shading_xarray(grid, shading): """ Test that shading works well for xarray. The ``shading`` can be True, a constant intensity, some modifiers, or a grid with modifiers. See https://github.com/GenericMappingTools/pygmt/issues/364 and https://github.com/GenericMappingTools/pygmt/issues/618. """ fig_ref, fig_test = Figure(), Figure() kwargs = dict( region=[-180, 180, -90, 90], frame=True, projection="Cyl_stere/6i", cmap="geo", shading=shading, ) fig_ref.grdimage("@earth_relief_01d_g", **kwargs) fig_test.grdimage(grid, **kwargs) return fig_ref, fig_test @pytest.mark.xfail( reason="Incorrect scaling of geo CPT on xarray.DataArray grdimage plot." "See https://github.com/GenericMappingTools/gmt/issues/5294", ) @check_figures_equal() def test_grdimage_grid_and_shading_with_xarray(grid, xrgrid): """ Test that shading works well when xarray.DataArray is input to both the ``grid`` and ``shading`` arguments. """ fig_ref, fig_test = Figure(), Figure() fig_ref.grdimage( grid="@earth_relief_01d_g", region="GL", cmap="geo", shading=xrgrid, verbose="i" ) fig_ref.colorbar() fig_test.grdimage(grid=grid, region="GL", cmap="geo", shading=xrgrid, verbose="i") fig_test.colorbar() return fig_ref, fig_test def test_grdimage_fails(): """ Should fail for unrecognized input. 
""" fig = Figure() with pytest.raises(GMTInvalidInput): fig.grdimage(np.arange(20).reshape((4, 5))) @pytest.mark.mpl_image_compare def test_grdimage_over_dateline(xrgrid): """ Ensure no gaps are plotted over the 180 degree international dateline. Specifically checking that `xrgrid.gmt.gtype = 1` sets `GMT_GRID_IS_GEO`, and that `xrgrid.gmt.registration = 0` sets `GMT_GRID_NODE_REG`. Note that there would be a gap over the dateline if a pixel registered grid is used. See also https://github.com/GenericMappingTools/pygmt/issues/375. """ fig = Figure() assert xrgrid.gmt.registration == 0 # gridline registration xrgrid.gmt.gtype = 1 # geographic coordinate system fig.grdimage(grid=xrgrid, region="g", projection="A0/0/1c") return fig @pytest.mark.mpl_image_compare def test_grdimage_global_subset(grid_360): """ Ensure subsets of grids are plotted correctly on a global map. Specifically checking that xarray.DataArray grids can wrap around the left and right sides on a Mollweide projection (W) plot correctly. Note that a Cartesian grid is used here instead of a Geographic grid (i.e. GMT_GRID_IS_CARTESIAN). This is a regression test for https://github.com/GenericMappingTools/pygmt/issues/732. """ # Get a slice of South America and Africa only (lat=-90:31, lon=-180:41) sliced_grid = grid_360[0:121, 0:221] assert sliced_grid.gmt.registration == 0 # gridline registration assert sliced_grid.gmt.gtype == 0 # Cartesian coordinate system fig = Figure() fig.grdimage( grid=sliced_grid, cmap="vik", region="g", projection="W0/3.5c", frame=True ) return fig @check_figures_equal() @pytest.mark.parametrize("lon0", [0, 123, 180]) @pytest.mark.parametrize("proj_type", ["H", "W"]) def test_grdimage_central_meridians(grid, proj_type, lon0): """ Test that plotting a grid with different central meridians (lon0) using Hammer (H) and Mollweide (W) projection systems work. """ fig_ref, fig_test = Figure(), Figure() fig_ref.grdimage( "@earth_relief_01d_g", projection=f"{proj_type}{lon0}/15c", cmap="geo" ) fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/15c", cmap="geo") return fig_ref, fig_test # Cylindrical Equidistant (Q) projections plotted with xarray and NetCDF grids # are still slightly different with an RMS error of 25, see issue at # https://github.com/GenericMappingTools/pygmt/issues/390 # TO-DO remove tol=1.5 and pytest.mark.xfail once bug is solved in upstream GMT @check_figures_equal(tol=1.5) @pytest.mark.parametrize("lat0", [0, 30]) @pytest.mark.parametrize("lon0", [0, 123, 180]) @pytest.mark.parametrize("proj_type", [pytest.param("Q", marks=pytest.mark.xfail), "S"]) def test_grdimage_central_meridians_and_standard_parallels(grid, proj_type, lon0, lat0): """ Test that plotting a grid with different central meridians (lon0) and standard_parallels (lat0) using Cylindrical Equidistant (Q) and General Stereographic (S) projection systems work. """ fig_ref, fig_test = Figure(), Figure() fig_ref.grdimage( "@earth_relief_01d_g", projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo" ) fig_test.grdimage(grid, projection=f"{proj_type}{lon0}/{lat0}/15c", cmap="geo") return fig_ref, fig_test
[ "noreply@github.com" ]
JamieJQuinn.noreply@github.com
fdbc95d9a4ad946af1d4b66ea9d2b9a58fc8e2e4
4dc91b14630d507d32ec75c7c099ba3576b07232
/TopAnalysis/scripts/KinAlg4tree.py
10f69e8216a75bc8d47b8372486a19bc4053939d
[]
no_license
beatrizlopes/TopLJets
6b62ccfd5249f6d0d06c04a487e638958df229af
198250ab1eae8a6a11b66dad626a827f46ec0092
refs/heads/master
2023-06-21T20:10:46.840262
2019-08-01T10:44:01
2019-08-01T10:44:01
198,248,791
0
1
null
null
null
null
UTF-8
Python
false
false
6,138
py
import ROOT
import optparse
import json
import sys
import os
import numpy as np
from array import array
from TopLJets2015.TopAnalysis.storeTools import getEOSlslist
from TopLJets2015.TopAnalysis.nuSolutions import *

"""
a dummy converter
"""
def convertToPtEtaPhiM(lVec,xyz,m=0.):
    en=ROOT.TMath.Sqrt(xyz[0]**2+xyz[1]**2+xyz[2]**2)
    p4=ROOT.TLorentzVector(xyz[0],xyz[1],xyz[2],en)
    return lVec(p4.Pt(),p4.Eta(),p4.Phi(),p4.M())

def KinematicsAlgorithm():
    args = sys.argv[1:]
    for filename in args:
        inFileName=filename
        print '....analysing',inFileName

        fIn=ROOT.TFile.Open(inFileName,"UPDATE")
        #fOut=ROOT.TFile.Open("teste.root","RECREATE")
        tree=fIn.Get("sel")
        if not fIn.GetListOfKeys().Contains("sel") :
            print "unable to read tree from file. Skipping file ",inFileName
            continue

        #newtree=ROOT.TTree("sel2","sel2")
        newtree=ROOT.TNtuple("sel2","sel2","run:lumi:ev:nvtx:rho:channel:mll:nljets:nbjets:ht:metpt:metphi:l1pt:l1eta:l1phi:l1m:l2pt:l2eta:l2phi:l2m:b1pt:b1eta:b1phi:b1m:b2pt:b2eta:b2phi:b2m:px2:py2:pz2:E2:yvis:ysum:max_dy:min_dy:deltarll:deltaphill:mlb:mpp:ypp:gen_mtt:gen_ytt:rec_mtt:rec_ytt:weight")
        newtree.SetDirectory(fIn)
        #branch.SetEntries(tree.GetEntries())
        #fOut.cd();
        #newtree.SetDirectory(fOut);

        #loop over events in the tree and fill histos
        totalEntries=tree.GetEntries()
        lVec = ROOT.Math.LorentzVector(ROOT.Math.PtEtaPhiM4D('double'))
        # h1=ROOT.TH1F('yttrec-yttgen','[y_{tt} (rec) - y_{tt} (gen)]',50,-2,2)

        for i in xrange(0,totalEntries):
            tree.GetEntry(i)
            if i%100==0 :
                sys.stdout.write('\r [ %d/100 ] done' %(int(float(100.*i)/float(totalEntries))) )

            #evWeight=puNormSF*tree.weight[0]*filtWeight*filtNormRwgt

            #leptons
            leptons=[]
            leptons.append( lVec(tree.l1pt,tree.l1eta,tree.l1phi,tree.l1m) )
            leptons.append( lVec(tree.l2pt,tree.l2eta,tree.l2phi,tree.l2m) )
            #if len(leptons)<2 : continue

            #preselect the b-jets (save always the jet and the gen jet)
            bjets=[]
            bjets.append( lVec(tree.b1pt,tree.b1eta,tree.b1phi,tree.b1m) )
            bjets.append( lVec(tree.b2pt,tree.b2eta,tree.b2phi,tree.b2m) )

            #met
            metx,mety=tree.metpt*ROOT.TMath.Cos(tree.metphi),tree.metpt*ROOT.TMath.Sin(tree.metphi)

            #try to solve the kinematics (need to swap bl assignments)
            allSols=[]
            try:
                sols=doubleNeutrinoSolutions( (bjets[0], bjets[1]), (leptons[0], leptons[1]), (metx,mety) )
                for isol in xrange(0,len(sols.nunu_s)):
                    top  = bjets[0]+leptons[0]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][0],0.)
                    top_ = bjets[1]+leptons[1]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][1],0.)
                    allSols.append( (0,top,top_) )
            except np.linalg.linalg.LinAlgError:
                pass
            try:
                sols=doubleNeutrinoSolutions( (bjets[0], bjets[1]), (leptons[1], leptons[0]), (metx,mety) )
                for isol in xrange(0,len(sols.nunu_s)):
                    top  = bjets[0]+leptons[1]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][0],0.)
                    top_ = bjets[1]+leptons[0]+convertToPtEtaPhiM(lVec,sols.nunu_s[isol][1],0.)
                    allSols.append( (1,top,top_) )
            except np.linalg.linalg.LinAlgError :
                pass

            #sort solutions by increasing m(ttbar)
            if len(allSols)==0: continue
            #print "length of allSols", len(allSols)
            allSols=sorted(allSols, key=lambda sol: (sol[1]+sol[2]).mass() )
            # print 'lowest m(ttbar) solution:', (allSols[0][1]+allSols[0][2]).mass()
            lowMtt=(allSols[0][1]+allSols[0][2]).mass()
            lowYtt=(allSols[0][1]+allSols[0][2]).Rapidity()
            # h1.Fill(lowYtt-tree.gen_ytt)

            varsel=[tree.run,tree.lumi,tree.ev,tree.rho,tree.nvtx,tree.channel,tree.mll,tree.nljets,tree.nbjets,tree.ht,
                    tree.metpt,tree.metphi,tree.l1pt,tree.l1eta,tree.l1phi,tree.l1m,tree.l2pt,tree.l2eta,tree.l2phi,tree.l2m,
                    tree.b1pt,tree.b1eta,tree.b1phi,tree.b1m,tree.b2pt,tree.b2eta,tree.b2phi,tree.b2m,
                    tree.px2,tree.py2,tree.pz2,tree.E2,tree.yvis,tree.ysum,tree.max_dy,tree.min_dy,
                    tree.deltarll,tree.deltaphill,tree.mlb,tree.mpp,tree.ypp,tree.gen_mtt,tree.gen_ytt,lowMtt,lowYtt,tree.weight]
            newtree.Fill(array("f",varsel))

        fIn.cd()
        fIn.Write()
        fIn.Close()

def main():
    KinematicsAlgorithm()

if __name__ == "__main__":
    main()
[ "bribeiro@cern.ch" ]
bribeiro@cern.ch
b2c89f08137187610f4e4c87d7926691feebaf94
e10513e1ef7d195a051befb6c829b27a0cf685c2
/core/utils/resume.py
39fc888d1e92b31d15ad1e50ee51e359c3d01d89
[]
no_license
kirenng/image-caption
5293409a4dca0ed225f3ec342a244ea5c0a49a60
78b521f306aac3fc02bb051be0e906108f98f7dd
refs/heads/master
2023-07-06T11:34:23.420494
2021-08-13T09:55:19
2021-08-13T09:55:19
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,046
py
import os

import torch


def resume_from_checkpoint(args, model, optimizer, best_acc1):
    if os.path.isfile(args.resume):
        print("=> loading checkpoint '{}'".format(args.resume))
        if args.gpu is None:
            checkpoint = torch.load(args.resume)
        else:
            # Map models to be loaded to specified single gpu.
            loc = 'cuda:{}'.format(args.gpu)
            checkpoint = torch.load(args.resume, map_location=loc)
        args.start_epoch = checkpoint['epoch']
        best_acc1 = checkpoint['best_acc1']
        if args.gpu is not None:
            # best_acc1 may be from a checkpoint from a different GPU
            best_acc1 = best_acc1.to(args.gpu)
        model.load_state_dict(checkpoint['state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer'])
        print("=> loaded checkpoint '{}' (epoch {})"
              .format(args.resume, checkpoint['epoch']))
    else:
        print("=> no checkpoint found at '{}'".format(args.resume))

    return args, model, optimizer, best_acc1
[ "3280867946@qq.com" ]
3280867946@qq.com
243b30d8a04317b70aab7c0bbadabf27a895a4a2
480a175ab2b3c012af2d1cddb79674fad1490fe5
/0x08-python-more_classes/tests/main.2.py
2cb60d1c599573c08cc695829729fe51c64ab27d
[]
no_license
ianliu-johnston/holbertonschool-higher_level_programming
a8a6476fc6a7ac0bd8ae300f2196f17c13e1b36f
f6a7c9cddb2482991c2aadacb99aa66e64eb50eb
refs/heads/master
2021-04-29T11:12:56.820851
2017-05-10T00:48:17
2017-05-10T00:48:17
77,854,226
3
3
null
null
null
null
UTF-8
Python
false
false
944
py
#!/usr/bin/python3
Rectangle = __import__('2-rectangle').Rectangle

new_rect = Rectangle(3, 4)
print("Dimensions of your new rectangle: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.width = 5
print("Width just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
new_rect.height = 15
print("height just changed. New Dimensions: {} x {}".format(new_rect.width, new_rect.height))
print("Area: {}".format(new_rect.area()))
print("Perimeter: {}".format(new_rect.perimeter()))
print("Making another one.")
next_rect = Rectangle()
print("Dimensions of your new rectangle: {} x {}".format(next_rect.width, next_rect.height))
print("Area: {}".format(next_rect.area()))
print("Perimeter: {}".format(next_rect.perimeter()))
[ "ian.liu-johnson@holbertonschool.com" ]
ian.liu-johnson@holbertonschool.com
702e93ec385bbb5567fec0ac4ca70cf08f9f04db
7dbcf66e47684c652f9d90a47b2381cf846e003d
/pkg/Conf.py
d8e12155528eb0090ab0006f88fcc253282e3ede
[]
no_license
hlanSmart/simple
531b9a8be524d29c43016c865f64132aa4bf3069
c8536edd4cec1f39e23a5ff35ae16f0efa15f323
refs/heads/master
2020-12-27T08:24:04.383170
2016-09-22T04:29:44
2016-09-22T04:29:44
68,556,669
0
1
null
null
null
null
UTF-8
Python
false
false
1,020
py
#!/usr/bin/python
#coding:utf-8
import os,yaml

BASE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

def readServer(sg,sl=False):
    # sg: ServerGroup, the server group to look up; sl: ServerList, return the list of groups instead
    with open(os.path.join(BASE_PATH,'etc/server.yml'),'r') as f:
        server=yaml.load(f)
    if sl:
        # when ServerList is requested, return the group names rather than the group details
        li=[]
        for i in server:
            li.append(i)
        return li
    if sg in server:
        gp=server[sg]  # gp: the group's server info
        for i in gp:
            # port 22 is omitted from the config file by default, so append it to the returned entries manually
            if len(gp[i])<3:
                gp[i].append(22)
        return gp
    return False  # return False when the server group does not exist

def readYaml(P):
    try:
        with open(P) as f:
            return yaml.load(f)
    except Exception as e:
        print(e)
        return False
[ "root@localhost" ]
root@localhost
9d9f3f8f6419b9565b74b794fce0b1e7d24d7632
f08f7e4da3cb83257bbeb6cf198e23ac65d91fd0
/안준혁/[21.07.19]1065.py
fed3c18cdb8c06b5c269118c09f0dc7e99e0b152
[]
no_license
4RG0S/2021-Summer-Jookgorithm
cabcd2071b88510ac22a971ed600e7b4645eb5f2
bf23a3a0f2679bcd47c825247d57998eb23c1df8
refs/heads/main
2023-07-17T06:49:17.165893
2021-09-06T09:49:36
2021-09-06T09:49:36
384,205,067
1
1
null
2021-07-14T05:46:52
2021-07-08T17:44:15
Java
UTF-8
Python
false
false
324
py
n = int(input())
count = 0

for i in range(1, n+1):
    if i < 100:
        count += 1
    elif 100 < i < 1000:
        first = i % 10
        second = int(i / 10) % 10
        third = int(i / 100)
        comp1 = first - second
        comp2 = second - third
        if comp1 == comp2:
            count += 1

print(count)
[ "ajh99345@gmail.com" ]
ajh99345@gmail.com
adc821f5df6ddc1460050390a5807ed3f8662942
d165d718b2a5e4b18f9b52054e1f5d382d72be03
/0x04-python-more_data_structures/5-number_keys.py
ca851394809637d9e4344d5ff96907b884f2f1f7
[]
no_license
Fabian-Andres/holbertonschool-higher_level_programming
f1b67fd28fb135c84ed9b3240d66ef125a043e00
0b08e2f3c4a798dd0cce1a9776304c74ad0b6ba3
refs/heads/master
2022-12-16T04:56:11.222686
2020-09-25T04:47:02
2020-09-25T04:47:34
259,423,074
1
0
null
null
null
null
UTF-8
Python
false
false
146
py
#!/usr/bin/python3
def number_keys(a_dictionary):
    no_keys = 0
    for i in range(len(a_dictionary)):
        no_keys += 1
    return no_keys
[ "f4bian.andres@gmail.com" ]
f4bian.andres@gmail.com
36c88c84948b0dd704090817ec765ae54204629c
0c5ce271c857d067c77d268c8cd6a0b1c0f70e11
/app.py
569b218bc98727dac4ed4a5cac32baa944672cea
[]
no_license
Eduardo-JReis/translate-script
0b1723a58204885734d6d235fdafc7abe0e71c83
5ad6e0d2311e7dad938af300feb55b82f6f0622d
refs/heads/master
2023-01-12T20:06:20.265520
2020-11-22T11:17:08
2020-11-22T11:17:08
315,018,659
0
0
null
null
null
null
UTF-8
Python
false
false
300
py
# import googletrans as gt
from googletrans import Translator

# print(gt.LANGUAGES)
trans = Translator()

test = True
while test:
    word = str(input('Digite a palavra: '))  # Portuguese prompt: "Type the word"
    print()
    res = trans.translate(word, dest='pt')
    print(res.text)
    if word == 'esc':
        test = False
[ "edu.publicidade81@gmail.com" ]
edu.publicidade81@gmail.com
c2bb14d7ae24c97ce9e538b563179a0fb27d3f71
2aace9bb170363e181eb7520e93def25f38dbe5c
/build/idea-sandbox/system/python_stubs/cache/77d922e63877a9db19d31d69878e680aa58a54c85eee51673bc8bfa5abec9462/cython_runtime.py
f0626603901297c264822be8e70b80c27bee933e
[]
no_license
qkpqkp/PlagCheck
13cb66fd2b2caa2451690bb72a2634bdaa07f1e6
d229904674a5a6e46738179c7494488ca930045e
refs/heads/master
2023-05-28T15:06:08.723143
2021-06-09T05:36:34
2021-06-09T05:36:34
375,235,940
1
0
null
null
null
null
UTF-8
Python
false
false
270
py
# encoding: utf-8
# module cython_runtime
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\stats\statlib.cp37-win_amd64.pyd
# by generator 1.147
# no doc
# no imports

# Variables with simple values

__loader__ = None

__spec__ = None

# no functions
# no classes
[ "qinkunpeng2015@163.com" ]
qinkunpeng2015@163.com
9f03d7bd8bcc479327be64dc54ad22ec87c35ae7
a0990640cb7d2b93262af982185f98ff0b0addbc
/2021-05-13_vibe+/frame2video.py
69e25c19525fd94f91b15ce947fdf445fb7de784
[]
no_license
chgex/Others
c320bff96aa85fa3127092f7e2a433801fbb62ea
2a8970474bd7c78aa40a7e6f4135192c2972a334
refs/heads/main
2023-04-24T09:48:56.475389
2021-05-18T10:11:13
2021-05-18T10:11:13
366,975,035
0
0
null
null
null
null
UTF-8
Python
false
false
2,401
py
'''
Author: liubai
Date: 2021-04-22
LastEditTime: 2021-04-22
'''
import cv2
import os

# rename the frame images to zero-padded names
def imageRename(image_path):
    image_list=os.listdir(image_path)
    total=len(image_list)
    # first image
    cnt=1
    for i in range(1,total+1):
        old_image_name=image_path + '/' + str(i) + '.jpg'
        new_image_name=image_path + '/' + str(i).zfill(5) + '.jpg'
        os.rename(old_image_name,new_image_name)
    print('rename success')

# compute the video duration
def getTime(filename):
    total_time=0
    cap=cv2.VideoCapture(filename)
    if cap.isOpened():
        rate = cap.get(5)   # property 5 returns the frame rate
        fraNum=cap.get(7)   # property 7 returns the number of frames in the video file
        duration=fraNum/rate
        total_time+=duration
    cap.release()
    return total_time

# compute the fps
def getFPS(filename='../test.mp4'):
    # filename='test.mp4'
    cap = cv2.VideoCapture(filename)
    total_frame = 0
    while(True):
        ret, frame = cap.read()
        if ret is False:
            break
        total_frame = total_frame + 1
    cap.release()
    # video duration in seconds
    total_time=getTime(filename)
    # compute the fps
    fps=total_frame/total_time
    return int(fps)

# combine the frames into a video
def frame2video(image_path,video_name):
    image_list=os.listdir(image_path)
    image_list.sort()
    # first image
    first_image = cv2.imread( image_path + '/' + image_list[0])
    fps = 20
    # fps=getFPS()
    print('fps: ',fps)
    # size
    size= (first_image.shape[1],first_image.shape[0])
    print(size)
    # codec
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")  # MJPG
    # videowriter
    videoWrite = cv2.VideoWriter(video_name,fourcc,fps,size)
    for image in image_list:
        print(image)
        image=image_path + '/' + image
        img = cv2.imread(image,cv2.IMREAD_COLOR)
        # resize
        img = cv2.resize(img,size,interpolation=cv2.INTER_CUBIC)
        # write the frame
        videoWrite.write(img)
    print('video write success')

if __name__=='__main__':
    image_path='./upImg'
    video_name='update.mp4'
    imageRename(image_path)
    frame2video(image_path,video_name)

    image_path='./SegImg'
    video_name='seg.mp4'
    imageRename(image_path)
    frame2video(image_path,video_name)
[ "noreply@github.com" ]
chgex.noreply@github.com
7688e234fa65ebe9a7d4ff0798517fcd1f8b8b52
6b7f81afdb9983664d12a9fc54452dd48ed5779a
/env/bin/python-config
3f755f3f8c33a751286ee92004eaaa4bc470a0c0
[]
no_license
Wilians001/axf
502fcf91d737f2901572c6dd59ff3e9c81615412
fb4c41c01a23c5dd0f64e4c2f61c0a042cb72935
refs/heads/master
2020-03-31T07:43:32.643230
2018-10-08T12:02:25
2018-10-08T12:02:25
152,032,740
0
0
null
null
null
null
UTF-8
Python
false
false
2,339
#!/home/wilians/axf/env/bin/python

import sys
import getopt
import sysconfig

valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags',
              'ldflags', 'help']

if sys.version_info >= (3, 2):
    valid_opts.insert(-1, 'extension-suffix')
    valid_opts.append('abiflags')
if sys.version_info >= (3, 3):
    valid_opts.append('configdir')


def exit_with_usage(code=1):
    sys.stderr.write("Usage: {0} [{1}]\n".format(
        sys.argv[0], '|'.join('--'+opt for opt in valid_opts)))
    sys.exit(code)

try:
    opts, args = getopt.getopt(sys.argv[1:], '', valid_opts)
except getopt.error:
    exit_with_usage()

if not opts:
    exit_with_usage()

pyver = sysconfig.get_config_var('VERSION')
getvar = sysconfig.get_config_var

opt_flags = [flag for (flag, val) in opts]

if '--help' in opt_flags:
    exit_with_usage(code=0)

for opt in opt_flags:
    if opt == '--prefix':
        print(sysconfig.get_config_var('prefix'))

    elif opt == '--exec-prefix':
        print(sysconfig.get_config_var('exec_prefix'))

    elif opt in ('--includes', '--cflags'):
        flags = ['-I' + sysconfig.get_path('include'),
                 '-I' + sysconfig.get_path('platinclude')]
        if opt == '--cflags':
            flags.extend(getvar('CFLAGS').split())
        print(' '.join(flags))

    elif opt in ('--libs', '--ldflags'):
        abiflags = getattr(sys, 'abiflags', '')
        libs = ['-lpython' + pyver + abiflags]
        libs += getvar('LIBS').split()
        libs += getvar('SYSLIBS').split()
        # add the prefix/lib/pythonX.Y/config dir, but only if there is no
        # shared library in prefix/lib/.
        if opt == '--ldflags':
            if not getvar('Py_ENABLE_SHARED'):
                libs.insert(0, '-L' + getvar('LIBPL'))
            if not getvar('PYTHONFRAMEWORK'):
                libs.extend(getvar('LINKFORSHARED').split())
        print(' '.join(libs))

    elif opt == '--extension-suffix':
        ext_suffix = sysconfig.get_config_var('EXT_SUFFIX')
        if ext_suffix is None:
            ext_suffix = sysconfig.get_config_var('SO')
        print(ext_suffix)

    elif opt == '--abiflags':
        if not getattr(sys, 'abiflags', None):
            exit_with_usage()
        print(sys.abiflags)

    elif opt == '--configdir':
        print(sysconfig.get_config_var('LIBPL'))
[ "1518209084@qq.com" ]
1518209084@qq.com
e4d3b1c290b0ee2787f51f3bb625a45c1c113234
6daa3815511b1eb1f4ff3a40b7e9332fab38b8ef
/tastesavant/taste/apps/profiles/migrations/0010_auto__add_field_profile_preferred_site__chg_field_profile_user.py
f631b68b525621e7885479041e53e8ea8b703f7e
[]
no_license
kaizensoze/archived-projects
76db01309453606e6b7dd9d2ff926cfee42bcb05
d39ac099cb40131bac5de66bde7d0e2db5f74189
refs/heads/master
2021-05-31T12:16:17.800730
2016-02-23T00:27:56
2016-02-23T00:27:56
14,407,212
1
0
null
null
null
null
UTF-8
Python
false
false
7,513
py
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding field 'Profile.preferred_site'
        # The default value, 3, should refer to the NYC site.
        db.add_column('profiles_profile', 'preferred_site',
                      self.gf('django.db.models.fields.related.ForeignKey')(default=3, to=orm['sites.Site']),
                      keep_default=False)

        # Changing field 'Profile.user'
        db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True))

    def backwards(self, orm):
        # Deleting field 'Profile.preferred_site'
        db.delete_column('profiles_profile', 'preferred_site_id')

        # Changing field 'Profile.user'
        db.alter_column('profiles_profile', 'user_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], unique=True))

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'profiles.friendship': {
            'Meta': {'object_name': 'Friendship'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'notice_sent_to_user_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'profile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['profiles.Profile']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'profiles.profile': {
            'Meta': {'object_name': 'Profile'},
            'birthday': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'blogger': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'digest_notifications': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'favorite_food': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'favorite_restaurant': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'friends': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'friends'", 'to': "orm['auth.User']", 'through': "orm['profiles.Friendship']", 'blank': 'True', 'symmetrical': 'False', 'null': 'True'}),
            'gender': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'last_sync_facebook': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'last_sync_foursquare': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'location': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'notification_level': ('django.db.models.fields.CharField', [], {'default': "'instant'", 'max_length': '16'}),
            'preferred_site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'type_expert': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'type_reviewer': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'zipcode': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    complete_apps = ['profiles']
[ "gallo.j@gmail.com" ]
gallo.j@gmail.com
e917e03d7e2392418e4b1f02d89aec290dda311a
9ef2c82ae61064c4f78798f04ab3310e7f5e4629
/tests/test_prepare_data.py
3a97f2cc09d08dbc04605095acef5c508ca0f788
[ "BSD-3-Clause" ]
permissive
chenxofhit/cirrocumulus
b999d49afc024c30e61fbc6905c968f71714291e
18ee1264303138cbba8ff34318f90f0e2619dc13
refs/heads/master
2023-03-17T15:23:02.935136
2021-03-16T20:01:02
2021-03-16T20:01:02
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,476
py
import fsspec
import pandas as pd
import scipy

from cirrocumulus.embedding_aggregator import get_basis
from cirrocumulus.parquet_dataset import ParquetDataset
from cirrocumulus.prepare_data import PrepareData


def test_prepare(test_data, measures, dimensions, continuous_obs, basis, tmp_path):
    output_dir = str(tmp_path)
    test_data = test_data[:, measures]
    test_data.obs = test_data.obs[dimensions + continuous_obs]
    prepare_data = PrepareData(adata=test_data, output=output_dir)
    prepare_data.execute()

    pq_ds = ParquetDataset()
    dataset = dict(id='')
    schema = dict(shape=test_data.shape)
    fs = fsspec.filesystem('file')
    prepared_df = pq_ds.read_dataset(file_system=fs, path=output_dir, dataset=dataset, schema=schema,
                                     keys=dict(X=measures, obs=dimensions + continuous_obs,
                                               basis=[get_basis(basis, -1, '')]))
    if not scipy.sparse.issparse(test_data.X):
        test_data.X = scipy.sparse.csr_matrix(test_data.X)
    df = pd.DataFrame.sparse.from_spmatrix(test_data.X, columns=measures)
    for f in dimensions:
        df[f] = test_data.obs[f].values
        df[f] = df[f].astype('category')
    for f in continuous_obs:
        df[f] = test_data.obs[f].values
    embedding_data = test_data.obsm[basis]
    for i in range(embedding_data.shape[1]):
        df["{}_{}".format(basis, i + 1)] = embedding_data[:, i]
    prepared_df = prepared_df[df.columns]
    pd.testing.assert_frame_equal(df, prepared_df, check_names=False)
[ "jgould@broadinstitute.org" ]
jgould@broadinstitute.org
ad784210df07d410b4d9d0b3795e111aa61b9193
b7453e5a2700f2017a6f783eaf3990ee2486cd65
/test/utils/test_clean_identity.py
54c6c0a2df4ef8f53c92989877f93ce940c57635
[ "Apache-2.0" ]
permissive
LaRiffle/cleaning-scripts
8525164cca8336b67a2362d6907414e27ca088fa
08f360721056d30befe8d58ded583a4a5d126184
refs/heads/master
2020-07-28T06:52:47.673033
2019-11-19T15:26:19
2019-11-19T15:26:19
209,343,798
0
0
Apache-2.0
2019-09-20T13:13:25
2019-09-18T15:33:16
Python
UTF-8
Python
false
false
233
py
from scripts import utils


def test_clean_identity():
    assert utils.clean_identity(None) == ""
    assert utils.clean_identity("NaN") == ""

    row_input = "Holà chicanos"
    assert utils.clean_identity(row_input) == row_input
[ "theo.leffyr@gmail.com" ]
theo.leffyr@gmail.com
1b5cd48ff39ee1da8dbaf2f526d75d0746e5c1e6
f1d9df04036fc43c9e5cc7998b83261f4daa94b8
/management_commands/insert_base_data.py
cf87a7c11fd7db6f4e396e72c0e9d41bce402ce1
[]
no_license
Eaterator/web
019eb6547995be30b3468e5c44ecc52f05858fb4
9c598607f76ad770c66d85c47ffcec05f92f4d66
refs/heads/master
2021-01-09T20:30:13.417308
2017-04-25T02:44:35
2017-04-25T02:44:35
81,286,177
2
0
null
null
null
null
UTF-8
Python
false
false
2,324
py
from application.auth.models import Role
from application.recipe.models import Source
from application.base_models import db


def insert_role_data():
    roles = [
        {
            'name': 'regular',
            'type_': 'consumer',
            'is_admin': False
        },
        {
            'name': 'corporate',
            'type_': 'business',
            'is_admin': False
        },
        {
            'name': 'admin',
            'type_': 'admin',
            'is_admin': True
        }
    ]
    if len(Role.query.all()) > 0:
        return
    for role in roles:
        new_role = Role(**role)
        db.session.add(new_role)
    db.session.commit()


def insert_source_data():
    sources = [
        {
            'base_url': 'foodnetwork.com',
            'name': 'Food Network'
        },
        {
            'base_url': 'epicurious.com',
            'name': 'Epicurious'
        },
        {
            'base_url': 'therecipedepository.com',
            'name': 'The Recipe Depository',
        },
        {
            'base_url': 'allrecipes.com',
            'name': 'All Recipes',
        },
        {
            'base_url': 'bonappetit.com',
            'name': 'Bon Appetit'
        },
        {
            'base_url': 'food.com',
            'name': 'Food'
        },
        {
            'base_url': 'simplyrecipes.com',
            'name': 'Simply Recipes'
        },
        {
            'base_url': 'bbcgoodfood.com',
            'name': 'BBC Good Food'
        },
        {
            'base_url': 'williams-sonoma.com',
            'name': 'Williams Sonoma'
        },
        {
            'base_url': 'finedininglovers.com',
            'name': 'Fine Dining Lovers'
        },
        {
            'base_url': 'thekitchn.com',
            'name': 'The Kitchn'
        },
        {
            'base_url': 'chowhound.com',
            'name': 'Chow'
        },
        {
            'base_url': 'myrecipes.com',
            'name': 'My Recipes'
        },
        {
            'base_url': '',
            'name': 'Other'
        }
    ]
    for source in sources:
        exists = Source.query.filter(Source.name == source['name']).all()
        if len(exists) <= 0:
            new_source = Source(**source)
            db.session.add(new_source)
    db.session.commit()
[ "currahl@yahoo.ca" ]
currahl@yahoo.ca
d85cf1dc2e0922928193d390f42218e83afbb210
35665123a96d6e97deb9f2e8761e0415ea9eb620
/Mean_Stddev_Calculator.py
fbfae1a14808b211dad6b083ae78c28d4bcb83d7
[]
no_license
ArvindSinghRawat/TransportModeDetection
8f7eee3608a1bca612477c3b746cd893ad404986
5cfb7a9c735b07f4d0f5b7103f3f6a429c2369a5
refs/heads/master
2020-05-20T14:51:18.116449
2019-05-08T17:04:54
2019-05-08T17:04:54
185,630,764
0
0
null
null
null
null
UTF-8
Python
false
false
729
py
import pandas as pd
import numpy as np
import csv

def desc(path="data/Arvind 2000.csv",cname = "speed (m/s)",export=False):
    data = pd.read_csv(path)[cname]
    d = dict()
    data = data.fillna(0)
    d['Mean'] = data.mean()
    dq = np.percentile(data,(0,25,50,75,100))
    d['Min'] = dq[0]
    d['1Q'] = dq[1]
    d['Median'] = dq[2]
    d['3Q'] = dq[3]
    d['Max'] = dq[4]
    d['Std dev'] = data.var() ** 0.5
    d['Count'] = len(data)
    if export == True:
        t= path.split('.')[0]
        t= t.split('/')
        target = t[0]+'/details/'+t[1]+' Details.csv'
        with open(target, 'w') as f:
            for key in d.keys():
                f.write("%s,%s\n"%(key,d[key]))
    return d
[ "noreply@github.com" ]
ArvindSinghRawat.noreply@github.com
5eec040553e54df0d88d4c9465f5455d57ba102b
c215d282844bad35d026d4bb65be37d6a46100d0
/recommender.py
77268f472da0b964fd00a03208dd585aaca85777
[]
no_license
informal-economy/backend
d0823310c5997647bada0de374d6132b8d23e724
8657e831473e03450cc38a242d8aa75951a73ee5
refs/heads/master
2021-05-17T16:22:35.107644
2020-03-29T15:05:29
2020-03-29T15:05:29
250,869,504
0
0
null
2021-03-20T03:14:24
2020-03-28T18:47:47
Python
UTF-8
Python
false
false
8,886
py
import lenskit.datasets as ds
import pandas as pd
import csv
from lenskit.algorithms import Recommender
from lenskit.algorithms.user_knn import UserUser

# The function input x is the user-specific input .csv file with the columns:
#   item,title,genres,ratings
# which is equivalent to:
#   jobId,jobtitle,jobcategory,ratings
# The output of the function is a list of (currently 20) best job options as
# [genres, title], i.e. [category, job].
def recommender(x):
    data = ds.MovieLens('lab4-recommender-systems/')
    print("Successfully installed dataset.")

    rows_to_show = 10  # <-- Try changing this number to see more rows of data
    # <-- Try changing "ratings" to "movies", "tags", or "links" to see the
    # kinds of data stored in the other MovieLens files
    print(data.ratings.head(rows_to_show))

    joined_data = data.ratings.join(data.movies['genres'], on='item')
    joined_data = joined_data.join(data.movies['title'], on='item')
    print(joined_data.head(rows_to_show))

    # Step 2.1: rank items by plain average rating
    average_ratings = data.ratings.groupby(['item']).mean()
    sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
    joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
    joined_data = joined_data.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[1:]]
    print("RECOMMENDED FOR ANYBODY:")
    print(joined_data.head(rows_to_show))

    # Same ranking, but keeping an explicit per-item rating count
    average_ratings = data.ratings.groupby('item') \
        .agg(count=('user', 'size'), rating=('rating', 'mean')) \
        .reset_index()
    sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
    joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
    joined_data = joined_data.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[1:]]
    print("RECOMMENDED FOR ANYBODY:")
    print(joined_data.head(rows_to_show))

    # Step 2.2: require a minimum number of ratings per item
    minimum_to_include = 1  # 20 <-- raise or lower this to include items rated by more or fewer people
    average_ratings = data.ratings.groupby(['item']).mean()
    rating_counts = data.ratings.groupby(['item']).count()
    average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
    sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
    joined_data = sorted_avg_ratings.join(data.movies['genres'], on='item')
    joined_data = joined_data.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[3:]]
    print("RECOMMENDED FOR ANYBODY:")
    myjoined_data = joined_data.head(rows_to_show)
    print(joined_data.head(rows_to_show))
    print("RECOMMENDED FOR ANYBODY, just genres and title:")
    print(myjoined_data)

    # Step 2.3: same ranking, restricted to one genre
    average_ratings = data.ratings.groupby(['item']).mean()
    rating_counts = data.ratings.groupby(['item']).count()
    average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
    average_ratings = average_ratings.join(data.movies['genres'], on='item')
    average_ratings = average_ratings.loc[average_ratings['genres'].str.contains('Education')]
    sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
    joined_data = sorted_avg_ratings.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[3:]]
    print("\n\nRECOMMENDED FOR AN EDUCATION SPECIALIST:")
    print(joined_data.head(rows_to_show))

    # Step 2.4: another genre-restricted ranking
    average_ratings = data.ratings.groupby(['item']).mean()
    rating_counts = data.ratings.groupby(['item']).count()
    average_ratings = average_ratings.loc[rating_counts['rating'] > minimum_to_include]
    average_ratings = average_ratings.join(data.movies['genres'], on='item')
    average_ratings = average_ratings.loc[average_ratings['genres'].str.contains('sewing')]
    sorted_avg_ratings = average_ratings.sort_values(by="rating", ascending=False)
    joined_data = sorted_avg_ratings.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[3:]]
    print("\n\nRECOMMENDED FOR A SEWING SPECIALIST:")
    print(joined_data.head(rows_to_show))

    # Step 3: personalized recommendation
    jabril_rating_dict = {}
    # jgb_rating_dict = {}
    with open(x, newline='') as csvfile:
        ratings_reader = csv.DictReader(csvfile)
        for row in ratings_reader:
            if row['ratings'] != "" and 0 < float(row['ratings']) < 6:
                jabril_rating_dict.update({int(row['item']): float(row['ratings'])})
    # print("Jabril Dictionary")
    # print(jabril_rating_dict)

    # with open("./lab4-recommender-systems/jgb-movie-ratings.csv", newline='') as csvfile:
    #     ratings_reader = csv.DictReader(csvfile)
    #     for row in ratings_reader:
    #         if row['ratings'] != "" and 0 < float(row['ratings']) < 6:
    #             jgb_rating_dict.update({int(row['item']): float(row['ratings'])})

    print("\n\nRating dictionaries assembled!")
    print("Sanity check:")
    print("\tJabril's rating for Banker is " + str(jabril_rating_dict[2]))
    # print("\tJohn-Green-Bot's rating for 1197 (The Princess Bride) is " + str(jgb_rating_dict[1197]))

    # Step 4: train a collaborative-filtering model to provide recommendations.
    num_recs = 20  # <-- number of recommendations to generate; change to see more
    # Consider at most 30 and at least 2 neighbors. The lab's "reasonable
    # defaults" were (15, 3); these values were tweaked for this dataset.
    user_user = UserUser(30, min_nbrs=2)
    algo = Recommender.adapt(user_user)
    print(algo)
    algo.fit(data.ratings)
    print("Set up a User-User algorithm!")

    # Step 4.1: now that the system has defined clusters, feed it the personal
    # ratings to get the top recommendations. -1 marks these ratings as coming
    # from a user who is not in the training set; num_recs is how many
    # recommendations to generate.
    jabril_recs = algo.recommend(-1, num_recs, ratings=pd.Series(jabril_rating_dict))
    print("jabril_recs")
    print(jabril_recs)

    joined_data = jabril_recs.join(data.movies['genres'], on='item')
    joined_data = joined_data.join(data.movies['title'], on='item')
    joined_data = joined_data[joined_data.columns[2:]]
    print("\n\nRECOMMENDED JOB FOR JABRIL:")
    print(joined_data)

    if joined_data.empty:
        # fall back to the non-personalized top list computed in Step 2.2
        joined_data = myjoined_data
        # joined_data = joined_data[joined_data.columns[2:4]]
        joined_data = joined_data[['genres', 'title']]
        print("DataFrame was empty")
        print(joined_data)

    return joined_data

    # jgb_recs = algo.recommend(-1, num_recs, ratings=pd.Series(jgb_rating_dict))
    # joined_data = jgb_recs.join(data.movies['genres'], on='item')
    # joined_data = joined_data.join(data.movies['title'], on='item')
    # joined_data = joined_data[joined_data.columns[2:]]
    # print("\n\nRECOMMENDED JOB FOR JOHN-GREEN-BOT:")
    # print(joined_data)

    # Step 5: making a combined movie recommendation list (can be omitted).
    # combined_rating_dict = {}
    # for k in jabril_rating_dict:
    #     if k in jgb_rating_dict:
    #         combined_rating_dict.update({k: float((jabril_rating_dict[k] + jgb_rating_dict[k]) / 2)})
    #     else:
    #         combined_rating_dict.update({k: jabril_rating_dict[k]})
    # for k in jgb_rating_dict:
    #     if k not in combined_rating_dict:
    #         combined_rating_dict.update({k: jgb_rating_dict[k]})
    # print("Combined ratings dictionary assembled!")
    # print("Sanity check:")
    # print("\tCombined rating for 1197 (The Princess Bride) is " + str(combined_rating_dict[1197]))

    # Step 5.2
    # combined_recs = algo.recommend(-1, num_recs, ratings=pd.Series(combined_rating_dict))
    # joined_data = combined_recs.join(data.movies['genres'], on='item')
    # joined_data = joined_data.join(data.movies['title'], on='item')
    # joined_data = joined_data[joined_data.columns[2:]]
    # print("\n\nRECOMMENDED FOR JABRIL / JOHN-GREEN-BOT HYBRID:")
    # print(joined_data)
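# --- Hedged usage sketch (not part of the original file): assuming a ratings
# CSV named "my-job-ratings.csv" with the columns item,title,genres,ratings
# exists next to this script, the function can be exercised like this:
#
#     top_jobs = recommender("my-job-ratings.csv")
#     print(top_jobs)  # DataFrame with 'genres' and 'title' columns
#
# The -1 user id passed to algo.recommend() marks the ratings as belonging to
# a new, anonymous user rather than one already present in the training set.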
[ "egetenmeyer@Nikolas-MacBook-Pro.local" ]
egetenmeyer@Nikolas-MacBook-Pro.local
74c451e67b80b2f8cba3e0eac1b09d2eedf46702
2eff698abfad7693e61bc942de619c5abe7dd270
/PyPoll.py
1683debe67677fc4bc61c39ab1d0e1a47ff942a3
[]
no_license
zanelouis/Election_Analysis
ff894d228777a465e5cd0b328538267722cc0624
4353f566826eabea5176d83051ca3dcbba6524ca
refs/heads/main
2023-01-07T20:59:15.794899
2020-11-04T00:01:20
2020-11-04T00:01:20
300,511,213
0
0
null
null
null
null
UTF-8
Python
false
false
4,043
py
# Add our dependencies.
import csv
import os

# Assign a variable to load a file from a path.
file_to_load = os.path.join("Resources", "election_results.csv")
# Assign a variable to save the file to a path.
file_to_save = os.path.join("Analysis", "election_analysis.txt")

# Initialize a total vote counter.
total_votes = 0

# Candidate options and candidate votes.
candidate_options = []
# 1. Declare the empty dictionary.
candidate_votes = {}

# Winning candidate and winning count tracker.
winning_candidate = ""
winning_count = 0
winning_percentage = 0

# Open the election results and read the file.
with open(file_to_load) as election_data:
    file_reader = csv.reader(election_data)

    # Read the header row.
    headers = next(file_reader)

    # Process each row in the CSV file.
    for row in file_reader:
        # Add to the total vote count.
        total_votes += 1

        # Get the candidate name from each row.
        candidate_name = row[2]

        if candidate_name not in candidate_options:
            # Add the candidate name to the candidate list.
            candidate_options.append(candidate_name)
            # Begin tracking that candidate's vote count.
            candidate_votes[candidate_name] = 0

        # Add a vote to that candidate's count.
        candidate_votes[candidate_name] += 1

with open(file_to_save, "w") as txt_file:
    # Print the final vote count to the terminal.
    election_results = (
        f"\nElection Results\n"
        f"-------------------------\n"
        f"Total Votes: {total_votes:,}\n"
        f"-------------------------\n")
    print(election_results, end="")

    # Save the final vote count to the text file.
    txt_file.write(election_results)

    # Determine the percentage of votes for each candidate by looping through the counts.
    for candidate_name in candidate_votes:
        # Retrieve the vote count of a candidate.
        votes = candidate_votes[candidate_name]
        # Calculate the percentage of votes.
        vote_percentage = float(votes) / float(total_votes) * 100

        candidate_results = (
            f"{candidate_name}: {vote_percentage:.1f}% ({votes:,})\n")

        # Print each candidate's name, vote count, and percentage to the terminal.
        print(candidate_results)
        # Save the candidate results to our text file.
        txt_file.write(candidate_results)

        # Determine the winning vote count and candidate.
        if (votes > winning_count) and (vote_percentage > winning_percentage):
            # If true, update the running winner totals.
            winning_count = votes
            winning_percentage = vote_percentage
            # And set winning_candidate to the candidate's name.
            winning_candidate = candidate_name

    # Print the winning candidate's results to the terminal.
    winning_candidate_summary = (
        f"-------------------------\n"
        f"Winner: {winning_candidate}\n"
        f"Winning Vote Count: {winning_count:,}\n"
        f"Winning Percentage: {winning_percentage:.1f}%\n"
        f"-------------------------\n")
    print(winning_candidate_summary)

    # Save the winning candidate's results to the text file.
    txt_file.write(winning_candidate_summary)
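# Worked example (illustrative figures, not from the source data): with three
# candidates receiving 520, 230, and 250 of 1,000 total votes, the loop above
# yields 52.0%, 23.0%, and 25.0%. The winner block then latches onto the first
# candidate, because 520 > 0 (the initial winning_count) and 52.0 > 0.0, and no
# later candidate exceeds both running maxima.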
[ "noreply@github.com" ]
zanelouis.noreply@github.com
740f614b0665f2538f1e0cc1c16f2877b2961e47
e3393d4d4bdf6684bfba47817e60f96c69ec9475
/utils.py
e5e14bd563560eb049fb0d71e1e6075e2ae90dba
[]
no_license
INFO3401/problem-set-three-brru7260
402f9f38e15c8e0ca115c9be338ee48597ae89d8
e80c26e2ee33de14439c664717674a91c686e40a
refs/heads/master
2021-01-09T04:01:48.068480
2020-02-24T04:09:39
2020-02-24T04:09:39
242,239,274
0
0
null
null
null
null
UTF-8
Python
false
false
683
py
# from utils import *
# import pandas as pd
import pandas as pd


def loadAndCleanData():
    item = pd.read_csv("creditData.csv")
    data = item.fillna(0)
    print(data)


loadAndCleanData()

# def computerProbability(feature, bin, data):
#     count = 0.0
#     # count the number of datapoints in the bin
#     for datapoint in data.iterrows():
#         # see if the data is in the right bin
#         if datapoint[feature] >= bin[0] and datapoint[feature] < bin[1]:
#             count += 1
#     # count the total number of datapoints
#     totalData = len(data)
#     # divide the number of people in the bin by the total number of people
#     probability = count / totalData
#     # return the result
#     return (probability)
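# A minimal working version of the commented-out helper above (an assumption:
# the intended behavior is "fraction of rows whose `feature` value falls in
# the half-open interval [bin[0], bin[1])"). Note the original draft returned
# `probability` before computing it, and iterrows() yields (index, row) pairs,
# so the row must be unpacked before indexing by column name.
def compute_probability(feature, bin, data):
    count = 0.0
    # count the number of datapoints in the bin
    for _, datapoint in data.iterrows():
        if bin[0] <= datapoint[feature] < bin[1]:
            count += 1
    # divide the number of datapoints in the bin by the total number
    total_data = len(data)
    return count / total_data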
[ "noreply@github.com" ]
INFO3401.noreply@github.com
d74da5f980c51f8a87e1f3491b38cb906651ba91
995c52ad5a0a3039ad37a4d2f07b06dcbbcf3961
/tantalus/migrations/0059_auto_20180810_1837.py
f4ba3f19bfd13e80fa47e558107374b522b8b533
[]
no_license
nafabrar/tantalus
d02cce3923205191f00b30e80152a0be7c091d6a
d8552d40472c29bc617b45a1edaf87c6624b824d
refs/heads/master
2022-12-24T15:53:52.034999
2020-10-07T22:26:35
2020-10-07T22:26:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
945
py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.14 on 2018-08-10 18:37
from __future__ import unicode_literals

from django.db import migrations


def populate_sequence_file_info(apps, schema_editor):
    FileResource = apps.get_model('tantalus', 'FileResource')
    SequenceFileInfo = apps.get_model('tantalus', 'SequenceFileInfo')

    for file_resource in FileResource.objects.all():
        sequence_file_info = SequenceFileInfo(
            file_resource=file_resource,
            owner=file_resource.owner,
            read_end=file_resource.read_end,
            genome_region=file_resource.genome_region,
            index_sequence=file_resource.index_sequence,
        )
        sequence_file_info.save()


class Migration(migrations.Migration):

    dependencies = [
        ('tantalus', '0058_historicalsequencefileinfo_sequencefileinfo'),
    ]

    operations = [
        migrations.RunPython(populate_sequence_file_info)
    ]
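# Note (not part of the generated file): RunPython is registered without a
# reverse function, so this data migration cannot be unapplied. If
# reversibility were wanted, a common pattern is to register a no-op reverse:
#
#     migrations.RunPython(populate_sequence_file_info,
#                          reverse_code=migrations.RunPython.noop)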
[ "andrew.mcpherson@gmail.com" ]
andrew.mcpherson@gmail.com
fe469f3699ada32088a48d4a15051399450c03c8
e211657b291dbcb21ed6c587275ff7168e8413f3
/models/sys_trans_sp_multi.py
7031132db7a25e66d621263bc6d8f743484a1c9a
[]
no_license
jehovahxu/chan
b64b84a96983dc956e746217fb51451d55ea6d1d
572060b45f4a79e09e796ba671851de693817fc2
refs/heads/master
2020-12-02T05:31:20.537309
2020-03-14T15:34:57
2020-03-14T15:34:57
230,905,070
4
0
null
null
null
null
UTF-8
Python
false
false
14,192
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 18-12-26 3:13 PM
# @Author  : Jehovah
# @File    : systhesis.py
# @Software: PyCharm

import torch
import torch.nn as nn


class Sys_Generator(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=64):
        super(Sys_Generator, self).__init__()
        self.en_1 = nn.Sequential(
            nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True)
        )
        self.en_2 = nn.Sequential(
            nn.Conv2d(ngf, ngf * 2, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, True)
        )
        self.resblock = nn.Sequential(
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2)
        )
        self.resblock_2 = nn.Sequential(
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2)
        )
        self.resblock_1 = nn.Sequential(
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2),
            ResidualBlock(ngf * 2, ngf * 2)
        )
        self.resblock1 = ResidualBlock(in_channels=512, out_channels=512)
        self.resblock2 = ResidualBlock(in_channels=512, out_channels=512)
        self.resblock3 = ResidualBlock(in_channels=512, out_channels=512)

        self.en1 = nn.Sequential(
            nn.Conv2d(input_nc, ngf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True)
        )
        self.en2 = nn.Sequential(
            nn.Conv2d(ngf, ngf * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 2),
            nn.LeakyReLU(0.2, True)
        )
        self.en3 = nn.Sequential(
            nn.Conv2d(ngf * 2, ngf * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 4),
            nn.LeakyReLU(0.2, True)
        )
        self.en4 = nn.Sequential(
            nn.Conv2d(ngf * 4, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en5 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en6 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en7 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.en8 = nn.Sequential(
            nn.Conv2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.ReLU(True)
        )

        self.de1 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.ReLU(True)
        )
        self.de2 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de3 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de4 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 8, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 8),
            nn.Dropout(0.5),
            nn.ReLU(True)
        )
        self.de5 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8 * 2, ngf * 4, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True)
        )
        self.de6 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 8, ngf * 2, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True)
        )
        self.de7 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 4, ngf, kernel_size=4, stride=2, padding=1),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True)
        )
        self.de8 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 2, output_nc, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )
        self.de8_1 = nn.Sequential(
            nn.ConvTranspose2d(ngf * 2, output_nc, kernel_size=4, stride=2, padding=1),
            nn.Tanh()
        )

        # self.ta2 = Trans_Attn(ngf * 8)
        # self.ta3 = Trans_Attn(ngf * 8)
        # self.ta4 = Trans_Attn(ngf * 8)
        # self.ta5 = Trans_Attn(ngf * 4)
        self.ta6 = Trans_Attn(ngf * 2)
        self.sp = Spacial_Attn(ngf * 2)

    def forward(self, x):
        # encoder
        out_en1 = self.en1(x)
        out_en2 = self.en2(out_en1)
        out_en3 = self.en3(out_en2)
        out_en4 = self.en4(out_en3)
        out_en5 = self.en5(out_en4)
        out_en6 = self.en6(out_en5)
        out_en7 = self.en7(out_en6)
        out_en8 = self.en8(out_en7)

        out_en8 = self.resblock1(out_en8, is_bn=False)
        out_en8 = self.resblock2(out_en8, is_bn=False)
        out_en8 = self.resblock3(out_en8, is_bn=False)

        # decoder
        out_de1 = self.de1(out_en8)
        out_de1 = torch.cat((out_de1, out_en7), 1)
        out_de2 = self.de2(out_de1)
        # out_de2 = self.ta2(out_en6, out_de2)
        out_de2 = torch.cat((out_de2, out_en6), 1)
        out_de3 = self.de3(out_de2)
        # out_de3 = self.ta3(out_en5, out_de3)
        out_de3 = torch.cat((out_de3, out_en5), 1)
        out_de4 = self.de4(out_de3)
        # out_de4 = self.ta4(out_en4, out_de4)
        out_de4 = torch.cat((out_de4, out_en4), 1)
        out_de5 = self.de5(out_de4)
        # out_de5 = self.ta5(out_en3, out_de5)
        out_de5 = torch.cat((out_de5, out_en3), 1)
        out_de6 = self.de6(out_de5)
        out_de6 = self.ta6(out_en2, out_de6)
        out_de6 = torch.cat((out_de6, out_en2), 1)
        out_de7 = self.de7(out_de6)
        out_de7 = torch.cat((out_de7, out_en1), 1)
        # out_de8 = self.de8(out_de7)
        # out_2 = self.de8_1(out_de7)

        # shallow branch, fused with the decoder via spatial attention
        out_1 = self.en_1(x)
        out_1 = self.en_2(out_1)
        out_1 = self.resblock_2(out_1)
        out_1, out_de7 = self.sp(out_1, out_de7)
        out_1 = out_1 + out_de7
        out_1 = self.resblock(out_1)
        out_1 = self.resblock_1(out_1)
        out_de8 = self.de8(out_1)
        return out_de8


def conv3x3(in_channels, out_channels, stride=1):
    return nn.Conv2d(in_channels, out_channels, kernel_size=3,
                     stride=stride, padding=1, bias=False)


class ResidualBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride=1):
        super(ResidualBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, out_channels, stride)
        self.bn1 = nn.BatchNorm2d(out_channels)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(out_channels, out_channels)
        self.bn2 = nn.BatchNorm2d(out_channels)

    def forward(self, x, is_bn=True):
        residual = x
        out = self.conv1(x)
        if is_bn:
            out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        if is_bn:
            out = self.bn2(out)
        out += residual
        out = self.relu(out)
        return out


class Discriminator(nn.Module):
    def __init__(self, input_nc, output_nc, ndf=64):
        super(Discriminator, self).__init__()
        self.cov1 = nn.Sequential(
            nn.Conv2d(input_nc + output_nc, ndf, kernel_size=4, stride=2, padding=1),
            nn.LeakyReLU(0.2, True)
        )
        self.cov2 = nn.Sequential(
            nn.Conv2d(ndf, ndf * 2, kernel_size=4, stride=2, padding=1),
            nn.InstanceNorm2d(ndf * 2),
            nn.LeakyReLU(0.2, True)
        )
        self.cov3 = nn.Sequential(
            nn.Conv2d(ndf * 2, ndf * 4, kernel_size=4, stride=2, padding=1),
            nn.InstanceNorm2d(ndf * 4),
            nn.LeakyReLU(0.2, True)
        )
        self.cov4 = nn.Sequential(
            nn.Conv2d(ndf * 4, ndf * 8, kernel_size=4, stride=2, padding=1),
            nn.InstanceNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.cov5 = nn.Sequential(
            nn.Conv2d(ndf * 8, ndf * 8, kernel_size=3, stride=2, padding=1),
            nn.InstanceNorm2d(ndf * 8),
            nn.LeakyReLU(0.2, True)
        )
        self.cov5_1 = nn.Sequential(
            nn.AvgPool2d(kernel_size=2, stride=2),
            nn.InstanceNorm2d(ndf * 8),
        )
        self.cov5_2 = nn.Sequential(
            nn.AvgPool2d(kernel_size=4, stride=4),
            nn.InstanceNorm2d(ndf * 4),
        )
        self.cov5_3 = nn.Sequential(
            nn.AvgPool2d(kernel_size=8, stride=8),
            nn.InstanceNorm2d(ndf * 2),
        )
        self.cls = nn.Sequential(
            nn.Conv2d(1408, 1, kernel_size=4, stride=1, padding=1),
            nn.Sigmoid()
        )

    def forward(self, x):
        out_cov1 = self.cov1(x)
        out_cov2 = self.cov2(out_cov1)
        out_cov3 = self.cov3(out_cov2)
        out_cov4 = self.cov4(out_cov3)
        out_1 = self.cov5(out_cov4)
        out_2 = self.cov5_1(out_cov4)
        out_3 = self.cov5_2(out_cov3)
        out_4 = self.cov5_3(out_cov2)
        out = torch.cat((out_1, out_2, out_3, out_4), 1)
        out = self.cls(out)
        return out


class Spacial_Attn(nn.Module):
    def __init__(self, in_dim):
        super(Spacial_Attn, self).__init__()
        self.chanel_in = in_dim
        self.conv = nn.Sequential(
            nn.Conv2d(in_dim * 2, 1, kernel_size=3, stride=1, padding=1),
            nn.Sigmoid()
        )

    def forward(self, x, y):
        xy = torch.cat((x, y), 1)
        out = self.conv(xy)
        y = y * out
        x = x * (1 - out)
        return x, y


class Self_Attn(nn.Module):
    """ Self attention Layer"""

    def __init__(self, in_dim):
        super(Self_Attn, self).__init__()
        self.chanel_in = in_dim

        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))

        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x):
        """
            inputs :
                x : input feature maps (B X C X W X H)
            returns :
                out : self attention value + input feature
                attention: B X N X N (N is Width*Height)
        """
        m_batchsize, C, width, height = x.size()
        proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1)  # B X N X C
        proj_key = self.key_conv(x).view(m_batchsize, -1, width * height)  # B X C X (W*H)
        energy = torch.bmm(proj_query, proj_key)  # transpose check
        attention = self.softmax(energy)  # B X N X N
        proj_value = self.value_conv(x).view(m_batchsize, -1, width * height)  # B X C X N

        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(m_batchsize, C, width, height)

        out = self.gamma * out + x
        return out


class Trans_Attn(nn.Module):
    """ Self attention Layer"""

    def __init__(self, in_dim):
        super(Trans_Attn, self).__init__()
        self.sa1 = Self_Attn(in_dim)
        self.conv1 = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1)
        self.sa2 = Self_Attn(in_dim)
        self.conv2 = nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=1, padding=1)

        self.chanel_in = in_dim
        self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)
        self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)
        self.gamma = nn.Parameter(torch.zeros(1))

        self.softmax = nn.Softmax(dim=-1)

        self.in1 = nn.InstanceNorm2d(in_dim)
        self.in2 = nn.InstanceNorm2d(in_dim)
        self.in3 = nn.InstanceNorm2d(in_dim)
        self.in4 = nn.InstanceNorm2d(in_dim)
        self.in5 = nn.InstanceNorm2d(in_dim)
        self.in6 = nn.InstanceNorm2d(in_dim)

    def forward(self, x, y):
        """
            inputs :
                x : input feature maps (B X C X W X H)
            returns :
                out : self attention value + input feature
                attention: B X N X N (N is Width*Height)
        """
        out_1 = self.sa1(x)
        out_1 = self.in1(out_1)
        out_2 = self.conv1(out_1)
        out_2 = self.in2(out_1 + out_2)
        out_3 = self.sa2(y)
        out_3 = self.in3(out_3)

        m_batchsize, C, width, height = out_2.size()
        proj_query = self.query_conv(out_2).view(m_batchsize, -1, width * height).permute(0, 2, 1)  # B X N X C
        proj_key = self.key_conv(out_2).view(m_batchsize, -1, width * height)  # B X C X (W*H)
        energy = torch.bmm(proj_query, proj_key)  # transpose check
        attention = self.softmax(energy)  # B X N X N
        proj_value = self.value_conv(out_3).view(m_batchsize, -1, width * height)  # B X C X N

        out = torch.bmm(proj_value, attention.permute(0, 2, 1))
        out = out.view(m_batchsize, C, width, height)
        out = self.gamma * out + y
        # normalize out
        out = self.in4(out)
        # out = torch.nn.functional.normalize(out.view(m_batchsize, C, -1), 1).resize(m_batchsize, C, width, height)
        out = self.in5(out + out_3)
        out_4 = self.conv2(out)
        out = self.in6(out + out_4)
        return out
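# Hedged smoke test (not in the original file): the deep encoder halves the
# spatial size eight times (en1..en8), so the generator expects inputs whose
# height and width are multiples of 256. The shapes below are an assumption
# derived from the kernel/stride/padding arithmetic of the layers above.
if __name__ == '__main__':
    g = Sys_Generator(input_nc=3, output_nc=3)
    x = torch.randn(1, 3, 256, 256)  # B x C x H x W
    y = g(x)
    print(y.shape)  # expected: torch.Size([1, 3, 256, 256])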
[ "361857031@qq.com" ]
361857031@qq.com
4148112130c6689e5dadc5cb5afc4ff302c3485c
e4d9141385ace7f178752469aa3f299cc3ffc6a6
/docs/source/conf.py
52d04fd1d5650935005a8bf7ac4e548a117dc1ed
[ "CC-BY-4.0", "MIT" ]
permissive
AJAlabs/kb
44924806fd2d7059fa89377cd650859fd934166d
9d4f13a53ae08616dae6d5560113b0a27881387b
refs/heads/master
2020-04-18T18:51:24.751102
2019-01-26T22:14:56
2019-01-26T22:14:56
167,696,685
0
0
null
null
null
null
UTF-8
Python
false
false
5,440
py
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))

# -- Project information -----------------------------------------------------

project = 'AJAlabs Knowledge Base'
copyright = '2019, AJ Acevedo'
author = 'AJ Acevedo'

# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '0.0.1'

# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.md'

# The master toctree document.
master_doc = 'index'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}

# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
htmlhelp_basename = 'AJAlabsKnowledgeBasedoc'

# -- Options for LaTeX output ------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'AJAlabsKnowledgeBase.tex', 'AJAlabs Knowledge Base Documentation',
     'AJ Acevedo', 'manual'),
]

# -- Options for manual page output ------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'ajalabsknowledgebase', 'AJAlabs Knowledge Base Documentation',
     [author], 1)
]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AJAlabsKnowledgeBase', 'AJAlabs Knowledge Base Documentation',
     author, 'AJAlabsKnowledgeBase', 'One line description of project.',
     'Miscellaneous'),
]

# -- Options for Epub output -------------------------------------------------

# Bibliographic Dublin Core info.
epub_title = project

# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''

# A unique identification for the text.
#
# epub_uid = ''

# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']

# -- Options for both Markdown and reStructuredText support ------------------
from recommonmark.parser import CommonMarkParser

source_parsers = {
    '.md': CommonMarkParser,
}

# Note: this redefinition overrides the earlier '.md'-only assignment above;
# the later value is the one Sphinx actually uses.
source_suffix = ['.rst', '.md']
[ "aj@ajalabs.com" ]
aj@ajalabs.com
1b345f16fe41c4e6c2b77186c46547b36c175f00
72d344286f15d794f08c6964bf3a310fe86c2f67
/TP05/TP01.py
2907b88d09c272e37dd6f11d4993647eaade0128
[]
no_license
boua615/Tps01
c240330967cf4aa79491d75bbb4c82742cd88cae
8c9c06312c1b2b949fb86518758a0c0940d3bcc6
refs/heads/master
2021-02-12T07:03:56.862122
2020-03-05T03:05:12
2020-03-05T03:05:12
244,570,977
0
0
null
null
null
null
UTF-8
Python
false
false
2,322
py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## TP01.py ##

DATA_FILE = 'annuaire-v0.2.xml'

import xml.dom.minidom as minidom
import sys


def main():
    try:
        xmldoc = minidom.parse(DATA_FILE)
    except:
        print("Can't open the file")
        sys.exit()
    print(xmldoc.toxml())
    treat_doc(xmldoc)
    display_tel(xmldoc)
    display_tel_personne(xmldoc)
    add_id_personne(xmldoc)
    return 0


def treat_doc(xmldoc):
    annuaire = xmldoc.getElementsByTagName('annuaire')[0]
    print(annuaire)
    cpt = 0
    for personne in annuaire.childNodes:
        print("-" * 40)
        print("Personne n°", cpt)
        print(personne.toxml())
        cpt += 1


def display_tel(xmldoc):
    telephones = xmldoc.getElementsByTagName('telephone')
    print(telephones)
    cpt = 0
    for tel in telephones:
        print("-" * 40)
        print("Tel n°", cpt)
        print(tel.toxml())
        print("N°:", tel.firstChild.data)
        print("Type:", tel.getAttribute("type"))
        cpt += 1


def display_tel_personne(xmldoc):
    personnes = xmldoc.getElementsByTagName('personne')
    print(personnes)
    cpt = 0
    for personne in personnes:
        print("-" * 40)
        print("Personne n°", cpt)
        nom = personne.getElementsByTagName('nom')[0]
        prenom = personne.getElementsByTagName('prenom')[0]
        tels = personne.getElementsByTagName('telephone')
        print("*" * 20)
        print("Nom:\t", nom.firstChild.data)
        print("Prénom:\t", prenom.firstChild.data)
        for tel in tels:
            print("-" * 20)
            print("N°:", tel.firstChild.data)
            print("Type:", tel.getAttribute("type"))
        cpt += 1


def add_id_personne(xmldoc):
    personnes = xmldoc.getElementsByTagName('personne')
    print(personnes)
    cpt = 0
    for personne in personnes:
        print("-" * 40)
        print("Personne n°", cpt, personne.nodeValue, personne.nodeType)
        personne.setAttribute('id', str(cpt))
        cpt += 1
        print(personne.toxml())


if __name__ == '__main__':
    main()
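# Illustrative input (an assumption; annuaire-v0.2.xml is not included here).
# The tag names are taken from the traversal code above, which expects a
# structure along these lines:
#
#   <annuaire>
#     <personne>
#       <nom>Dupont</nom>
#       <prenom>Marie</prenom>
#       <telephone type="mobile">0601020304</telephone>
#     </personne>
#   </annuaire>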
[ "noreply@github.com" ]
boua615.noreply@github.com
9b9a14f2985d9dd1d7bc6ef666b5d40a2a9a5256
a7e0784b697b6c57920e16e2f54ea0ed2225c0e0
/data/clingen_raw_to_training.py
47d0357cb8921e5915cdc80d02e9879fcf3e88c3
[]
no_license
rumeysa77/ClinGenML
17e1a3786b8711387a61707252307aab13e682c5
c3bf6fbf7d0fe6c1311ce0fcfb4e26d8331bbc7d
refs/heads/master
2023-03-22T04:41:40.669592
2021-02-24T09:04:29
2021-02-24T09:04:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
5,815
py
""" This file processes the raw excel sheet and extract data """ import time import csv from collections import defaultdict from Bio import Entrez from pathlib import Path import unicodedata def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False # clean text does not tokenize anything! def clean_text(text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) def reduce_whitespace(text): return ' '.join(text.split()) major_5_panels = {'experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control'} label_vocab = ['experimental-studies', 'allele-data', 'segregation-data', 'specificity-of-phenotype', 'case-control'] class DatasetExtractor(object): def __init__(self, path=None): self.major_5_pmid_to_panel = defaultdict(set) header = None if path is not None: with open(path, encoding='utf-8', errors='ignore') as f: reader = csv.reader(f) for i, line in enumerate(reader): if i == 0: header = line[:-2] elif line[4] != '': # ClinVar ID cannot be null if line[1] in major_5_panels: self.major_5_pmid_to_panel[line[2]].add(line[1]) def fetch_title_abstract_keywords(self, one_id): ids = one_id Entrez.email = 'leo.niecn@gmail.com' handle = Entrez.efetch(db='pubmed', retmode='xml', id=ids) results = Entrez.read(handle) # retrieving for only 1 result for i, paper in enumerate(results['PubmedArticle']): abstract = [] if 'Abstract' in paper['MedlineCitation']['Article']: for section in paper['MedlineCitation']['Article']['Abstract']['AbstractText']: abstract.append(section) else: continue abstract = " ".join(abstract) title = paper['MedlineCitation']['Article']['ArticleTitle'] keywords = [] for elem in paper['MedlineCitation']['KeywordList']: for e in elem: keywords.append(e) keywords = ' '.join(keywords) return title, abstract, keywords return None def merge_text(self, title, abstract, keywords, entrez=False): # a standard function to map text = '' if not entrez: text = title + " || " + " ".join(keywords.split('/')) + " || " + reduce_whitespace(clean_text(abstract)) else: text = title + " || " + keywords + " || " + reduce_whitespace(clean_text(abstract)) return text def generate_pmid_panel_set(self, log=False, tqdm=False, notebook=False): # will call Entrez BioPython to grab abstracts data = [] pmid_to_data = {} start = time.time() cnt = 0 for k, v in self.major_5_pmid_to_panel.items(): cnt += 1 res = self.fetch_title_abstract_keywords(k) if res is None: continue # 24940364 is not found... 
text = self.merge_text(*res) # label = ['0'] * len(label_vocab) label = [] for v_i in v: label.append(str(label_vocab.index(v_i))) data.append('\t'.join([text, ' '.join(label)])) pmid_to_data[k] = '\t'.join([text, ' '.join(label)]) if log: if cnt % 100 == 0: print(cnt, time.time() - start, 'secs') return data, pmid_to_data def write_data_to_csv(self, data, csv_file_path): # expect `data` directly from `generate_pmid_panel_set` with open(csv_file_path, encoding='utf-8', errors='ignore', mode='w') as f: for line in data: f.write(line + '\n') def write_pmid_to_list(self, path): # it will directly save as "pmids.txt", which is what PubMunch expects # call this function to generate a list of pmid # so you can use PubMunch to download p = Path(path) p.mkdir(exist_ok=True) with open('{}/pmids.txt'.format(path), 'w') as f: for pmid in self.major_5_pmid_to_panel.keys(): f.write(pmid + '\n') def __sub__(self, other): assert type(other) == type(self) new_pmids = set(list(self.major_5_pmid_to_panel.keys())) - set(list(other.major_5_pmid_to_panel)) de = DatasetExtractor() for pmid in new_pmids: panel = self.major_5_pmid_to_panel[pmid] de.major_5_pmid_to_panel[pmid] = panel return de if __name__ == '__main__': # testing de = DatasetExtractor("../corpus/ML Data (as of 3_17_19).csv") print(de.merge_text(*de.fetch_title_abstract_keywords("10206684")))
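# Hedged usage sketch (not part of the original file): a typical pipeline run
# over the raw sheet. The CSV path mirrors the __main__ block above; the
# output filename "training.tsv" is an assumption for illustration only.
#
#     de = DatasetExtractor("../corpus/ML Data (as of 3_17_19).csv")
#     data, pmid_to_data = de.generate_pmid_panel_set(log=True)
#     de.write_data_to_csv(data, "training.tsv")
#
# Each output line is "<title || keywords || abstract>\t<space-separated label
# indices into label_vocab>".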
[ "leo.niecn@gmail.com" ]
leo.niecn@gmail.com
d834840becfb2b4385634a3d1ae576a2f68a0bac
aa57a888c83252f3e57b5d8f7e61f7c1fe807156
/lfd_hw7/HW7_testing_q8.py
e85c15091ca0dd5d455d06290b3c39eab03cf8f0
[ "MIT" ]
permissive
mosmar99/machine-learning-mooc-caltech
178a88a237078347eba2c76bbd05c2b7b9b6726c
deca978e13f6d6950f06417c4d520e71904962d7
refs/heads/main
2023-07-13T20:36:43.217778
2021-09-04T08:28:07
2021-09-04T08:28:07
null
0
0
null
null
null
null
UTF-8
Python
false
false
299
py
# -*- coding: utf-8 -*-
"""
Created on: Wed Jul 7 15:30:17 2021

@author: Mahmut Osmanovic
"""

import matplotlib.pyplot as plt

S = 1
P = 6
S_B_P = 0
its = 5
while its:
    print(S, P)
    S_B_P += (S > P)
    S += 1
    P -= 1
    its -= 1

print("ANS =", S_B_P)
plt.plot([-1, 1], [-1, 1])
[ "47375043+MahmutOsmanovic@users.noreply.github.com" ]
47375043+MahmutOsmanovic@users.noreply.github.com
5c903175d4a7365e542f92cbcead2a25f9846e4c
128420970c272be8d3b374dbfc9687ab0824bc2b
/blog/migrations/0002_comment.py
57087759a8a568a2f6018d073aa5fc2e52b43604
[]
no_license
ZsZJ/django-blog
2330a8aeb97d005cea592ab3e8d37b0b47575db8
8862936dbc744e19166b8f7d26ccf725f6d70a60
refs/heads/master
2020-05-14T20:00:07.958381
2019-04-17T23:05:42
2019-04-17T23:05:42
181,938,526
0
0
null
null
null
null
UTF-8
Python
false
false
908
py
# Generated by Django 2.0.13 on 2019-04-17 22:39

from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('author', models.CharField(max_length=200)),
                ('text', models.TextField()),
                ('created_date', models.DateTimeField(default=django.utils.timezone.now)),
                ('approved_comment', models.BooleanField(default=False)),
                ('post', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments', to='blog.Post')),
            ],
        ),
    ]
[ "ktj.lim@gmail.com" ]
ktj.lim@gmail.com
cdb47a6903ec47437a630d38f1c3ff0a42120182
29061a9fe7701581facbd7eb5066a05c6e3e8878
/fptl_types/__init__.py
db20bc1028a22b8848897fb7bc16f46e01733ea9
[]
no_license
ivan-bocharov/fptl-inferer
4f629da3b4276c2cc20beb4b656a575fcd8887bc
3de88dc2945db6cb3e49eb08bf1c57f1a9a838c4
refs/heads/master
2021-05-26T21:14:40.926757
2013-08-13T20:01:29
2013-08-13T20:01:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
29
py
__author__ = 'ivan-bocharov'
[ "bocharovia@gmail.com" ]
bocharovia@gmail.com
5424dc7cc6bf622625e5e0f9736e273cc18d0a4a
323c59f60860a2dddbecf4c45974bcc80210ae78
/blog/migrations/0002_post_published_date.py
300367b7885f65d66c47ebdbc57833f143e1e821
[]
no_license
rymlassoued/my-first-blog
5f6157fd422fb09339fe4cb7ac0f23d37d711118
1f6022df5c7961b88f8640d2b6359510cb77a2d8
refs/heads/master
2021-01-19T00:30:02.990413
2017-04-04T17:32:46
2017-04-04T17:32:46
87,175,630
0
0
null
null
null
null
UTF-8
Python
false
false
453
py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-04-04 10:27
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='post',
            name='published_date',
            field=models.DateTimeField(blank=True, null=True),
        ),
    ]
[ "rlaswed@softcatalyst.com" ]
rlaswed@softcatalyst.com
f34593fe377603414090a24989aab7c88d64fa51
f9a579efe76ac5436e767489be6d8143da3c3404
/src/apps/helper/middleware.py
5ce48db802e3d4bacbf60bbfc817bebe7a76d41f
[]
no_license
15879246396/youpai-service
2c4db15f8a50de4b6b2ff4204898c3e5217c0258
fd91b68f35664e7e853c1fd2cd55919a97a87fa2
refs/heads/master
2023-01-10T11:30:50.918568
2019-10-25T07:52:50
2019-10-25T07:52:50
205,700,793
0
0
null
2022-12-27T15:35:27
2019-09-01T16:08:44
Python
UTF-8
Python
false
false
7,401
py
import uuid
import sys
import datetime
import json

from django.core.cache import cache
from django.utils import timezone
from django.conf import settings
import requests

from helper.conf import helper_settings
from helper.log import get_logger
from common.mixin import MiddlewareMixin

logger = get_logger(__name__)


class LoggingMiddleware(MiddlewareMixin):
    LOGGER = get_logger('actions')
    RESPONSE_LOG = '%s%s%s' % (
        '{user_id}|{ip}|{bid}|{sid}|{kjzd_user_id}',
        '"{request_method} {request_url}{query_string} {protocol} {status_code} {content_type} {referrer}"',
        '|{ua}'
    )

    def __init__(self, get_response=None):
        super(LoggingMiddleware, self).__init__(get_response)
        self.SGUID_EXPIRIES = 365 * 1
        self.RESPONSE_LOG_FORMAT = self.RESPONSE_LOG.format
        self.SGBID_EXPIRIES = 365 * 1
        self.SGSID_EXPIRIES = None
        self.SGUUID_EXPIRIES = None

    @staticmethod
    def save_session(request):
        if request.user.is_anonymous and request.session.session_key is None:
            request.session.save()

    # to identify a session
    @staticmethod
    def set_sid(request, response):
        if request.session.get(helper_settings.logging_session_sid_name, None) is None:
            request.session[helper_settings.logging_session_sid_name] = uuid.uuid4().hex
        return request.session[helper_settings.logging_session_sid_name]

    # to identify a browser
    @staticmethod
    def set_bid(request, response):
        bid = request.COOKIES.get(helper_settings.logging_cookie_bid_name, None)
        response.set_cookie(
            helper_settings.logging_cookie_bid_name,
            domain=helper_settings.logging_cookie_domain,
            value=bid if bid is not None else uuid.uuid4().hex,
            expires=timezone.datetime.now() + timezone.timedelta(days=365)
        )
        return bid

    @staticmethod
    def set_kjzd_user_id(request, response):
        if hasattr(request, 'user'):
            if not request.user.is_anonymous:
                kjzd_user_id = request.user.kjzd_user_id
                if kjzd_user_id:
                    response.set_cookie(
                        helper_settings.logging_cookie_kjzd_user_id_name,
                        domain=helper_settings.logging_cookie_domain,
                        value=kjzd_user_id,
                        expires=None)
                    return kjzd_user_id
                else:
                    response.delete_cookie(
                        helper_settings.logging_cookie_kjzd_user_id_name,
                        domain=helper_settings.logging_cookie_domain)
        return ''

    @staticmethod
    def _get_traceback(exc_info=None):
        """Helper function to return the traceback as a string"""
        import traceback
        return '\n'.join(traceback.format_exception(*(exc_info or sys.exc_info())))

    def process_response(self, request, response):
        if request.session.get('SGUID', None) is None:
            request.session['SGUID'] = str(uuid.uuid1())
        SGUID = request.session['SGUID']
        response.set_cookie(
            'SGUID', value=SGUID,
            expires=self.SGUID_EXPIRIES if not self.SGUID_EXPIRIES
            else datetime.datetime.now() + datetime.timedelta(days=self.SGUID_EXPIRIES)
        )

        SGBID = request.COOKIES.get('SGBID', None)
        SGBID = SGBID if SGBID and len(SGBID) == 32 else uuid.uuid1().hex
        response.set_cookie(
            'SGBID', value=SGBID,
            expires=self.SGBID_EXPIRIES if not self.SGBID_EXPIRIES
            else datetime.datetime.now() + datetime.timedelta(days=self.SGBID_EXPIRIES)
        )

        if hasattr(request, 'user'):
            if not request.user.is_anonymous:
                SGUUID = request.user.kjzd_user_id
                if SGUUID:
                    response.set_cookie(
                        'SGUUID', value=SGUUID,
                        expires=self.SGUUID_EXPIRIES if not self.SGUUID_EXPIRIES
                        else datetime.datetime.now() + datetime.timedelta(days=self.SGUUID_EXPIRIES)
                    )
                else:
                    response.delete_cookie('SGUUID')

        SGSID = request.COOKIES.get('SGSID', None)
        if not SGSID:
            SGSID = uuid.uuid4().hex
        response.set_cookie(
            'SGSID', value=SGSID,
            expires=self.SGSID_EXPIRIES if not self.SGSID_EXPIRIES
            else datetime.datetime.now() + datetime.timedelta(days=self.SGSID_EXPIRIES)
        )

        user_id = request.session.get('_auth_user_id', '')
        response.set_cookie(
            'user_id', value=user_id,
            expires=self.SGUUID_EXPIRIES if not self.SGUUID_EXPIRIES
            else datetime.datetime.now() + datetime.timedelta(days=self.SGUUID_EXPIRIES)
        )

        sid = self.set_sid(request, response)
        bid = self.set_bid(request, response)
        kjzd_user_id = self.set_kjzd_user_id(request, response)
        query_string = request.META.get('QUERY_STRING', None)

        log_text = self.RESPONSE_LOG_FORMAT(
            user_id=request.session.get('_auth_user_id', ''),
            ip=request.META.get('REMOTE_ADDR', ''),
            request_method=request.method,
            request_url=request.path,
            protocol=request.META.get('SERVER_PROTOCOL', ''),
            status_code=response.status_code,
            referrer=request.META.get('HTTP_REFERER', ''),
            ua=request.META.get('HTTP_USER_AGENT', ''),
            query_string='' if not query_string else ''.join(('?', query_string)),
            content_type=response.get('content-type', ''),
            sid=sid,
            bid=bid,
            kjzd_user_id=kjzd_user_id,
        )
        self.LOGGER.info(log_text)
        return response

    def process_exception(self, request, exception):
        if isinstance(exception, helper_settings.logging_except_exceptions):
            return
        try:
            request_repr = repr(request)
        except Exception as e:
            logger.warning(e)
            request_repr = "Request repr() unavailable"
        message = "{{{\n%s\n}}}\n\n{{{\n%s\n}}}" % (self._get_traceback(sys.exc_info()), request_repr)
        logger.exception(message)


class ClientAuthenticationMiddleware(object):
    def process_request(self, request):
        access_token = cache.get("CLIENT_SELLERWANT_ACCESS_TOKEN")
        if not access_token:
            try:
                data = {
                    "grant_type": "client_credentials"
                }
                reps = requests.post(settings.APP_MANAGE_URL,
                                     auth=(settings.CLIENT_ID, settings.CLIENT_SECERT),
                                     data=data,
                                     headers={"content-type": "application/x-www-form-urlencoded"})
                result = json.loads(reps.content)
                access_token = result.get("access_token")
                cache.set("CLIENT_SELLERWANT_ACCESS_TOKEN", access_token,
                          result.get("expires_in") - 10 * 60)
            except Exception:
                pass

    def process_response(self, request, response):
        response.set_cookie('access_token', cache.get("CLIENT_SELLERWANT_ACCESS_TOKEN"))
        return response
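# Hedged configuration sketch (module path is an assumption based on the file
# location /src/apps/helper/middleware.py, not confirmed by this repo): both
# classes are meant to be wired into Django's middleware setting, e.g.
#
#     MIDDLEWARE = [
#         # ...,
#         'helper.middleware.LoggingMiddleware',
#         'helper.middleware.ClientAuthenticationMiddleware',
#     ]
#
# LoggingMiddleware supports the new-style get_response protocol through
# MiddlewareMixin; ClientAuthenticationMiddleware only defines process_*
# hooks on a plain object, so under recent Django it would need a
# MiddlewareMixin base (or the legacy MIDDLEWARE_CLASSES machinery) to run.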
[ "15797731292@163.com" ]
15797731292@163.com
7792395a268d08840768655085318b6c7c6e3cac
0a523ea34500d6c4324fc4b8b5fdf602f08a8a01
/РК1/social/mysocial/views.py
3b8f44af6b4f6cda41e8000be9edd8b68eb2942b
[]
no_license
killkamad/ServerSoftDev
9b733c9f11c3a0db876e86dc5bb81dfaa476aab8
91248da20143c95cf4304d36e06ae9e9bb3bcb76
refs/heads/master
2020-04-17T16:26:11.477846
2019-04-15T20:33:18
2019-04-15T20:33:18
166,739,947
3
0
null
null
null
null
UTF-8
Python
false
false
2,199
py
from .models import Post
from django.utils import timezone
from django.shortcuts import render, get_object_or_404
from .forms import PostForm, UserRegisterForm
from django.shortcuts import redirect
from django.contrib import messages


# Creating, editing, and viewing posts
def com_list(request):
    posts = Post.objects.filter(published_date__lte=timezone.now()).order_by('-published_date')
    return render(request, 'mysocial/com_list.html', {'posts': posts})


def com_detail(request, pk):
    post = get_object_or_404(Post, pk=pk)
    return render(request, 'mysocial/com_detail.html', {'post': post})


def com_new(request):
    if request.method == "POST":
        form = PostForm(request.POST, request.FILES or None)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('com_detail', pk=post.pk)
    else:
        form = PostForm()
    return render(request, 'mysocial/com_edit.html', {'form': form})


def com_edit(request, pk):
    post = get_object_or_404(Post, pk=pk)
    if request.method == "POST":
        form = PostForm(request.POST or None, request.FILES or None, instance=post)
        if form.is_valid():
            post = form.save(commit=False)
            post.author = request.user
            post.published_date = timezone.now()
            post.save()
            return redirect('com_detail', pk=post.pk)
    else:
        form = PostForm(instance=post)
    return render(request, 'mysocial/com_edit.html', {'form': form})


# Registration form
def register(request):
    if request.method == 'POST':
        form = UserRegisterForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # messages.success(request, f'You have registered as {username} and can now log in to your account!')
            return redirect('login')
    else:
        form = UserRegisterForm()
    return render(request, 'mysocial/register.html', {'form': form})
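# Hedged wiring sketch (urls.py is not shown in this record; route paths are
# assumptions, while the view and URL names match the redirect() targets used
# above):
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.com_list, name='com_list'),
#         path('post/<int:pk>/', views.com_detail, name='com_detail'),
#         path('post/new/', views.com_new, name='com_new'),
#         path('post/<int:pk>/edit/', views.com_edit, name='com_edit'),
#         path('register/', views.register, name='register'),
#     ]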
[ "killka_m@mail.ru" ]
killka_m@mail.ru
0fca165af2a23670c0fdd4db934637cc1abf3c10
77531ad16a3ddf7aa92b7b4de809cce2a96c88a5
/sitetables/toolbox/sources.py
53a8ff4e69c31bffc800f47c48937200b5f4ad69
[]
no_license
idlesign/django-sitetables
6d3ed6b534e51c67704528d6fa1be0bc6f9f64f4
008b748919ee330da168d4766cd6b3c3c27e45b8
refs/heads/master
2022-02-17T21:25:26.430653
2022-02-04T12:46:19
2022-02-04T12:46:19
164,444,235
4
0
null
null
null
null
UTF-8
Python
false
false
9,028
py
import re
from collections import namedtuple
from itertools import chain
from typing import Optional, List, Union, Dict, Type, Tuple

from django.db.models import QuerySet, Model
from django.http import HttpRequest, JsonResponse
from django.urls import reverse

from .columns import TableColumn

if False:  # pragma: nocover
    from .tables import Table

TypeTableSource = Union[Dict, List[Dict], Type[Model], QuerySet]
TypeTableColumns = Dict[str, TableColumn]
TypeFilteredItems = Union[QuerySet, List]
TypeServerItems = Tuple[int, int, TypeFilteredItems]
TypePreparedItems = List[Dict[str, str]]

TableItemsFilter = namedtuple('TableItemsFilter', [
    'start',
    'length',
    'search',
    'order',
])


class TableSource:
    """Base data source for tables."""

    columns: TypeTableColumns
    _columns_by_idx: Dict[int, TableColumn]
    _url_responder = None

    _RE_COLUMN_DEF = re.compile(r'\[(\d+)\]\[([a-z]+)\]')

    def __init__(self, source, options: Optional[dict] = None):
        self.columns = {}
        self._columns_by_idx = {}
        self.row_id = 'DT_RowId'
        self.options = options or {}
        self._rows = []
        self._bootstrap(source)

    @classmethod
    def spawn(cls, source, params: dict) -> 'TableSource':
        """Alternative constructor.

        :param source:
        :param params:

        """
        return cls(source, options=params.get('options'))

    def _server_get_filter(self, source: dict) -> TableItemsFilter:
        """Returns a filter object composed from a source dictionary (e.g. POST params).

        :param source:

        """
        by_idx = self._columns_by_idx
        re_def = self._RE_COLUMN_DEF
        order = []

        length_default = 10
        length = int(source.get('length', length_default))
        if length > 5000:
            length = length_default

        start = int(source.get('start', 0))

        items_filter = TableItemsFilter(
            start=start,
            length=length,
            search=source.get('search[value]', '').strip() or '',
            order=order,
        )

        source = dict(sorted(source.items(), key=lambda item: item[0]))

        for key, val in source.items():
            if key.startswith('order'):
                match = re_def.search(key)
                if not match:
                    continue
                if match.group(2) == 'dir':
                    continue
                column_idx = int(val)
                column_name = by_idx.get(column_idx)
                if not column_name:
                    continue
                order_desc = source.get(f'order[{match.group(1)}][dir]', 'asc') == 'desc'
                order.append(f"{'-' if order_desc else ''}{column_name}")

        return items_filter

    def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
        """Must return serverside items filtered using the given filter.

        :param items_filter:

        """
        raise NotImplementedError  # pragma: nocover

    def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
        """Prepares items for on_server response.

        :param items:

        """
        return items

    def respond(self, request: HttpRequest) -> JsonResponse:
        """
        https://datatables.net/manual/server-side

        :param request:

        """
        source = request.POST
        items_filter = self._server_get_filter(source.dict())
        count_total, count_filtered, filtered = self._server_get_items(items_filter)

        start = items_filter.start
        filtered = filtered[start:start + items_filter.length]
        filtered = self._server_prepare_items(filtered)

        draw = source.get('draw', 1)
        draw = int(draw)  # As per docs.

        out = {
            'data': filtered,
            'draw': draw,
            'recordsTotal': count_total,
            'recordsFiltered': count_filtered,
        }

        return JsonResponse(out)

    def _get_columns(self) -> TypeTableColumns:
        """Should return columns dictionary."""
        columns = {}

        for name, title in self.options.get('columns_add', {}).items():
            columns[name] = TableColumn(name=name, title=title)

        return columns

    def _bootstrap(self, source: TypeTableSource):
        """The place for a source-specific bootstrap."""
        columns = self._get_columns()
        self.columns = columns
        self._columns_by_idx = {idx: column for idx, column in enumerate(columns)}

    def contribute_to_config(self, config: dict, table: 'Table'):
        """Updates table configuration dictionary with source-specific params.

        :param config:
        :param table:

        """
        config.update({
            'createdRow': lambda: (
                "function(row, data, idx){var v=data['%s']; if (v){$(row).attr('data-id', v);}}" % self.row_id),
            'processing': True,
            'columns': [column.as_dict() for column in self.columns.values()],
        })

        options = self.options

        if options.get('on_server', False):
            url_responder = self._url_responder

            if url_responder is None:
                url_responder = self.__class__._url_responder = reverse('sitetables:respond')

            config.update({
                'serverSide': True,
                'ajax': {
                    'url': url_responder,
                    'type': 'POST',
                    'data': {
                        'tableName': table.name,
                    }
                },
            })

        else:
            if not options.get('init_dom'):
                # todo maybe use serialization instead of string casting
                # todo FK support
                config['data'] = [{k: f'{v}' for k, v in row.items()} for row in self.rows]

    @property
    def rows(self) -> List[dict]:
        """Represents table rows."""
        return self._rows


class ListDictsSource(TableSource):
    """Static data source.

    .. code-block:: python

        source = [
            {
                'one': '1',
                'two': '2',
            },
            {
                'one': '3',
                'two': '4',
            },
        ]

    """

    def _bootstrap(self, source: TypeTableSource):
        names = list(source[0].keys())
        self.options['columns_add'] = dict.fromkeys(names, '')
        self._rows = source
        self.row_id = names[0]  # Use first column value.
        super()._bootstrap(source)


class ModelSource(TableSource):
    """Django model datasource.

    .. code-block:: python

        source = Article  # Model class.
        source = Article.objects.filter(hidden=False)  # Or a QuerySet.

    """

    model: Type[Model] = None

    def _get_columns(self) -> TypeTableColumns:
        columns = {}

        meta = self.model._meta

        for field in chain(meta.concrete_fields, meta.private_fields, meta.many_to_many):
            name = field.name
            columns[name] = TableColumn(name=name, title=field.verbose_name, source=field)

        columns.update(super()._get_columns())

        return columns

    def _bootstrap(self, source: TypeTableSource):
        if isinstance(source, QuerySet):
            model = source.model
            qs = source

        else:  # Model class
            model = source
            qs = model.objects.all()

        self.model = model
        self.qs = qs
        self.row_id = model._meta.pk.name

        super()._bootstrap(source)

    def _server_get_items(self, items_filter: TableItemsFilter = None) -> TypeServerItems:
        qs = self.qs
        filter_kwargs = {}

        search = items_filter.search

        if search:
            filter_kwargs['title__contains'] = search

        objects = qs.filter(**filter_kwargs)

        count_total = qs.count()
        count_filtered = objects.count()

        order = items_filter.order

        if order:
            objects = objects.order_by(*order)

        return count_total, count_filtered, objects

    def _server_prepare_items(self, items: TypeFilteredItems) -> TypePreparedItems:
        dicts = []
        columns = self.columns

        for model in items:
            item_data = {}

            for column_name, column in columns.items():
                if column.source is None:
                    # Model property.
                    item_data[column_name] = getattr(model, column_name)
                else:
                    # Model field.
                    item_data[column_name] = column.source.value_from_object(model)

            dicts.append(item_data)

        return dicts

    @property
    def rows(self) -> List[dict]:
        columns = self.columns

        _, _, qs = self._server_get_items(TableItemsFilter(
            start=0,
            length=0,
            search='',
            order=[],
        ))

        result = qs.values(*columns.keys())

        return result
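# Hedged usage sketch (the Article model name comes from the docstrings above;
# the options dict is an assumption): a server-side table over a Django model
# can be built from either a model class or a queryset:
#
#     source = ModelSource(Article.objects.filter(hidden=False),
#                          options={'on_server': True})
#
# With on_server=True, contribute_to_config() points DataTables at the
# 'sitetables:respond' endpoint, and respond() answers the POST protocol
# described at https://datatables.net/manual/server-side.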
[ "idlesign@yandex.ru" ]
idlesign@yandex.ru
ab0d95439f8363b720d81aa80ae3aa74a0432e28
104005986bccea0a4213cbd55d833c95baf2f4fa
/drivers/phot_drivers/LCOGT_template_single_request.py
c6603728c1e635419c96b9c4a2e6edda588ecfe7
[]
no_license
lgbouma/cdips_followup
8a92ec9a31b405d316c668a6d42ce10ad47f0501
99ac6c6c709f96a58083a5ff7c4cf2d4f0b554a8
refs/heads/master
2023-08-14T02:33:17.841926
2023-08-01T00:46:19
2023-08-01T00:46:19
206,371,538
0
0
null
null
null
null
UTF-8
Python
false
false
6,229
py
""" Given a source_id, make LCOGT photometry followup requests, and optionally submit them to the LCOGT API. """ import numpy as np from astropy.time import Time from cdips_followup.manage_ephemerides import ( query_ephemeris, get_ephemeris_uncertainty ) from cdips_followup.LCOGT_dedicated_requests import ( get_dedicated_request, given_dedicated_requests_validate_submit ) from astrobase.services.identifiers import tic_to_gaiadr2 TRANSITTYPEDICT = { 'all': ['OIBEO', 'IBEO', 'OIBE', 'OIB', 'BEO'], 'partials': ['OIB', 'BEO'], 'totals': ['OIBEO', 'IBEO', 'OIBE'], 'fulltotals': ['OIBEO'] } def main(): ########################################## # CHANGE BELOW savstr = '20230419_tic402980664_23B' # eg, 20191207_TOI1098_request_2m_tc_secondary. "ephemupdate" if it is one. (this cancels pending observations) overwrite = 1 validate = 0 submit = 0 tic_id = '402980664' # '120105470' source_id = None # '6113920619134019456' # can use instead of TIC filtermode = 'ip'# 'zs', 'gp', 'ip' #telescope_class = '1m0' # '1m0', '2m0', 'special' telescope_class = 'special' # '1m0', '2m0', 'special' ipp_value = 1 # usually 1 #max_search_time = Time('2022-12-31 23:59:00') max_search_time = Time('2024-01-31 23:59:00') verify_ephemeris_uncertainty = 1 # require t_tra uncertainty < 2 hours inflate_duration = 0 # if t_tra uncertainty > 1 hour, inflate tdur by +/- 45 minutes per side transit_type = 'totals' # see above max_n_events = 99 # else None. n_events is per eventclass. raise_error = False # raise an error if max_duration_error flag raised. max_duration_error = 30 # the submitted LCOGT request must match requested durn to within this difference [minutes] sites = ['Palomar'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory'] #sites = ['Keck Observatory'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory'] #sites = ['Cerro Paranal'] # Default None for LCOGT. Could do e.g., 'special' and ['Keck Observatory'] force_acceptability = 50 # None or int. # CHANGE ABOVE ########################################## max_airmass_sched = 2.5 manual_ephemeris = False manual_ephemeris = True # FIXME create_eventclasses = TRANSITTYPEDICT[transit_type] submit_eventclasses = TRANSITTYPEDICT[transit_type] if source_id is None: assert isinstance(tic_id, str) source_id = tic_to_gaiadr2(tic_id) if manual_ephemeris: period = 18.559/24 period_unc = 0.001/24 epoch = 2457000 + 1791.2972827806442 epoch_unc = 1e-5 duration = 1.04 else: # get ephemeris from ephemerides.csv d = query_ephemeris(source_id=source_id) period, epoch, duration = ( d['period'], d['epoch'], d['duration'] ) period_unc, epoch_unc, duration_unc = ( d['period_unc'], d['epoch_unc'], d['duration_unc'] ) if verify_ephemeris_uncertainty: delta_t_tra_today = ( get_ephemeris_uncertainty(epoch, epoch_unc, period, period_unc, epoch_obs='today') ) if delta_t_tra_today*24 < 0: msg = f'ERR! Got negative ephem unc of {delta_t_tra_today*24:.1f} hr. Need to give a believable ephem unc..' raise ValueError(msg) if delta_t_tra_today*24 > 2: msg = f'ERR! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is too high.' raise ValueError(msg) if delta_t_tra_today*24 > 1: msg = f'WRN! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is risky.' print(msg) else: msg = f'INFO! Got ephem unc of {delta_t_tra_today*24:.1f} hr. This is fine.' print(msg) if inflate_duration: assert verify_ephemeris_uncertainty if delta_t_tra_today*24 > 1: msg = f'... inflating transit duration for scheduling pursposes by 1.5 hours.' 
print(msg) duration += 1.5 # add # "requests" is a list of lists. Higher level is each eventclass. Level # below is each event, in that eventclass. requests = get_dedicated_request( savstr, source_id, period, epoch, duration, create_eventclasses, overwrite=overwrite, max_search_time=max_search_time, filtermode=filtermode, telescope_class=telescope_class, ipp_value=ipp_value, sites=sites, force_acceptability=force_acceptability, max_airmass_sched=max_airmass_sched ) # if a maximum number of events is set, impose it! if isinstance(max_n_events, int): _requests = [] for ix in range(len(create_eventclasses)): print('starting with {} {} events.'. format(len(requests[ix]), create_eventclasses[ix]) ) for eventclass in requests: _eventclass = [] starttimes = [] for req in eventclass: starttimes.append(req['requests'][0]['windows'][0]['start']) # sort by start time, cut to get the closest ones. sort_times = np.sort(starttimes) sel_times = sort_times[ : max_n_events] for req in eventclass: starttime = req['requests'][0]['windows'][0]['start'] if starttime in sel_times: _eventclass.append(req) if len(_eventclass) > 0: _requests.append(_eventclass) if len(_requests) == 0: print('WRN!: got no times') return assert len(_requests[0]) <= max_n_events requests = _requests print('WRN!: trimmed to {} events.'.format(len(requests[0]))) if len(sel_times)>0: print('WRN!: max time: \n{}'.format(repr(sel_times[-1]))) print('\nWRN!: selected times: \n{}'.format(repr(sel_times))) else: print('WRN!: got no times') given_dedicated_requests_validate_submit( requests, submit_eventclasses, validate=validate, submit=submit, max_duration_error=max_duration_error, raise_error=raise_error ) if __name__ == "__main__": main()
[ "bouma.luke@gmail.com" ]
bouma.luke@gmail.com
f62e8e139b6ce07d635393ef4c7fe94bede87e36
7f747228fd52c835bbbad1d51674f2991b4e0ccb
/Veriflow_Automation/Veriflow_BDD_Framework/features/steps/login.py
81014629a6cf06c7ce848bf1de42651e05d64f42
[]
no_license
adachenski/Python-Automation
fec234f9983cee4dcbd2e8d158a944e555167f22
2a4bb4c2b33b4ce8a9c66b06f251f6c834973c50
refs/heads/master
2020-03-19T07:39:03.879326
2018-06-16T22:04:02
2018-06-16T22:04:02
136,135,805
0
0
null
null
null
null
UTF-8
Python
false
false
691
py
from selenium_logic import login_logic
from behave import step, given, when, then

loginPage = login_logic.Login()

@given(u'Open Chrome Browser')
def open_browser(context):
    loginPage.open_chrome()

@when(u'Navigate to Veriflow Login page at "{url}"')
def navigate_to_url(context, url):
    loginPage.go_to_login(url)

@when(u'Enter username "{nas}"')
def username(context, nas):
    loginPage.enter_username(nas)

@when(u'Enter password "{password}"')
def password(context, password):
    loginPage.enter_password(password)

@then(u'Click on Login Tab')
def login(context):
    loginPage.login()

@then(u'I close the browser')
def step_impl(context):
    loginPage.close_browser()
[ "adachenski@aol.com" ]
adachenski@aol.com
98c818859f8e9449f37db57cf03d9b8bf6bcab06
c0a090c54ee58f26f9f3973a50ffc9423a620fa4
/bloomberg/sound.py
d85772502ebfd0f3a8f17e0b2ae0665c819bea86
[]
no_license
guts2014/going-65-in-a-60-zone
1b4e41ac9d190e706d267466f892b053e6c42576
9958b754ca796c94a6b2dbe09f03549532e93240
refs/heads/master
2020-04-13T01:05:17.389084
2014-10-12T12:12:06
2014-10-12T12:12:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
854
py
from django.shortcuts import render

from sound_mixer import SoundMixer
from API import *

def sounds_page(request):
    session = get_new_session()

    # BP
    historical_data = getCompaniesHistory(session, ['BP/ LN'], 20120101, 20140101, 'MONTHLY')
    historical_prices = []
    for record in historical_data:
        historical_prices.append(record['price'])
    mixer = SoundMixer()
    mixer.add_dataset(historical_prices)
    mixer.generate_file('stock1')

    # RDSB
    historical_data = getCompaniesHistory(session, ['RDSB LN'], 20120101, 20140101, 'MONTHLY')
    historical_prices = []
    for record in historical_data:
        historical_prices.append(record['price'])
    mixer = SoundMixer()
    mixer.add_dataset(historical_prices, 400)
    mixer.generate_file('stock2')

    return render(request, "sounds.html")
[ "velizar.shulev@gmail.com" ]
velizar.shulev@gmail.com
10c75430230872f750e9ed2c0a241436c9120a7f
b509ef07d752e987f4cb84d1abd4c3a98488a6c7
/resources/lib/streamlink/plugins/nownews.py
02bd76def1234a8b05929f26bb670853a147f7ba
[ "BSD-2-Clause" ]
permissive
Twilight0/script.module.streamlink.base
d91245d1a43d6b3191b62a6eb4b1cf70598ed23e
c1e4628715a81806586b10323b8cb01424bbb6fc
refs/heads/master
2021-01-21T04:32:41.658823
2020-09-07T20:56:29
2020-09-07T20:56:29
101,915,967
6
4
BSD-2-Clause
2018-01-14T15:20:47
2017-08-30T18:31:47
Python
UTF-8
Python
false
false
2,149
py
import logging import re import json from streamlink.plugin import Plugin from streamlink.stream import HLSStream log = logging.getLogger(__name__) class NowNews(Plugin): _url_re = re.compile(r"https?://news.now.com/home/live") epg_re = re.compile(r'''epg.getEPG\("(\d+)"\);''') api_url = "https://hkt-mobile-api.nowtv.now.com/09/1/getLiveURL" backup_332_api = "https://d7lz7jwg8uwgn.cloudfront.net/apps_resource/news/live.json" backup_332_stream = "https://d3i3yn6xwv1jpw.cloudfront.net/live/now332/playlist.m3u8" @classmethod def can_handle_url(cls, url): return cls._url_re.match(url) is not None def _get_streams(self): res = self.session.http.get(self.url) m = self.epg_re.search(res.text) channel_id = m and m.group(1) if channel_id: log.debug("Channel ID: {0}".format(channel_id)) if channel_id == "332": # there is a special backup stream for channel 332 bk_res = self.session.http.get(self.backup_332_api) bk_data = self.session.http.json(bk_res) if bk_data and bk_data["backup"]: log.info("Using backup stream for channel 332") return HLSStream.parse_variant_playlist(self.session, self.backup_332_stream) api_res = self.session.http.post(self.api_url, headers={"Content-Type": 'application/json'}, data=json.dumps(dict(channelno=channel_id, mode="prod", audioCode="", format="HLS", callerReferenceNo="20140702122500"))) data = self.session.http.json(api_res) for stream_url in data.get("asset", {}).get("hls", {}).get("adaptive", []): return HLSStream.parse_variant_playlist(self.session, stream_url) __plugin__ = NowNews
[ "twilight@freemail.gr" ]
twilight@freemail.gr
a5a17178600de20cbfc8a242569037482fae9caf
fccb5a43179906ddc3dd37849ac2a89cacf44981
/sphinx/source/exercises/solution/03_os_sub_req/ex5.py
653a604a993839e3b042cfc9ccaf6cd8eba8ff1f
[]
no_license
YasmineOweda/spring2021
a48c1c4eaa525053a0e2188cf088124b004a35d8
072aadba20bfbc659427265fa228518fe4b09ff3
refs/heads/master
2023-04-29T10:20:14.132211
2021-05-11T09:07:40
2021-05-11T09:07:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
435
py
import os

#1
os.mkdir('os_exercises')

#2
os.chdir('os_exercises')
open('exercise.py', 'w').close()

#3
x = input('Please write something to the file: ')
with open('exercise.py', 'w') as f:
    f.write(x)

#4
x = input('Please write something more to another file: ')
with open('exercise2.py', 'w') as f:
    f.write(x)

#5
with open('exercise.py', 'r') as f1:
    with open('exercise2.py', 'r') as f2:
        print(f1.read() + f2.read())
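The same exercise can also be written with pathlib, which avoids the working-directory change entirely; a minimal sketch (the sample text is arbitrary):

from pathlib import Path

base = Path('os_exercises')
base.mkdir(exist_ok=True)
(base / 'exercise.py').write_text('hello')    # arbitrary sample text
(base / 'exercise2.py').write_text(' world')
print((base / 'exercise.py').read_text() +
      (base / 'exercise2.py').read_text())    # hello world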
[ "clbo@kea.dk" ]
clbo@kea.dk
0695682f12128d61213cc646b2a539b7cd47827c
3f65ba0f9c3217acd418648a04f9dffdb4c35d2d
/AutoSubmission.py
5969b364cce4baf6a4f8b0a87322130e8db505f0
[]
no_license
jorqueraian/AutomaticCanvasUpload
a348e79e0a0c0d8fb3c7c329eef10745507ab498
e3f05a8fdf28eceacd99f4932c5e0eefb506858d
refs/heads/master
2020-09-27T17:11:05.822355
2019-12-09T20:50:03
2019-12-09T20:50:03
226,566,809
1
0
null
null
null
null
UTF-8
Python
false
false
3,117
py
# Import the Canvas class from Student import Student from StringSimilarity import cost_of_alignment import os import sys import re USER_PATH = 'C:\\Users\\jorqu\\' def clean_str(input_str): # Remove .pdf or what ever # Make lowercase and remove whitespace # remove _ or - return str(input_str).split('.')[0].lower().strip().replace('_', '').replace('-', '').replace(',', '') def clean_course_code(course_code): # For a case like CSCI 3104-100--100B--200, this wil return CSCI 3104 cleaned_code = re.sub(r'\D\d{3}\D.*$', '', course_code + ' ') return cleaned_code def try_verify_path(local_path): if os.path.exists(local_path): return True split_path = local_path.split('\\') if split_path[0] == '..': new_path = os.path.abspath(local_path) # For some reason this isn't working, it returns false elif split_path[0] == 'Documents' or split_path[0] == 'Downloads' or split_path[0] == 'Desktop': new_path = USER_PATH + local_path else: return False local_path = new_path if os.path.exists(str(local_path)): return True else: return False def find_assignment(student, file_path: str): file_name = file_path.split('\\')[-1] cleaned_file_name = clean_str(file_name) assignments = student.get_upcoming_undated_assignments() best_match = None for a in assignments: combine_str = clean_str(clean_course_code(a[2])+a[0]) # We have the weights because we really only want move around parts of the string rather than replace. cost = cost_of_alignment(combine_str, cleaned_file_name, 9, 1, 9) cost_per_char = cost / (len(combine_str)+len(cleaned_file_name)) if best_match is None or cost_per_char < best_match[1]: best_match = (a, cost_per_char) return best_match def auto_upload(student, file_path): assignment = find_assignment(student, file_path) course_id = assignment[0][3] assignment_id = assignment[0][1] print(f'Submitting Assignment: {assignment[0][0]}\n' f'Course: {assignment[0][2]}\n' f'File: {file_path}\n' f'Cost per character: {assignment[1]}') confirmation = input('Please confirm(Y/n)').lower() if confirmation == 'y': print('Submitting assignment....') student.make_submission(course_id, assignment_id, file_path) else: print('No Submission made') if __name__ == '__main__': # For reference: Documents\CSCI_3104_Final_Exam.zip # Initialize student API me = Student() # Verify that a path was provided if len(sys.argv) < 2: print('No file selected') input('Press enter key to exit ...') else: path = sys.argv[1] # Verify correctness of path if try_verify_path(path): # Upload to canvas try: auto_upload(me, path) except Exception as e: input(f'Error: {e}\nPress enter key to exit...') else: print(f'File not found: {path}') input('Press enter key to exit ...')
[ "jorqueraian@gmail.com" ]
jorqueraian@gmail.com
aedcc1298924c6a2be19fcb6dd5c47bb3680c3c3
394c88bbe556c98ace0301eea9e41410040cfb63
/double_dqn/model.py
143d562371876c6f8fe4ecc4a9a52cc5ddf4d286
[ "MIT" ]
permissive
1980744819/playing-mario-with-DQN
8b3dda28e107ff27b3c307cf979179729d352f9f
f263e3615bf4439ad17d95a9f449c6145792402b
refs/heads/master
2020-04-29T14:52:44.215593
2019-05-21T13:08:00
2019-05-21T13:08:00
176,210,471
4
1
null
null
null
null
UTF-8
Python
false
false
1,252
py
#!/usr/bin/env python # -*- coding: utf-8 -*- # @File : model.py # @Author: zixiao # @Date : 2019-04-02 # @Desc : from torch import nn import torch.nn.functional as F import torch class CNN(nn.Module): def __init__(self, in_channels, num_action): super(CNN, self).__init__() self.conv1 = nn.Sequential( nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=8, stride=4), nn.ReLU(), nn.MaxPool2d(kernel_size=2) ) self.conv2 = nn.Sequential( nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2), nn.ReLU(), nn.MaxPool2d(kernel_size=2) ) self.conv3 = nn.Sequential( nn.Conv2d(in_channels=64, out_channels=128, kernel_size=2, stride=1), nn.ReLU(), nn.MaxPool2d(kernel_size=2) ) self.fc4 = nn.Linear(in_features=2 * 2 * 128, out_features=256) self.fc5 = nn.Linear(in_features=256, out_features=num_action) def forward(self, x): # 8 200 200 x = self.conv1(x) # 32 24,24 x = self.conv2(x) # 64 5 5 x = self.conv3(x) # 128 2 2 x = self.fc4(x.view(x.size(0), -1)) x = self.fc5(x) return x
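A minimal smoke test for the CNN above; the 8-channel 200x200 input follows the shape comments in forward(), while the action count and import path are assumptions:

import torch
from double_dqn.model import CNN  # assumed import path for this file

net = CNN(in_channels=8, num_action=6)        # 8 stacked frames, 6 actions (assumed)
q_values = net(torch.randn(1, 8, 200, 200))   # one 8x200x200 observation
print(q_values.shape)  # torch.Size([1, 6]) -- one Q-value per action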
[ "1980744819@qq.com" ]
1980744819@qq.com
db3b4d13adbd04eba6106f6e0d8559771deadcd5
61699048dc567cd3a814e5b987599dae175bed19
/Python/month01/day15/exercise02.py
ba4af22e18080c30f44bdc184166efdfe0b8e96a
[]
no_license
Courage-GL/FileCode
1d4769556a0fe0b9ed0bd02485bb4b5a89c9830b
2d0caf3a422472604f073325c5c716ddd5945845
refs/heads/main
2022-12-31T17:20:59.245753
2020-10-27T01:42:50
2020-10-27T01:42:50
null
0
0
null
null
null
null
UTF-8
Python
false
false
611
py
""" 练习2:定义函数,根据生日(年月日),计算活了多天. 输入:2010 1 1 输出:从2010年1月1日到现在总共活了3910天 """ import time def life_days(year, month, day): # 当前 - 出生时间 # time_tuple = time.strptime("%d-%d-%d" % (year, month, day), "%Y-%m-%d") time_tuple = (year, month, day, 0, 0, 0, 0, 0, 0) life_second = time.time() - \ time.mktime(time_tuple) return life_second / 60 / 60 / 24 y = 1990 m = 9 d = 18 result = life_days(y, m, d) print(f"从{y}年{m}月{d}日到现在总共活了{result:.0f}天")
[ "1450030827@qq.com" ]
1450030827@qq.com
acf4d5704a2b5145e70f57bf4bd46fcc5c62fa9d
a40ea9fa24e25e7dd047c20593762ec39749207a
/etc/week3pytyon.py
43e3553d0c6bd756ea586fceb6793d18147089bd
[]
no_license
mjstella/Sparta_MJ
16fbacf7ae08405dad68df3d1fc1d41d2c9d5d5e
0f90c058c601cd35572e48d3c593594356526655
refs/heads/master
2022-10-12T22:33:18.962042
2020-06-08T09:12:39
2020-06-08T09:12:39
265,854,949
0
0
null
null
null
null
UTF-8
Python
false
false
817
py
from pymongo import MongoClient  # import pymongo (install the package first, of course)

client = MongoClient('localhost', 27017)  # MongoDB runs on port 27017
db = client.dbsparta  # create/use a db named 'dbsparta'

# db.users.insert_one({'name':'bobby','age':21})
# db.users.insert_one({'name':'kay','age':27})
# db.users.insert_one({'name':'john','age':30})

all_users = list(db.users.find())
# print(all_users)

bobbys = list(db.users.find({'name':'bobby'}))
print(bobbys)

# view a document while excluding a specific key
kay = db.users.find_one({'name':'kay'}, {'_id': False})
print(kay)

# db.users.update_one({'name':'john'}, {'$set':{'name':'james', 'age':'20'}})
db.users.update_many({'name':'bobby'}, {'$set':{'occupation':'student'}})
[ "mjstella918@gmail.com" ]
mjstella918@gmail.com
64d618b9411d6abdd7dd197355558092f58401ff
c0b34870921e11882dfacc5cc67e347c3160ed1a
/train.py
aa9223224e8334d31cedd2412d290c9c3365a3e5
[]
no_license
chris-thomas/idn-superresolution-tf2
ad2f59c6da43e670a4255335d0edfc86a0dce6b1
0b97c859daa80797958f0ea68a6ef385c4fa4335
refs/heads/main
2023-04-01T15:14:43.809934
2021-04-08T14:47:04
2021-04-08T14:47:04
332,895,607
2
0
null
null
null
null
UTF-8
Python
false
false
4,413
py
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0'

from numba import cuda
device = cuda.get_current_device()
device.reset()

import time
import tensorflow as tf

from model import evaluate, evaluate_ssim
from tensorflow.keras.losses import MeanAbsoluteError
from tensorflow.keras.metrics import Mean
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay

physical_devices = tf.config.experimental.list_physical_devices('GPU')
print(physical_devices)
tf.config.experimental.set_memory_growth(physical_devices[0], True)
tf.config.set_soft_device_placement(False)

class Trainer:
    def __init__(self,
                 model,
                 loss,
                 learning_rate,
                 checkpoint_dir='./ckpt/idn'):

        self.now = None
        self.loss = loss
        optimizer = Adam(learning_rate)

        self.checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                              validation=tf.Variable(-1.0),
                                              optimizer=optimizer,
                                              model=model)
        self.checkpoint_manager = tf.train.CheckpointManager(checkpoint=self.checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=3)

        self.restore()

    @property
    def model(self):
        return self.checkpoint.model

    def train(self, train_dataset, valid_dataset, steps, evaluate_every=1000, save_best_only=False):
        loss_mean = Mean()

        ckpt_mgr = self.checkpoint_manager
        ckpt = self.checkpoint

        self.now = time.perf_counter()

        for lr, hr in train_dataset.take(steps - ckpt.step.numpy()):
            ckpt.step.assign_add(1)
            step = ckpt.step.numpy()

            loss = self.train_step(lr, hr)
            loss_mean(loss)

            if step % evaluate_every == 0:
                loss_value = loss_mean.result()
                loss_mean.reset_states()

                # Compute metric on validation dataset
                validation_value = self.evaluate_ssim(valid_dataset)

                duration = time.perf_counter() - self.now
                print(f'{step}/{steps}: loss = {loss_value.numpy():.3f}, SSIM = {validation_value.numpy():.3f} ({duration:.2f}s)')

                if save_best_only and validation_value <= ckpt.validation:
                    self.now = time.perf_counter()
                    # skip saving checkpoint, no validation improvement
                    continue

                ckpt.validation = validation_value
                ckpt_mgr.save()

                self.now = time.perf_counter()

    @tf.function
    def train_step(self, lr, hr):
        with tf.GradientTape() as tape:
            lr = tf.cast(lr, tf.float32)
            hr = tf.cast(hr, tf.float32)

            sr = self.checkpoint.model(lr, training=True)
            loss_value = self.loss(hr, sr)

        gradients = tape.gradient(loss_value, self.checkpoint.model.trainable_variables)
        self.checkpoint.optimizer.apply_gradients(zip(gradients, self.checkpoint.model.trainable_variables))

        return loss_value

    def evaluate(self, dataset):
        return evaluate(self.checkpoint.model, dataset)

    def evaluate_ssim(self, dataset):
        return evaluate_ssim(self.checkpoint.model, dataset)

    def restore(self):
        if self.checkpoint_manager.latest_checkpoint:
            self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
            print(f'Model restored from checkpoint at step {self.checkpoint.step.numpy()}.')

class IdnTrainer(Trainer):
    def __init__(self,
                 model,
                 checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[50000], values=[1e-4, 5e-5])):
        super().__init__(model, loss=MeanAbsoluteError(), learning_rate=learning_rate, checkpoint_dir=checkpoint_dir)

    def train(self, train_dataset, valid_dataset, steps=100000, evaluate_every=1000, save_best_only=True):
        super().train(train_dataset, valid_dataset, steps, evaluate_every, save_best_only)
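A hedged usage sketch for the trainer; the model builder name, the x4 scale, and the dummy dataset shapes are all assumptions, since the real model and data pipeline live outside this file:

import tensorflow as tf
from model import build_idn  # assumed builder function, not shown in this file

# Toy (lr, hr) pairs standing in for a real super-resolution pipeline.
lr = tf.random.uniform((8, 24, 24, 3))   # low-resolution patches
hr = tf.random.uniform((8, 96, 96, 3))   # matching x4 high-resolution patches
train_ds = tf.data.Dataset.from_tensor_slices((lr, hr)).batch(2).repeat()
valid_ds = tf.data.Dataset.from_tensor_slices((lr, hr)).batch(2)

trainer = IdnTrainer(model=build_idn(scale=4), checkpoint_dir='./ckpt/idn')
trainer.train(train_ds, valid_ds, steps=200, evaluate_every=100)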
[ "cdt@christhomas.co.uk" ]
cdt@christhomas.co.uk
3d59d39806860870c249b7daa8a0f68c3b343f39
15c70a52bb2a4b5b3bdd4a62d97bbe27e3989a24
/src_old/manual_colony_picking.py
5fb61b2e322da571f18b6cbdaeddc1d0a3c7526e
[ "MIT" ]
permissive
nadimest/opentrons-colony-picker
91cbcc7a80fe9cefa0c8f45bab7ac51a715b87e6
8097f12cab91398377463a3a76367b73fa0a2318
refs/heads/master
2021-01-01T13:29:13.253905
2020-07-29T14:27:49
2020-07-29T14:27:49
239,299,577
0
0
null
null
null
null
UTF-8
Python
false
false
980
py
#!/usr/bin/env python3 import subprocess from lib.coordinatesManagement import CoordinatesManager from lib.imageHandling import ImageHandler coordinates=CoordinatesManager(calibration_filename="calib/calibration.json") def main(): # takeImage=subprocess.run("./server_scripts/moveAndTakePicture.sh") fetchImage=subprocess.run("./server_scripts/fetchPicturefromServer.sh") image=ImageHandler("data/colonies.jpg",coordinates) while True: image.showImage(circles=coordinates.getPoints()) c=image.getPressedKey() if c==ord('q'): break if c==ord('s'): coordinates.writeCoordinatesFile(filename="data/coordinates.json") image.saveImage("data/colonies_processed.jpg") for coord in coordinates.coord_transformed: print(coord) fetchImage=subprocess.run("./server_scripts/pushColoniesToServer.sh") break if __name__ == "__main__": main()
[ "nadim@enginzyme.com" ]
nadim@enginzyme.com
1abcb520636d6bdcf87fda919e2807ba2c94bbaf
f9b14f7c366dc16c5dfc24a2478332a8ad14aea3
/tests/test_user_func.py
e62c3a3eaf07a429e7b28b1e28386eecde4fd4e0
[ "MIT" ]
permissive
aleobb/dataframe_expressions
fbd2a95de883c87fb4e93195ce719e7ead967231
cf135415f739377e9c2accb82606957417c7e0e6
refs/heads/master
2022-12-29T15:25:54.771541
2020-10-21T01:27:15
2020-10-21T01:27:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,331
py
import ast import inspect import pytest from dataframe_expressions import ( DataFrame, ast_FunctionPlaceholder, ast_DataFrame, render, user_func) def test_DF_user_func(): @user_func def func1(x: float) -> float: assert False d = DataFrame() d1 = func1(d) assert isinstance(d1, DataFrame) assert d1.child_expr is not None assert isinstance(d1.child_expr, ast.Call) f_c = d1.child_expr.func assert isinstance(f_c, ast_FunctionPlaceholder) f_sig = inspect.signature(f_c.callable) # type: ignore assert str(f_sig) == "(x: float) -> float" args = d1.child_expr.args assert len(args) == 1 a1 = args[0] assert isinstance(a1, ast_DataFrame) def test_DF_user_number_arg(): @user_func def func1(x: float, y: float) -> float: assert False d = DataFrame() d1 = func1(d, 10.0) assert isinstance(d1, DataFrame) assert d1.child_expr is not None assert isinstance(d1.child_expr, ast.Call) f_c = d1.child_expr.func assert isinstance(f_c, ast_FunctionPlaceholder) f_sig = inspect.signature(f_c.callable) # type: ignore assert str(f_sig) == "(x: float, y: float) -> float" args = d1.child_expr.args assert len(args) == 2 a1 = args[0] assert isinstance(a1, ast_DataFrame) a2 = args[1] assert isinstance(a2, ast.Num) assert a2.n == 10.0 def test_DF_user_wrong_number_args(): @user_func def func1(x: float, y: float) -> float: assert False d = DataFrame() with pytest.raises(Exception): func1(d) def test_DF_user_two_funcs(): @user_func def func1(x: float) -> float: assert False @user_func def func2(x: float, y: float) -> float: assert False # There should be no confusion between the two functions due to # some funny lambda semantics d = DataFrame() func2(func1(d), func1(d)) def test_DF_user_render(): @user_func def func1(x: float) -> float: assert False d = DataFrame() d1 = func1(d) chain, context = render(d1) assert chain is not None assert context is not None assert isinstance(chain, ast.Call) call = chain # type: ast.Call assert len(call.args) == 1 a1 = call.args[0] # type: ast.AST assert isinstance(a1, ast_DataFrame) assert a1.dataframe is d assert isinstance(call.func, ast_FunctionPlaceholder) callable = call.func f = callable.callable # type: ignore assert f.__name__ == 'func1' def test_df_user_render_args(): @user_func def func1(x: float) -> float: assert False d = DataFrame() d1 = func1(d.jets) chain, _ = render(d1) assert chain is not None assert isinstance(chain, ast.Call) call = chain # type: ast.Call assert len(call.args) == 1 a1 = call.args[0] # type: ast.AST assert isinstance(a1, ast.Attribute) def test_df_user_render_2args(): @user_func def func1(x1: float, x2: float) -> float: assert False d = DataFrame() d1 = func1(d.jets, d.jets) chain, _ = render(d1) assert chain is not None assert isinstance(chain, ast.Call) call = chain # type: ast.Call assert len(call.args) == 2 a1 = call.args[0] # type: ast.AST a2 = call.args[1] # type: ast.AST assert a1 is a2
[ "gwatts@uw.edu" ]
gwatts@uw.edu
3f252f1404c6461dd77efe3c577a6021a96fd1fc
57f4110f8252496142f043dce929c15df43f0e99
/niwo/spiders/niwobbs.py
36c18ff00dcd5fd500d26781a373ec5f38b113da
[]
no_license
lanluyu/niwo_bbs
0c69a6fcfcf1bcf52096124721ed051a9b677b59
bfb28d8050a6a1a74b2c199dbbbeee2baf4d40f2
refs/heads/master
2020-03-21T18:44:37.519262
2018-06-28T05:21:44
2018-06-28T05:21:44
138,910,125
0
0
null
null
null
null
UTF-8
Python
false
false
1,512
py
# -*- coding: utf-8 -*-
from scrapy import Spider, Request
from niwo.items import NiwoItem

class NiwobbsSpider(Spider):
    name = 'niwobbs'
    allowed_domains = ['bbs.niiwoo.com']  # domain only; a full URL here would filter out every request

    def start_requests(self):
        for i in range(3, 449):
            basic_url = 'http://bbs.niiwoo.com/forum.php?gid=1&page='
            start_url = basic_url + str(i)
            yield Request(url=start_url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        a = response.xpath('.//div[@class="pg"]/strong/text()').extract_first()
        print('Crawling page', a)
        posts = response.xpath('.//table[@class="wtab"]/tbody')
        for post in posts:
            item = NiwoItem()
            item['title'] = ''.join(post.xpath('.//div[@class="thread-tit"]/a/text()').extract())
            item['author'] = ''.join(post.xpath('.//div[@class="thread-nums"]/a[1]/text()').extract())
            item['visitors'] = ''.join(post.xpath('.//div[@class="thread-nums"]/a[2]/@title').extract())
            # time-extraction rule for pages 1 and 2:
            #item['lasttime'] = 'last reply time: ' + ''.join(post.xpath('.//a[@class="time"]/span/span/@title').extract())
            # time-extraction rule for page 3 onwards:
            item['lasttime'] = ''.join(post.xpath('.//a[@class="time"]/span/text()').extract())
            item['url'] = 'http://bbs.niiwoo.com/' + ''.join(post.xpath('.//div[@class="thread-tit"]/a/@href').extract())
            print(item)
            yield item
[ "noreply@github.com" ]
lanluyu.noreply@github.com
ebce17fb0dd02ef5af320607dbcfad78bb6aec8c
dcd0fb6bdcb488dd2046778eb02edce8f4623b58
/object_follow_edgetpu/detect_standalone.py
7e196dbb4d1727616b1a5ec9f56384351df24223
[]
no_license
openbsod/Adeept_AWR
12f2df24bfcf85d7965a425bb0078b2c858e807a
92ca5e7147a9cb44ad55f55a467371648dc76b3c
refs/heads/master
2023-04-09T07:06:35.772918
2021-04-15T21:20:40
2021-04-15T21:20:40
284,012,618
1
0
null
2020-07-31T10:46:50
2020-07-31T10:46:49
null
UTF-8
Python
false
false
4,801
py
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Object detection demo. This demo script requires Raspberry Pi Camera, and pre-compiled mode. Get pre-compiled model from Coral website [1] [1]: https://dl.google.com/coral/canned_models/mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite """ from edgetpu.detection.engine import DetectionEngine from PIL import Image from PIL import ImageDraw from PIL import ImageFont import numpy as np import time import io import picamera # https://github.com/waveform80/picamera/issues/383 def _monkey_patch_picamera(): original_send_buffer = picamera.mmalobj.MMALPortPool.send_buffer def silent_send_buffer(zelf, *args, **kwargs): try: original_send_buffer(zelf, *args, **kwargs) except picamera.exc.PiCameraMMALError as error: if error.status != 14: raise error picamera.mmalobj.MMALPortPool.send_buffer = silent_send_buffer # Read labels.txt file provided by Coral website def _read_label_file(file_path): with open(file_path, 'r', encoding="utf-8") as f: lines = f.readlines() ret = {} for line in lines: pair = line.strip().split(maxsplit=1) ret[int(pair[0])] = pair[1].strip() return ret # Main loop def main(): model_filename = "mobilenet_ssd_v2_coco_quant_postprocess_edgetpu.tflite" label_filename = "coco_labels.txt" engine = DetectionEngine(model_filename) labels = _read_label_file(label_filename) CAMERA_WIDTH = 640 CAMERA_HEIGHT = 480 fnt = ImageFont.load_default() # To view preview on VNC, # https://raspberrypi.stackexchange.com/a/74390 with picamera.PiCamera() as camera: _monkey_patch_picamera() camera.resolution = (CAMERA_WIDTH, CAMERA_HEIGHT) camera.framerate = 15 camera.rotation = 180 _, width, height, channels = engine.get_input_tensor_shape() print("{}, {}".format(width, height)) overlay_renderer = None camera.start_preview() try: stream = io.BytesIO() for foo in camera.capture_continuous(stream, format='rgb', use_video_port=True): # Make Image object from camera stream stream.truncate() stream.seek(0) input = np.frombuffer(stream.getvalue(), dtype=np.uint8) input = input.reshape((CAMERA_HEIGHT, CAMERA_WIDTH, 3)) image = Image.fromarray(input) # image.save("out.jpg") # Make overlay image plane img = Image.new('RGBA', (CAMERA_WIDTH, CAMERA_HEIGHT), (255, 0, 0, 0)) draw = ImageDraw.Draw(img) # Run detection start_ms = time.time() results = engine.DetectWithImage(image, threshold=0.2, top_k=10) elapsed_ms = (time.time() - start_ms)*1000.0 if results: for obj in results: box = obj.bounding_box.flatten().tolist() box[0] *= CAMERA_WIDTH box[1] *= CAMERA_HEIGHT box[2] *= CAMERA_WIDTH box[3] *= CAMERA_HEIGHT # print(box) # print(labels[obj.label_id]) draw.rectangle(box, outline='red') draw.text((box[0], box[1]-10), labels[obj.label_id], font=fnt, fill="red") camera.annotate_text = "{0:.2f}ms".format(elapsed_ms) if not overlay_renderer: overlay_renderer = camera.add_overlay( img.tobytes(), size=(CAMERA_WIDTH, CAMERA_HEIGHT), layer=4, alpha=255) else: overlay_renderer.update(img.tobytes()) finally: if overlay_renderer: 
camera.remove_overlay(overlay_renderer) camera.stop_preview() if __name__ == "__main__": main()
[ "you@example.com" ]
you@example.com
124368ed9467c6666327662c5ff9d8beeeb3a9f4
cb82e798d1ea875e87d973d87602baa07166fb7b
/net/ssl/tls_ecdhe_rsa_with_aes_128_gcm_sha256/prf-frame139.py
618a7f98604c83ebcb46dd238066e8646a60e1c1
[]
no_license
rowanpang/noteGit
e9470be20bfdb04ac6b80c93f0f1cd3fd97ef565
120ca5329addf3a780b2299a0ab74de997b77785
refs/heads/master
2023-05-31T05:04:58.731953
2023-05-31T02:34:14
2023-05-31T02:34:14
52,506,290
1
0
null
2021-06-04T01:08:05
2016-02-25T07:41:49
C
UTF-8
Python
false
false
7,989
py
#!/usr/bin/python3
# -*- coding: utf-8 -*-

import hashlib
import hmac
import unittest
import os
import binascii
import sys

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import (
    Cipher, algorithms, modes
)

def encrypt(key, plaintext, associated_data, iv):
    # Generate a random 96-bit IV.
    # iv = os.urandom(12)

    # Construct an AES-GCM Cipher object with the given key and a
    # randomly generated IV.
    encryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(iv),
        backend=default_backend()
    ).encryptor()

    # associated_data will be authenticated but not encrypted,
    # it must also be passed in on decryption.
    encryptor.authenticate_additional_data(associated_data)

    # Encrypt the plaintext and get the associated ciphertext.
    # GCM does not require padding.
    ciphertext = encryptor.update(plaintext) + encryptor.finalize()

    return (iv, ciphertext, encryptor.tag)

def decrypt(key, associated_data, iv, ciphertext, tag):
    # Construct a Cipher object, with the key, iv, and additionally the
    # GCM tag used for authenticating the message.
    decryptor = Cipher(
        algorithms.AES(key),
        modes.GCM(iv, tag),
        backend=default_backend()
    ).decryptor()

    # We put associated_data back in or the tag will fail to verify
    # when we finalize the decryptor.
    decryptor.authenticate_additional_data(associated_data)

    # Decryption gets us the authenticated plaintext.
    # If the tag does not match an InvalidTag exception will be raised.
    return decryptor.update(ciphertext) + decryptor.finalize()

def TLSv1_0_PRF(outlen, secret, label, seed):
    ls = len(secret)
    ls1 = ls2 = (ls + 1) // 2

    def xor(xx, yy):
        o = []
        for i in range(len(xx)):
            o.append(xx[i] ^ yy[i])
        return bytes(o)

    md5 = TLSv1_2_PRF(outlen, secret[:ls1], label, seed, hashlib.md5)
    sha1 = TLSv1_2_PRF(outlen, secret[-ls2:], label, seed, hashlib.sha1)
    return xor(md5, sha1)

def TLSv1_2_PRF(outlen, secret, label, seed, h):
    label = bytes(label, 'ASCII')
    secret = bytes(secret)
    seed = bytes(seed)

    def p_hash(hashfn, outlen, k, pt):
        o = []
        a_im = pt
        for i in range(0, outlen, hashfn().digest_size):
            a_i = hmac.new(k, a_im, hashfn).digest()
            output = hmac.new(k, a_i + pt, hashfn).digest()
            o.append(output)
            a_im = a_i
        return bytes(b''.join(o))[:outlen]

    return p_hash(h, outlen, secret, label + seed)

def prfTest():
    out = TLSv1_2_PRF(70, bytes('keyforhmac', 'ASCII'), 'msg-for-hmac-sha256',
                      bytes('', 'ASCII'), hashlib.sha256)
    print(out.hex())

def test():
    prfTest()
    # sys.exit()
    print('--------prf test ok----------')

    rdClihexStr = 'f77182ed908b500c8b1ad6ad8754329d63ad8704ae8901149727d7257bcf8878'  # frame 132
    rdSvrhexStr = '59604cc213be22157934682d82a9dbf4cba3f53cc10f6a89d4270bb87a4ebb8c'  # frame 135

    pre_master_secret_hexStr = '50891929d1f6b3507dfef2416057abb452116d5210c91a2d1c6b2ac4e9df23eeba718ac6b9bd5506479dd99b7585c983'
    pre_master_secret = bytes.fromhex(pre_master_secret_hexStr)  # from ./firefox-sslkey.log; that file stores the master_secret

    length = 48
    if len(pre_master_secret) == length:
        # a 48-byte value is taken to be the master key already;
        # the Firefox sslkeylog file stores the master key directly
        master_secret = pre_master_secret
    else:
        seedhexStr = rdClihexStr + rdSvrhexStr
        secret = pre_master_secret
        label = "master secret"
        seed = bytes.fromhex(seedhexStr)
        master_secret = TLSv1_2_PRF(length, secret, label, seed, hashlib.sha256)
    print('master: ' + master_secret.hex())

    key_block_secret = master_secret
    seedhexStr = rdSvrhexStr + rdClihexStr

    # TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256
    # sha256, so mac_key_len = 32
    # aes_128_gcm, so key_len = 16 and fixed iv_len = 4
    maclen = 32
    keylen = 16
    ivlen = 4
    length = (maclen + keylen + ivlen)*2

    # generate the key block
    secret = key_block_secret
    label = "key expansion"
    seed = bytes.fromhex(seedhexStr)
    key_block = TLSv1_2_PRF(length, secret, label, seed, hashlib.sha256)
    print('keyblock: ' + key_block.hex())

    print('----split-----')
    maclen = 0  # GCM needs no MAC key
    # 1
    lenTmp = maclen
    start = 0
    end = start + lenTmp
    client_write_mac_key = bytes(key_block[start:end])
    print('cwmk: ' + client_write_mac_key.hex())
    # 2
    start = end
    end = start + lenTmp
    server_write_mac_key = bytes(key_block[start:end])
    print('swmk: ' + server_write_mac_key.hex())
    # 3
    lenTmp = keylen
    start = end
    end = start + lenTmp
    client_write_key = bytes(key_block[start:end])
    print(' cwk: ' + client_write_key.hex())
    # 4
    start = end
    end = start + lenTmp
    server_write_key = bytes(key_block[start:end])
    print(' swk: ' + server_write_key.hex())
    # 5
    lenTmp = ivlen
    start = end
    end = start + lenTmp
    client_write_iv = bytes(key_block[start:end])
    print(' cwi: ' + client_write_iv.hex())
    # 6
    start = end
    end = start + lenTmp
    server_write_iv = bytes(key_block[start:end])
    print(' swi: ' + server_write_iv.hex())

    plainhexStr = '1400000c4bb5c78b0c01d695180f5ea4'
    plaintext = binascii.unhexlify(plainhexStr)  # from wireshark, after importing ./.firefox-sslkey.log
    # frame139
    # Ciphertext[40]:
    # | 00 00 00 00 00 00 00 00 2b 83 0d 98 5e c2 81 6e |........+...^..n|
    # | bd 6d e8 92 bf d3 b4 08 da e0 2d ee a4 aa 98 f1 |.m........-.....|
    # | 8d 48 77 62 fd 72 24 a6                         |.Hwb.r$.        |
    # ssl_decrypt_record: allocating 72 bytes for decrypt data (old len 32)
    # Plaintext[32]:
    # | 14 00 00 0c 4b b5 c7 8b 0c 01 d6 95 18 0f 5e a4 |....K.........^.|
    # | a5 f0 cf 18 da 34 6b 5c f9 4b 0e 6b a2 15 f1 6e |.....4k\.K.k...n|

    # 1603030028                        # record head
    # 0000000000000000                  # explicit nonce
    # 2b830d985ec2816ebd6de892bfd3b408  # cip
    # dae02deea4aa98f18d487762fd7224a6  # tag

    nonceExplicithexStr = '0000000000000000'  # 8 bytes
    nonceCounter = ''
    nonce = server_write_iv[:4] + binascii.unhexlify(nonceExplicithexStr) + bytes.fromhex(nonceCounter)
    print('non: ' + nonce.hex())

    # AAD len = 8 (seq) + 1 (type) + 2 (version) + 2 (length)
    seq_num = '0000000000000000'  # 8 bytes, need be lsb; within one TLS session, after the
                                  # key exchange completes, the finished record's seqnum is 0
    tlsCompressedType = '16'
    tlsCompressedVersion = '0303'
    tlsCompressedLength = '0010'  # the compressed length before encryption
    associateStr = seq_num + \
                   tlsCompressedType + \
                   tlsCompressedVersion + \
                   tlsCompressedLength
    associateData = binascii.unhexlify(associateStr)
    print('aso: ' + associateData.hex())

    open('./plaintxt', "bw+").write(plaintext)
    open("./swk", "bw+").write(server_write_key)
    open("./swi", "bw+").write(server_write_iv)
    open("./associate", "bw+").write(associateData)
    open("./nonce", "bw+").write(nonce)

    iv, ciphertext, tag = encrypt(
        server_write_key,
        plaintext,
        associateData,
        nonce
    )
    print(' iv: ' + iv.hex())
    print('cip: ' + ciphertext.hex())
    print('tag: ' + tag.hex())

    print('-----decrypt-----')
    plaintext = decrypt(
        server_write_key,
        associateData,
        iv,
        ciphertext,
        tag
    )
    print(b'plain: ' + binascii.hexlify(plaintext))

if __name__ == '__main__':
    # unittest.main()
    test()
[ "pangweizhen.2008@hotmail.com" ]
pangweizhen.2008@hotmail.com
3f0caf57cc2e796c4b731bb6d978430bedfcd7f9
0a473b06d45b4697b124859c21f11ca833da70b4
/chemprop_fda/features/morgan_fingerprint.py
908906f82f12c072955083133788ac0841179257
[ "MIT" ]
permissive
AayushGrover/ViscaNet
e0085f3549a35447d0ef497fb9ee25fe8a625b73
41786e10b84f2264b638567bdce1c189c1b66b00
refs/heads/main
2023-08-28T21:45:14.598705
2021-10-31T21:53:42
2021-10-31T21:53:42
286,003,354
1
0
null
null
null
null
UTF-8
Python
false
false
981
py
import numpy as np from rdkit import Chem, DataStructs from rdkit.Chem import AllChem def morgan_fingerprint(smiles: str, radius: int = 2, num_bits: int = 2048, use_counts: bool = False) -> np.ndarray: """ Generates a morgan fingerprint for a smiles string. :param smiles: A smiles string for a molecule. :param radius: The radius of the fingerprint. :param num_bits: The number of bits to use in the fingerprint. :param use_counts: Whether to use counts or just a bit vector for the fingerprint :return: A 1-D numpy array containing the morgan fingerprint. """ if type(smiles) == str: mol = Chem.MolFromSmiles(smiles) else: mol = smiles if use_counts: fp_vect = AllChem.GetHashedMorganFingerprint(mol, radius, nBits=num_bits) else: fp_vect = AllChem.GetMorganFingerprintAsBitVect(mol, radius, nBits=num_bits) fp = np.zeros((1,)) DataStructs.ConvertToNumpyArray(fp_vect, fp) return fp
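A quick usage sketch of the function above (ethanol's SMILES string 'CCO' is an arbitrary example):

fp = morgan_fingerprint('CCO', radius=2, num_bits=2048)
print(fp.shape)       # (2048,)
print(int(fp.sum()))  # number of bits set for this molecule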
[ "AAYUSH@staff-net-etx-1580.intern.ethz.ch" ]
AAYUSH@staff-net-etx-1580.intern.ethz.ch
32b6e1ea78845b7ff7ac88c40a72e2096200219f
eaaa1bbceb9a867e08769aae07c37c5e1107e430
/mitx_6.00.2x_ict_ds/week3/lstNone.py
fc381eea3af3ec7be312d603b0b86e04fe2eeb3d
[]
no_license
nwinds/jugar
c8f9ce11dd95c6e0dda22e22a69dd598b34d9d3c
db4d362d9d1dd04f2c6c013d4462de0c892b8314
refs/heads/master
2020-12-24T16:35:03.951580
2016-03-10T09:25:06
2016-03-10T09:25:06
41,397,534
0
0
null
null
null
null
UTF-8
Python
false
false
178
py
lst1 = [1, 2, 3]
lst2 = []
lst3 = lst1 + lst2

print('lst1')
print(lst1)
print('lst2')
if len(lst2) > 0:
    print(lst2)
else:
    print('len(lst2) == 0')
print('lst3')
print(lst3)
[ "namingwinds@gmail.com" ]
namingwinds@gmail.com
12d5ee953deb6d64391cd697be86d87d92cfae26
aef8fe58f3c272e87dd166e3b9eb3ee4bc7e2d21
/To_do/toDoList/myList/forms.py
c81e8cef0fec422c82a8c7dad0648d564e02ef20
[]
no_license
Olga20011/Django-ToDoList
6c43bf83c4f79e38f63a5854d4f6653bf71c6e10
758797763135d5e6567ecdc83a30244934778682
refs/heads/master
2023-08-13T23:13:33.931349
2021-09-16T14:21:41
2021-09-16T14:21:41
392,290,709
0
0
null
null
null
null
UTF-8
Python
false
false
310
py
from django import forms
from .models import *

class TaskForm(forms.ModelForm):
    # title = forms.CharField(widget=forms.TextInput(attrs={'placeholder': 'Add new task...'}))
    class Meta:
        model = MyTask
        fields = "__all__"
[ "olgaestherakello@gmail.com" ]
olgaestherakello@gmail.com
a76bbe862fc2f943b5866b00388228264612f33d
6d4af63e07a137d382ef61afe8276f7470b7af59
/wsgistate/__init__.py
742cd2a8b2a8e916a3427188ed7f1c260ff1b2b1
[]
no_license
Cromlech/wsgistate
142c7016c74fc28e6c56368f018bf113c379118c
d730ee47a4a43efbd20bcb9623e76bedeeb8c62b
refs/heads/master
2023-04-11T14:10:20.522520
2023-04-11T10:06:10
2023-04-11T10:06:10
15,806,829
0
0
null
null
null
null
UTF-8
Python
false
false
4,085
py
# Copyright (c) 2005 Allan Saddi <allan@saddi.com> # Copyright (c) 2005, the Lawrence Journal-World # Copyright (c) 2006 L. C. Rees # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of Django nor the names of its contributors may # be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. '''Base Cache class''' __all__ = ['BaseCache', 'db', 'file', 'memory', 'memcached', 'session', 'simple', 'cache'] def synchronized(func): '''Decorator to lock and unlock a method (Phillip J. Eby). @param func Method to decorate ''' def wrapper(self, *__args, **__kw): self._lock.acquire() try: return func(self, *__args, **__kw) finally: self._lock.release() wrapper.__name__ = func.__name__ wrapper.__dict__ = func.__dict__ wrapper.__doc__ = func.__doc__ return wrapper class BaseCache(object): '''Base Cache class.''' def __init__(self, *a, **kw): super(BaseCache, self).__init__() timeout = kw.get('timeout', 300) try: timeout = int(timeout) except (ValueError, TypeError): timeout = 300 self.timeout = timeout def __getitem__(self, key): '''Fetch a given key from the cache.''' return self.get(key) def __setitem__(self, key, value): '''Set a value in the cache. ''' self.set(key, value) def __delitem__(self, key): '''Delete a key from the cache.''' self.delete(key) def __contains__(self, key): '''Tell if a given key is in the cache.''' return self.get(key) is not None def get(self, key, default=None): '''Fetch a given key from the cache. If the key does not exist, return default, which itself defaults to None. @param key Keyword of item in cache. @param default Default value (default: None) ''' raise NotImplementedError() def set(self, key, value): '''Set a value in the cache. @param key Keyword of item in cache. @param value Value to be inserted in cache. ''' raise NotImplementedError() def delete(self, key): '''Delete a key from the cache, failing silently. @param key Keyword of item in cache. ''' raise NotImplementedError() def get_many(self, keys): '''Fetch a bunch of keys from the cache. Returns a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict. @param keys Keywords of items in cache. 
''' d = dict() for k in keys: val = self.get(k) if val is not None: d[k] = val return d
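A toy dict-backed subclass, to show how the abstract get/set/delete trio drives the inherited dict-style operators and get_many; illustrative only, since the package ships real backends (db, file, memory, memcached):

class DictCache(BaseCache):
    '''Toy dict-backed cache implementing the abstract methods.'''

    def __init__(self, *a, **kw):
        super(DictCache, self).__init__(*a, **kw)
        self._data = {}

    def get(self, key, default=None):
        '''Fetch a key, or default if absent.'''
        return self._data.get(key, default)

    def set(self, key, value):
        '''Store a value.'''
        self._data[key] = value

    def delete(self, key):
        '''Remove a key, failing silently.'''
        self._data.pop(key, None)

cache = DictCache(timeout=60)
cache['spam'] = 'eggs'
print(cache['spam'], 'spam' in cache)        # eggs True
print(cache.get_many(['spam', 'missing']))   # {'spam': 'eggs'}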
[ "trollfot@gmail.com" ]
trollfot@gmail.com
f153b13f654cbb1a501907cba37eddc072dc7fb0
edb69ef057593343c86bfc08024422cd8292207f
/users/views.py
cb18f70307655a0b4d01550adccdde99c04a2aec
[]
no_license
MVNDAY/LearningLog
27005b3a411bdabaa56c23258d893e06f09caa0d
1d4436fdea5610b0e63cdb55ebbd2d112115d74a
refs/heads/master
2023-04-13T10:22:05.160792
2021-04-26T17:46:26
2021-04-26T17:46:26
361,832,448
0
0
null
null
null
null
UTF-8
Python
false
false
817
py
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm

# Create your views here.

def logout_view(request):
    logout(request)
    return HttpResponseRedirect(reverse('learning_logs:index'))

def register(request):
    if request.method != 'POST':
        form = UserCreationForm()
    else:
        form = UserCreationForm(data=request.POST)
        if form.is_valid():
            new_user = form.save()
            authenticated_user = authenticate(username=new_user.username,
                                              password=request.POST['password1'])
            login(request, authenticated_user)
            return HttpResponseRedirect(reverse('learning_logs:index'))
    context = {'form': form}
    return render(request, 'register.html', context)
[ "83238300+MVNDAY@users.noreply.github.com" ]
83238300+MVNDAY@users.noreply.github.com
c27f4477eaa529faa393007fcdb5b9fda759771e
b5865b795c4e743cca80a6e0ea480ecc0d0a35fd
/My_Second_Project/Login_app/admin.py
fc666635eae6f732ecd40552bd516ac51555026b
[]
no_license
kazi-akib-abdullah/django-deployment
384ee355efacf58a66c66b4d768b86750c70d98e
c2ef330018b71fbb42c3ee9a5a0fba78d1d33473
refs/heads/master
2023-02-26T03:01:39.140332
2021-01-30T16:42:06
2021-01-30T16:42:06
334,459,092
0
0
null
null
null
null
UTF-8
Python
false
false
178
py
from django.contrib import admin
from Login_app.models import UserInfo

# Register your models here.
admin.site.register(UserInfo)
[ "45953236+kazi-akib-abdullah@users.noreply.github.com" ]
45953236+kazi-akib-abdullah@users.noreply.github.com
f837708b75c33e3c2f20e04b15c464ef277c72b6
6c919bb579dd639d53f097d4b8b1b6f2bb830efb
/testfiles/interface_test.py
67d10a6aac8edb7aa6be430cf991c4d6067d7f37
[]
no_license
aj132608/SpicyGlass
b258f0282e713d555489c4ab106c9008f6965f31
8b1d4f5ccf6f8ed4c0b9a65f042af505ab852c31
refs/heads/master
2021-01-14T01:45:18.047779
2020-03-09T18:48:46
2020-03-09T18:48:46
242,560,382
0
0
null
2020-03-07T00:43:38
2020-02-23T17:29:42
Python
UTF-8
Python
false
false
1,149
py
from firebaseinterface.firebase_interface import FirebaseInterface import json if __name__ == "__main__": with open('creds.json') as file: creds_dict = json.load(file) interface_obj = FirebaseInterface(creds_dict=creds_dict) # Fetch the current database held locally as a dictionary current_database = interface_obj.get_database_dict() print(f"database: {current_database}") # Perform a GET request to retrieve a dictionary from the # database itself current_database = interface_obj.get_data(key='') print(f"database: {current_database}") # Get the value of a key car_on = interface_obj.get_data(key='car-on') print(f"car-on: {car_on}") # Get a nested value using subkey front_defrost = interface_obj.get_data(key='defrost', subkey='front') print(f"front defrost: {front_defrost}") # response = interface_obj.change_value(key='car-on', val=True) # # print(f"PUT request response: {response}") # # current_database = interface_obj.get_database_dict() # # print(f"new database: {current_database}")
[ "32584157+aj132608@users.noreply.github.com" ]
32584157+aj132608@users.noreply.github.com
3c86e7fd1292b6f7b57fbcee485151b7beb95814
7310067f47b4626d7afe71ccac591febe3441b1f
/knn.py
5803d62dd80406b76d0ed7834ee6004fd47ba612
[]
no_license
anisrini/KNN
1b0f13e5cacae6834dead55d84b2000590d69814
596f2ba78b66b4e902f11c512d760ace65422775
refs/heads/master
2016-08-12T04:38:55.625423
2016-04-01T09:08:13
2016-04-01T09:08:13
55,217,302
0
0
null
null
null
null
UTF-8
Python
false
false
2,444
py
import pandas as pd import helpers import cdist from sklearn.metrics import accuracy_score from sklearn.metrics import confusion_matrix from sklearn.cross_validation import StratifiedKFold from sklearn.cross_validation import LeaveOneOut from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA def knn(trains, test, kvals, loo_flag): print "Classification" for k in range(len(kvals)): print "K: \n", kvals[k] knn = cdist.KNNClassifier(kvals[k]) for i in range(len(trains)): for j in range(len(trains[i])): print "2-Fold" print "i, j ", i, j print skf = StratifiedKFold(trains[i][j]['labels'], n_folds=2) for train_index, test_index in skf: fold_train = trains[i][j].ix[train_index] fold_test = trains[i][j].ix[test_index] fold_train.index = range(len(fold_train)) fold_test.index = range(len(fold_test)) knn.train(fold_train.iloc[:,:-1], fold_train['labels']) print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print print "5-Fold" print "i, j ", i, j print skf = StratifiedKFold(trains[i][j]['labels'], n_folds=5) for train_index, test_index in skf: fold_train = trains[i][j].ix[train_index] fold_test = trains[i][j].ix[test_index] fold_train.index = range(len(fold_train)) fold_test.index = range(len(fold_test)) knn.train(fold_train.iloc[:,:-1], fold_train['labels']) print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print if loo_flag == 1: print "Leave One Out: " print "i, j ", i, j print loo = LeaveOneOut(len(trains[i][j].iloc[:,:-1])) for train_index, test_index in loo: fold_train = trains[i][j].ix[train_index] fold_test = trains[i][j].ix[test_index] fold_train.index = range(len(fold_train)) fold_test.index = range(len(fold_test)) knn.train(fold_train.iloc[:,:-1], fold_train['labels']) print "Accuracy: ", accuracy_score(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print "Confusion Matrix: \n", confusion_matrix(fold_test['labels'], knn.test(fold_test.iloc[:,:-1])) print print
[ "anisrini93@gmail.com" ]
anisrini93@gmail.com
cecf868f2be1629e1555d5d8b1f75d9ff586a4ce
0f33457b2fead035730e86176092fe5abe532f51
/XOR EQUALITY Code Shef/xor_bitwise.py
4bcffdcbf523f72a4bd1af3310e94be558160e7e
[]
no_license
tb123-coder/May-Long-Challenge-Codechef-
dbb0cd30e0862cebeef6b79de9a20e7f5cb98e0a
bf710b287bd12638d0dc7308367259b57af9f7ef
refs/heads/master
2023-04-28T05:28:19.517262
2021-05-13T12:32:18
2021-05-13T12:32:18
367,038,309
0
0
null
null
null
null
UTF-8
Python
false
false
428
py
def power(x, y, p):
    res = 1
    x = x % p
    if (x == 0):
        return 0
    while (y > 0):
        # if y is odd, multiply x into the result
        if ((y & 1) == 1):
            res = (res * x) % p
        # halve y (equivalent to y = y // 2)
        y = y >> 1
        # square the base
        x = (x * x) % p
    return res

T = int(input())
while (T):
    T -= 1
    N = int(input())
    p = 10**9 + 7
    ans = power(2, N - 1, p)
    print(ans)
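Python's built-in three-argument pow performs the same modular exponentiation, so the whole helper can be replaced by a one-liner; an equivalent sketch:

MOD = 10**9 + 7
T = int(input())
for _ in range(T):
    N = int(input())
    print(pow(2, N - 1, MOD))  # built-in modular exponentiation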
[ "batra.tushar12dec1999@gmail.com" ]
batra.tushar12dec1999@gmail.com
17bc87c11112d5f9e4a92ca75a101c66480bc5b1
34284fd6cd6c97bad8d1fa422e9279600d1218a7
/labs/lab6.py
14d6e6bfc0fe5bfe06087b9ea12d415c2f280f08
[]
no_license
Yui-Ezic/Numerical-Methods
1c2ac92fdf9bb75924e9ac14bac7c5033e634436
da2ba5a3f2a17a947d1240a2a3154b70d9d8c916
refs/heads/master
2020-04-14T13:40:06.138283
2019-01-02T18:27:14
2019-01-02T18:27:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,400
py
import numpy as np
import math
import matplotlib.pyplot as plt

def my_function(x):
    return x * x + math.sin(x)

# function bounds
a1 = 1
b1 = 3

# step
H = (b1 - a1) / 10

# number of points
m = 11
arrSize = m

# table of function values
Xt = np.zeros(arrSize)
Yt = np.zeros(arrSize)

# build the table
for i in range(arrSize):
    Xt[i] = a1 + i*H
    Yt[i] = my_function(Xt[i])

n = 4

a = np.zeros(n + 1)
aFlag = np.zeros(n + 1)
for i in range(m):
    a[0] += Yt[i]
a[0] /= m
aFlag[0] = 1

b = np.zeros(n + 1)
bFlag = np.zeros(n + 1)
bFlag[0] = 1

def g(j, x):
    if j == -1:
        return 0
    if j == 0:
        return 1
    if aFlag[j] == 0:
        c = 0
        d = 0
        for i in range(m):
            f = g(j - 1, Xt[i])
            f_2 = f * f
            c += Xt[i] * f_2
            d += f_2
        a[j] = c / d
        aFlag[j] = 1
        c = 0
        d = 0
        for i in range(m):
            f = g(j, Xt[i])
            f_2 = f * f
            c += f * Yt[i]
            d += f_2
        a[j] = c / d
    if bFlag[j - 1] == 0:
        c = 0
        d = 0
        for i in range(m):
            f = g(j - 1, Xt[i])
            f1 = g(j - 2, Xt[i])
            c += Xt[i] * f * f1
            d += f1 * f1
        b[j - 1] = c / d
        bFlag[j - 1] = 1
    return (x - a[j]) * g(j-1, x) - b[j - 1] * g(j - 2, x)

def calculate_polynomial(x, n):
    result = 0
    for i in range(n + 1):
        result += a[i] * g(i, x)
    return result

# for the plot of the base function
h1 = (b1 - a1) / 20
H2 = 2*H
start = a1 - H2
end = b1 + H2
xlist = []
ylist1 = []
while start <= end:
    f = my_function(start)
    xlist.append(start)
    ylist1.append(f)
    start += h1

plt.subplot(2, 1, 1)
plt.plot(xlist, ylist1, 'k', label='f(x)')

for i in range(n + 1):
    # print the table
    print("For n = {0}".format(i))
    print("----------------------------------------------------------------------")
    print("|      |          |          |                | f(xj) - Pn(xj)       |")
    print("|  xj  |  f(xj)   |  Pn(xj)  | f(xj) - Pn(xj) | -------------- * 100 |")
    print("|      |          |          |                |     Pn(xj)           |")
    print("----------------------------------------------------------------------")
    start = a1 - H2
    ylist2 = []
    while start <= end:
        f = my_function(start)
        p = calculate_polynomial(start, i)
        ylist2.append(p)
        print("|{0:5.2f} | {1:8.3f} | {2:8.3f} | {3:14.9f} | {4:21.16f}|".format(start, f, p, p - f, (p-f) * 100 / f))
        start += h1
    plt.plot(xlist, ylist2, '--', label='P{0}(x)'.format(i))
    print("----------------------------------------------------------------------\n")

plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'$y = f(x), y = Pn(x)$')
plt.legend(loc='best', ncol=2)

# Additional task
plt.subplot(2, 1, 2)
plt.xlabel(r'$x$')
plt.ylabel(r'$y$')
plt.title(r'$y = f(x), y = Pn(x), y = f\'(x), y = P\'(x)$')
plt.plot(xlist, ylist1, label='f(x)')
ylist = [calculate_polynomial(x, 3) for x in xlist]
plt.plot(xlist, ylist, '--', label='P3(x)')
ylist = [2 * x * x - 12 * x + 11.288 for x in xlist]
plt.plot(xlist, ylist, ':', label='P3\'(x)')
ylist = [2*x + math.cos(x) for x in xlist]
plt.plot(xlist, ylist, label='f\'(x)')
plt.legend()
plt.show()
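As an independent reference for the degree-3 fit, numpy's ordinary least-squares polynomial fit can be run over the same table; a sketch using only names defined above:

# Independent reference: fit the same table with ordinary least squares.
coeffs = np.polyfit(Xt, Yt, deg=3)
print(np.polyval(coeffs, Xt[:3]))                    # numpy's degree-3 fit
print([calculate_polynomial(x, 3) for x in Xt[:3]])  # orthogonal-polynomial fit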
[ "mishan221199@gmail.com" ]
mishan221199@gmail.com
a658a0212b71fb6327314f0662b6143017559bc1
df2cbe914f463ad050d7ed26194424afbe3a0a52
/addons/snailmail/models/mail_notification.py
a368c0a778338b68f037181c93c3d78bffc3f691
[ "Apache-2.0" ]
permissive
SHIVJITH/Odoo_Machine_Test
019ed339e995be980606a2d87a63312ddc18e706
310497a9872db7844b521e6dab5f7a9f61d365a4
refs/heads/main
2023-07-16T16:23:14.300656
2021-08-29T11:48:36
2021-08-29T11:48:36
401,010,175
0
0
Apache-2.0
2021-08-29T10:13:58
2021-08-29T10:13:58
null
UTF-8
Python
false
false
719
py
# -*- coding: utf-8 -*- from odoo import fields, models class Notification(models.Model): _inherit = 'mail.notification' notification_type = fields.Selection(selection_add=[('snail', 'Snailmail')], ondelete={'snail': 'cascade'}) letter_id = fields.Many2one('snailmail.letter', string="Snailmail Letter", index=True, ondelete='cascade') failure_type = fields.Selection(selection_add=[ ('sn_credit', "Snailmail Credit Error"), ('sn_trial', "Snailmail Trial Error"), ('sn_price', "Snailmail No Price Available"), ('sn_fields', "Snailmail Missing Required Fields"), ('sn_format', "Snailmail Format Error"), ('sn_error', "Snailmail Unknown Error"), ])
[ "36736117+SHIVJITH@users.noreply.github.com" ]
36736117+SHIVJITH@users.noreply.github.com
d830aecde642375024c94874c404b42184b66447
3f382f9edd21be130dafe26e79ee1081b9626673
/movieClasses/Inheritance.py
3b6b6971025ac1a642042079dca57f25c35739ed
[]
no_license
ceewick/introClasses
090d41a5ba9e151ca136ee8d23f9fbc4d9a5f7d7
3acc7bac15bc3bcaebfa39b438e8033ec900d5ec
refs/heads/master
2020-03-15T15:36:33.511733
2018-05-06T19:51:36
2018-05-06T19:51:36
132,216,033
0
0
null
null
null
null
UTF-8
Python
false
false
678
py
class Parent():
    def __init__(self, last_name, eye_color):
        print('Parent Constructor Called')
        self.last_name = last_name
        self.eye_color = eye_color

class Child(Parent):
    def __init__(self, last_name, eye_color, number_of_toys):
        print('Child Constructor Called')
        Parent.__init__(self, last_name, eye_color)
        self.number_of_toys = number_of_toys

## Typically the class definition and its instances live in separate files,
## but they are kept together here for demonstration.
#billy_cyrus = Parent('Cyrus','blue')
#print(billy_cyrus.last_name)

miley_cyrus = Child('Cyrus', 'Blue', 5)
print(miley_cyrus.last_name)
print(miley_cyrus.number_of_toys)
[ "noreply@github.com" ]
ceewick.noreply@github.com
e509256c393ec76ad4d8aa28753b3613e2457c80
84fcfed46e03d4936f1a5d82624fd43a4f415d72
/Client.py
719ea7f6319f406dac03306172f4f54dc2038d55
[]
no_license
Shruti-Pattajoshi/Computer-Networks-Socket-Programming-
9a60fa750adf7bee98d64cbe95de22a67e93ccb6
8daf2051fb412e555952cdde0dd0e3554c2231be
refs/heads/master
2022-06-20T04:06:32.277348
2020-05-14T07:03:52
2020-05-14T07:03:52
263,840,098
0
0
null
null
null
null
UTF-8
Python
false
false
258
py
# client.py
import socket

sp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
host = socket.gethostname()
port = 13

sp.connect((host, port))
msg = sp.recv(1024)
sp.close()
print("The time received from the server is: %s" % msg.decode('ascii'))
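A minimal matching server sketch, assumed rather than taken from this repository, that serves the current time on the same port:

# server.py -- hypothetical counterpart, not part of this repository.
import socket
import time

srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
srv.bind((socket.gethostname(), 13))  # port 13 may need elevated privileges
srv.listen(1)
while True:
    conn, addr = srv.accept()
    conn.send(time.ctime().encode('ascii'))  # hand back the current time
    conn.close()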
[ "noreply@github.com" ]
Shruti-Pattajoshi.noreply@github.com
de8b449316abbe86696e3641635d94af6d290c5d
8acffb8c4ddca5bfef910e58d3faa0e4de83fce8
/ml-flask/Lib/site-packages/caffe2/python/operator_test/stats_put_ops_test.py
2ce56248c5dd0116931f91de9b4b556dd881e73b
[ "MIT" ]
permissive
YaminiHP/SimilitudeApp
8cbde52caec3c19d5fa73508fc005f38f79b8418
005c59894d8788c97be16ec420c0a43aaec99b80
refs/heads/master
2023-06-27T00:03:00.404080
2021-07-25T17:51:27
2021-07-25T17:51:27
389,390,951
0
0
null
null
null
null
UTF-8
Python
false
false
129
py
version https://git-lfs.github.com/spec/v1 oid sha256:86a74bb87f96bd8ebf2fa9ae72729c5cbe121a32edc1fb034496e084703631b3 size 6596
[ "yamprakash130@gmail.com" ]
yamprakash130@gmail.com
a35e6a756f615aca80c4b91a8b264a5aa0cd6d0e
9cd00edd008ce38ea3127f090b6867a91fe7193d
/src/plot_Qle_at_all_events_above_Tthreh.py
382993ac07bd63823ff8cd12124f714a8056199b
[]
no_license
shaoxiuma/heatwave_coupling
c5a2a2bba53351597f4cb60ecb446bfb9629812f
459f6bc72402b5dd3edf49bc3b9be380b5f54705
refs/heads/master
2021-09-13T06:50:48.733659
2018-04-26T06:09:54
2018-04-26T06:09:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,338
py
#!/usr/bin/env python """ For each of the OzFlux/FLUXNET2015 sites, plot the TXx and T-4 days Qle and bowen ratio That's all folks. """ __author__ = "Martin De Kauwe" __version__ = "1.0 (20.04.2018)" __email__ = "mdekauwe@gmail.com" import os import sys import glob import netCDF4 as nc import numpy as np import xarray as xr import matplotlib.pyplot as plt import pandas as pd import re import constants as c def main(fname): plot_dir = "plots" if not os.path.exists(plot_dir): os.makedirs(plot_dir) df = pd.read_csv(fname) df = df[df.pft == "EBF"] df = df[~np.isnan(df.temp)] #width = 12.0 #height = width / 1.618 #print(width, height) #sys.exit() width = 14 height = 10 fig = plt.figure(figsize=(width, height)) fig.subplots_adjust(hspace=0.05) fig.subplots_adjust(wspace=0.05) plt.rcParams['text.usetex'] = False plt.rcParams['font.family'] = "sans-serif" plt.rcParams['font.sans-serif'] = "Helvetica" plt.rcParams['axes.labelsize'] = 14 plt.rcParams['font.size'] = 14 plt.rcParams['legend.fontsize'] = 10 plt.rcParams['xtick.labelsize'] = 14 plt.rcParams['ytick.labelsize'] = 14 count = 0 sites = np.unique(df.site) for site in sites: site_name = re.sub(r"(\w)([A-Z])", r"\1 \2", site) ax = fig.add_subplot(3,3,1+count) df_site = df[df.site == site] events = int(len(df_site)/4) cnt = 0 for e in range(0, events): from scipy import stats x = df_site["temp"][cnt:cnt+4] y = df_site["Qle"][cnt:cnt+4] slope, intercept, r_value, p_value, std_err = stats.linregress(x,y) if slope > 0.0 and p_value <= 0.05: ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4], label=site, ls="-", marker="o", zorder=100) elif slope > 0.0 and p_value > 0.05: ax.plot(df_site["temp"][cnt:cnt+4], df_site["Qle"][cnt:cnt+4], label=site, ls="-", marker="o", color="lightgrey", zorder=1) cnt += 4 if count == 0: ax.set_ylabel("Qle (W m$^{-2}$)", position=(0.5, 0.0)) if count == 4: #ax.set_xlabel('Temperature ($^\circ$C)', position=(1.0, 0.5)) ax.set_xlabel('Temperature ($^\circ$C)') if count < 3: plt.setp(ax.get_xticklabels(), visible=False) if count != 0 and count != 3: plt.setp(ax.get_yticklabels(), visible=False) props = dict(boxstyle='round', facecolor='white', alpha=1.0, ec="white") ax.text(0.04, 0.95, site_name, transform=ax.transAxes, fontsize=14, verticalalignment='top', bbox=props) from matplotlib.ticker import MaxNLocator ax.yaxis.set_major_locator(MaxNLocator(4)) ax.set_ylim(0, 280) ax.set_xlim(15, 50) count += 1 ofdir = "/Users/mdekauwe/Dropbox/fluxnet_heatwaves_paper/figures/figs" fig.savefig(os.path.join(ofdir, "all_events.pdf"), bbox_inches='tight', pad_inches=0.1) #plt.show() if __name__ == "__main__": data_dir = "outputs/" fname = "ozflux_all_events.csv" fname = os.path.join(data_dir, fname) main(fname)
[ "mdekauwe@gmail.com" ]
mdekauwe@gmail.com
69aee45e204f7d1a92722373c63d4388faf5d6c9
d3542f9f10ecc1c8c2eefe954a386dbbb584cf3b
/pykl/kit/models.py
b87fd808ea27f116e2c5feb7e8de0737d3b99f86
[ "MIT" ]
permissive
wowngasb/pykl
7322fa36c0529526273486ea7ffc52108b78cd6f
872f97dfaab92cce309078940d1273cf26daed37
refs/heads/master
2023-06-01T08:19:55.962876
2023-05-19T07:36:52
2023-05-19T07:36:52
101,482,272
0
0
MIT
2023-01-11T23:45:19
2017-08-26T11:43:49
Python
UTF-8
Python
false
false
14,637
py
# coding: utf-8 import random import time import hashlib from inspect import isclass from git import Repo as GitRepo from sqlalchemy.inspection import inspect as sqlalchemyinspect from sqlalchemy.ext.declarative import declarative_base from pykl.tiny.grapheneinfo import ( _is_graphql, _is_graphql_cls, _is_graphql_mutation ) from pykl.tiny.codegen.utils import ( name_from_repr, camel_to_underline, underline_to_camel, ) from base_type import * from cmd import db, app Base = db.Model class MigrateVersion(Base): u"""table migrate_version""" __tablename__ = 'migrate_version' repository_id = Column(String(191), primary_key=True, doc=u"""field repository_id""", info=CustomField | SortableField) repository_path = Column(Text, doc=u"""field repository_path""", info=CustomField | SortableField) version = Column(Integer, doc=u"""field version""", info=CustomField | SortableField) class Actor(Base): u"""table kit_actor""" __tablename__ = 'kit_actor' actor_id = Column(Integer, primary_key=True, doc=u"""对应 actor_id""", info=SortableField | InitializeField) actor_name = Column(String(64), doc=u"""field actor_name""", info=CustomField | SortableField) actor_email = Column(String(64), doc=u"""field actor_email""", info=CustomField | SortableField) class Blob(Base): u"""table kit_blob""" __tablename__ = 'kit_blob' blob_id = Column(Integer, primary_key=True, doc=u"""对应 blob_id""", info=SortableField | InitializeField) blob_path = Column(String(64), doc=u"""field blob_path""", info=CustomField | SortableField) blob_hash = Column(String(40), doc=u"""field blob_hash""", info=CustomField | SortableField) blob_mode = Column(Integer, doc=u"""field blob_mode""", info=CustomField | SortableField) blob_size = Column(Integer, doc=u"""field blob_size""", info=CustomField | SortableField) class Tree(Base): u"""table kit_tree""" __tablename__ = 'kit_tree' tree_id = Column(Integer, primary_key=True, doc=u"""对应 tree_id""", info=SortableField | InitializeField) tree_path = Column(String(64), doc=u"""field tree_path""", info=CustomField | SortableField) tree_hash = Column(String(40), doc=u"""field tree_hash""", info=CustomField | SortableField) tree_mode = Column(Integer, doc=u"""field tree_mode""", info=CustomField | SortableField) tree_size = Column(Integer, doc=u"""field tree_size""", info=CustomField | SortableField) @classmethod def info(cls): class Tree(SQLAlchemyObjectType): class Meta: model = cls trees = List(lambda :cls, description=u'trees') def resolve_trees(self, args, context, info): return [_Tree(tree) for tree in self._tree.trees] blobs = List(lambda :Blob, description=u'blobs') def resolve_blobs(self, args, context, info): return [_Blob(blob) for blob in self._tree.blobs] blobfile = Field(lambda :Blob, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_blobfile(self, args, context, info): path = args.get('path', '') return search_blobfile(self._tree, path) treedir = Field(lambda :Tree, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_treedir(self, args, context, info): path = args.get('path', '') return search_treedir(self._tree, path) return Tree class Commit(Base): u"""table kit_commit""" __tablename__ = 'kit_commit' commit_id = Column(Integer, primary_key=True, doc=u"""对应 commit_id""", info=SortableField | InitializeField) commit_hash = Column(String(40), doc=u"""field commit_hash""", info=CustomField | SortableField) commit_message = Column(String(191), doc=u"""field 
repo_path""", info=CustomField | SortableField) committed_date = Column(Integer, doc=u"""field repo_path""", info=CustomField | SortableField) @classmethod def info(cls): class Commit(SQLAlchemyObjectType): class Meta: model = cls author = Field(lambda :Actor, description=u'对应 author') def resolve_author(self, args, context, info): author = self._commit.author return _Actor(author) committer = Field(lambda :Actor, description=u'对应 committer') def resolve_committer(self, args, context, info): committer = self._commit.committer return _Actor(committer) parents = List(lambda :cls, description=u'parents commits') def resolve_parents(self, args, context, info): return [_Commit(commit) for commit in self._commit.parents] tree = Field(lambda :Tree, description=u'对应 tree') def resolve_tree(self, args, context, info): tree = self._commit.tree return _Tree(tree) blobfile = Field(lambda :Blob, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_blobfile(self, args, context, info): path = args.get('path', '') return search_blobfile(self._commit.tree, path) treedir = Field(lambda :Tree, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_treedir(self, args, context, info): path = args.get('path', '') return search_treedir(self._commit.tree, path) return Commit class Ref(Base): u"""table kit_ref""" __tablename__ = 'kit_ref' ref_id = Column(Integer, primary_key=True, doc=u"""对应 repo_id""", info=SortableField | InitializeField) ref_path = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField) ref_name = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField) @classmethod def info(cls): class Ref(SQLAlchemyObjectType): class Meta: model = cls commit = Field(lambda :Commit, description=u'对应 commit') def resolve_commit(self, args, context, info): commit = self._ref.commit return _Commit(commit) blobfile = Field(lambda :Blob, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_blobfile(self, args, context, info): path = args.get('path', '') return search_blobfile(self._ref.commit.tree, path) treedir = Field(lambda :Tree, description=u'对应 blob', path=g.Argument(g.String, default_value="", description=u'input you file name') ) def resolve_treedir(self, args, context, info): path = args.get('path', '') return search_treedir(self._ref.commit.tree, path) commits = List(lambda :Commit, description=u'往前推算 commits', max_count=g.Argument(g.Int, description=u'input max_count') ) def resolve_commits(self, args, context, info): max_count = args.get('max_count', 10) if max_count <= 0: return [] return [_Commit(commit) for commit in self._ref.repo.iter_commits(self._ref.name, max_count=max_count)] return Ref class Repo(Base): u"""table kit_repo""" __tablename__ = 'kit_repo' repo_id = Column(Integer, primary_key=True, doc=u"""对应 repo_id""", info=SortableField | InitializeField) repo_path = Column(String(191), doc=u"""field repo_path""", info=CustomField | SortableField) @classmethod def info(cls): class Repo(SQLAlchemyObjectType): class Meta: model = cls head = Field(lambda :Ref, description=u'查找 引用', name=g.Argument(g.String, default_value="master", description=u'input you name') ) def resolve_head(self, args, context, info): name = args.get('name', '') if not name: return None ref = self._repo.heads[name] return _Ref(ref) heads = List(lambda :Ref, description=u'引用') def 
resolve_heads(self, args, context, info): return [_Ref(ref) for ref in self._repo.heads] master = Field(lambda :Ref, description=u'master 引用') def resolve_master(self, args, context, info): ref = self._repo.heads.master return _Ref(ref) tag = Field(lambda :Ref, description=u'查找 tag', name=g.Argument(g.String, description=u'input you tag') ) def resolve_tag(self, args, context, info): name = args.get('name', '') if not name: return None ref = self._repo.tags[name] return _Ref(ref) tags = List(lambda :Ref, description=u'tag') def resolve_tags(self, args, context, info): return [_Ref(ref) for ref in self._repo.tags] return Repo def search_blobfile(_tree, path): if not path: return None def _resolve_blobfile(blobs, trees): for blob in blobs: if path == blob.path: return _Blob(blob) for tree in trees: ret = _resolve_blobfile(tree.blobs, tree.trees) if path.startswith(tree.path) else None if ret: return ret return _resolve_blobfile(_tree.blobs, _tree.trees) def search_treedir(_tree, path): if not path: return None def _resolve_treedir(trees): for tree in trees: if path == tree.path: return _Tree(tree) for tree in trees: ret = _resolve_treedir(tree.trees) if path.startswith(tree.path) else None if ret: return ret return _resolve_treedir(_tree.trees) def _Actor(actor, actor_id=0): obj = Actor(actor_id=actor_id, actor_name=actor.name, actor_email=actor.email) obj._actor = actor return obj def _Blob(blob, blob_id=0): obj = Blob(blob_id=0, blob_path=blob.path, blob_hash=blob.hexsha, blob_mode=blob.mode, blob_size=blob.size) obj._blob = blob return obj def _Tree(tree, tree_id=0): obj = Tree(tree_id=tree_id, tree_path=tree.path, tree_hash=tree.hexsha, tree_mode=tree.mode, tree_size=tree.size) obj._tree = tree return obj def _Commit(commit, commit_id=0): obj = Commit(commit_id=commit_id, commit_hash=commit.hexsha, commit_message=commit.message, committed_date=commit.committed_date) obj._commit = commit return obj def _Ref(ref, ref_id=0): obj = Ref(ref_id=ref_id, ref_name=ref.name, ref_path=ref.path) obj._ref = ref return obj def _Repo(repo, repo_id=0): obj = Repo(repo_id=repo_id, repo_path=repo.working_dir) obj._repo = repo return obj ############################################################## ################### 根查询 Query ###################### ############################################################## class Query(g.ObjectType): hello = g.String(name=g.Argument(g.String, default_value="world", description=u'input you name')) deprecatedField = Field(g.String, deprecation_reason = 'This field is deprecated!') fieldWithException = g.String() migrateVersion = Field(MigrateVersion, description=u'migrate_version') repo = Field(Repo, description=u'load repo by path', repo_path=g.Argument(g.String, description=u'input repo path'), ) def resolve_repo(self, args, context, info): repo_path = args.get('repo_path', '') repo = GitRepo(repo_path) return _Repo(repo) curRepo = Field(Repo, description=u'this repo') def resolve_curRepo(self, args, context, info): repo = app.config.get('REPO') return _Repo(repo) def resolve_hello(self, args, context, info): return 'Hello, %s!' % (args.get('name', ''), ) def resolve_deprecatedField(self, args, context, info): return 'You can request deprecated field, but it is not displayed in auto-generated documentation by default.' 
def resolve_fieldWithException(self, args, context, info): raise ValueError('Exception message thrown in field resolver') def resolve_migrateVersion(self, args, context, info): return MigrateVersion.query.first() ############################################################## ################### Mutations ###################### ############################################################## def build_input(dao, bit_mask): return {k: BuildArgument(v) for k, v in mask_field(dao, bit_mask).items()} class CreateMigrateVersion(g.Mutation): Input = type('Input', (), build_input(MigrateVersion, InitializeField)) ok = g.Boolean() msg = g.String() migrateVersion = Field(MigrateVersion) @staticmethod def mutate(root, args, context, info): return CreateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first()) class UpdateMigrateVersion(g.Mutation): Input = type('Input', (), build_input(MigrateVersion, EditableField)) ok = g.Boolean() msg = g.String() migrateVersion = Field(MigrateVersion) @staticmethod def mutate(root, args, context, info): return UpdateMigrateVersion(ok=True, msg='suc', migrateVersion=MigrateVersion.query.first()) ############################################################## ################### 根查询 Mutations ###################### ############################################################## Mutations = type('Mutations', (g.ObjectType, ), {camel_to_underline(name_from_repr(v)):v.Field() for _, v in globals().items() if _is_graphql_mutation(v)}) tables = [tbl if BuildType(tbl) else tbl for _, tbl in globals().items() if isclass(tbl) and issubclass(tbl, Base) and tbl != Base] schema = g.Schema(query=Query, mutation=Mutations, types=[BuildType(tbl) for tbl in tables] + [cls for _, cls in globals().items() if _is_graphql_cls(cls)], auto_camelcase = False)
[ "wuyou7410@gmail.com" ]
wuyou7410@gmail.com
74b32b4c2a6853ebc774d3f547b249081b9289cd
4ee9de394b2650d7cca19b2848100e8db9edc596
/solution-python/001_two_sum.py
0fab498f08572f102087ec4c7aa040b784330840
[]
no_license
gongfuPanada/notes-leetcode
b99d5fd18a14558c1cbeb1538fd8bddbfdeb3fdd
0bc611efcee8286c07fd5af1257a27a2575363b0
refs/heads/master
2020-12-29T00:59:00.512133
2016-04-24T12:54:22
2016-04-24T12:54:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
444
py
""" 001. Two Sum @name: li jin @date: Feb 19, 2016 @link: https://leetcode.com/problems/two-sum/ @time: 56 ms """ class Solution(object): def twoSum(self, nums, target): """ :type nums: List[int] :type target: int :rtype: List[int] """ d = {} for i, n in enumerate(nums): if n in d: return [d[n], i] d[target - n] = i
[ "lijinwithyou@gmail.com" ]
lijinwithyou@gmail.com
65ea64bcdab906276d4068b3999cba770c961f77
14f078c2f88ac656b56ee8a9a39eaf0135de4704
/VideoxD/__main__.py
c4fa566bb02076067104650f3c8fd3e7a30230d2
[ "MIT" ]
permissive
Saksham07529/VideoChatStreamBot
dd39a7d1e16f21995893184a9e394659054073b2
72dd8f24d38494a683606118f34154177a643fb7
refs/heads/main
2023-08-17T17:05:56.326866
2021-09-14T06:02:06
2021-09-14T06:02:06
null
0
0
null
null
null
null
UTF-8
Python
false
false
850
py
import asyncio from misc import Calls, app, bot from pyrogram import idle async def init(): await app.start() print("𝙐𝙨𝙚𝙧 𝙖𝙘𝙘𝙤𝙪𝙣𝙩 𝙄𝙣𝙞𝙩𝙞𝙖𝙡𝙞𝙯𝙚𝙙.") await bot.start() print("𝘽𝙤𝙩 𝙄𝙣𝙞𝙩𝙞𝙖𝙡𝙞𝙯𝙚𝙙.") print( "𝙔𝙤𝙪 𝙈𝙞𝙜𝙝𝙩 𝙨𝙚𝙚 𝙉𝙤 𝙋𝙡𝙪𝙜𝙞𝙣𝙨 𝙇𝙤𝙖𝙙𝙚𝙙 𝙏𝙝𝙖𝙩𝙨 𝘼 𝘽𝙪𝙜 𝘽𝙮 𝙡𝙖𝙩𝙚𝙨𝙩 𝙫𝙚𝙧𝙨𝙞𝙤𝙣 𝙤𝙛 𝙋𝙮𝙧𝙤𝙜𝙧𝙖𝙢, 𝙋𝙡𝙪𝙜𝙞𝙣𝙨 𝙝𝙖𝙫𝙚 𝘽𝙚𝙚𝙣 𝙇𝙤𝙖𝙙𝙚𝙙 𝙎𝙪𝙘𝙘𝙚𝙨𝙨𝙛𝙪𝙡𝙡𝙮." ) await idle() loop = asyncio.get_event_loop() if __name__ == "__main__": loop.run_until_complete(init())
[ "noreply@github.com" ]
Saksham07529.noreply@github.com
394ff5431df2a4e88c516043b26846577d06ec92
91e389e4fc6a91874f4d4665bc0986e00f66074c
/downloadXkcd.py
b7a47af06c98d6e79909227078a06e4a10a26b7d
[]
no_license
Sajmon25/Automate-the-Boring-Stuff-with-Python
5cb36496d5a64988f50656b2f6d3172ab05c9c5b
8fb51eb0e9e558f939d5a7a4038257d7c02a9165
refs/heads/master
2020-04-20T12:18:54.236263
2019-05-09T21:31:33
2019-05-09T21:31:33
168,838,932
0
0
null
null
null
null
UTF-8
Python
false
false
1,355
py
#! python3 # downloadXkcd.py - Download every single XKCD comic. import requests import os import bs4 url = 'http://xkcd.com' os.makedirs('xkcd', exist_ok=True) while not url.endswith('#'): # TODO: Download the page print('Downloading page %s...' % url) res = requests.get(url) res.raise_for_status() soup = bs4.BeautifulSoup(res.text, 'html.parser') # TODO: Find the URL of the comic image. comicElem = soup.select('#comic img') if comicElem == []: print('Could not find comic image.') else: try: comicUrl = 'http:' + comicElem[0].get('src') # TODO: Download the image. print('Downloading image %s...' % comicUrl) res = requests.get(comicUrl) res.raise_for_status() except requests.exceptions.MissingSchema: # skip this comic prevLink = soup.select('a[rel="prev"]')[0] url = 'http://xkcd.com' + prevLink.get('href') continue # TODO: Save the image to ./xkcd. imageFile = open(os.path.join('xkcd', os.path.basename(comicUrl)), 'wb') for chunk in res.iter_content(100000): imageFile.write(chunk) imageFile.close() # TODO: Get the Prev button's url. prevLink = soup.select('a[rel="prev"]')[0] url = 'http://xkcd.com' + prevLink.get('href') print('Done')
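The loop guard relies on an assumed site behaviour: on the very first comic, xkcd's "Prev" link has href="#", so the next URL becomes 'http://xkcd.com#' and endswith('#') ends the crawl. A tiny illustration of that sentinel check:

for u in ('http://xkcd.com/1/', 'http://xkcd.com#'):
    print(u, u.endswith('#'))  # False, then True -> the while loop exits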
[ "szymon.grzedi@gmail.com" ]
szymon.grzedi@gmail.com
8710f437a1fb5651739771f1ae3cb10e87729160
1385d5c2bff76949f139951d5422ee4c9df13129
/135_webRead.py
c7a5360cbb079c5e77989ac3f6859a9d3cf602b9
[]
no_license
luongs/pyPractice
b28a11b6b8909ac7b873184fd9be4c292c8559b5
775b8e608c7bfb4f43bdc91f1f6f20d82c9f43fc
refs/heads/master
2021-05-26T13:24:48.500697
2013-04-09T02:21:28
2013-04-09T02:21:28
null
0
0
null
null
null
null
UTF-8
Python
false
false
236
py
# Reads txt from the web and displays result in python # Author: Sebastien Luong import urllib2 #library handles url stuff f=urllib2.urlopen("http://cs.leanderisd.org/mitchellis.txt") print f.readline(), print f.readline(), f.close()
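Since urllib2 exists only in Python 2, a rough Python 3 equivalent of the same four lines would look like this (same URL as the original; whether it is still reachable is not guaranteed):

from urllib.request import urlopen  # Python 3 home of urllib2.urlopen

with urlopen("http://cs.leanderisd.org/mitchellis.txt") as f:
    print(f.readline().decode(), end='')  # responses are bytes in Python 3
    print(f.readline().decode(), end='')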
[ "sebastienluong@gmail.com" ]
sebastienluong@gmail.com
90f553a76ab38014ddd6d73cdf2088433003db3d
ade74e6b497703ef01fe267b665261e90e335a18
/studyvisapp/views.py
c5ca19a415afad73451833bc2c565ededea24865
[]
no_license
TMdiesel/study-vis-app
0492ff8bdda5f1c2596cca0ac3642b6c81691f9a
a0440a8ffd21d5848faf8eeabfed4c7be38be90f
refs/heads/main
2023-07-08T04:32:48.564623
2021-08-15T12:08:53
2021-08-15T12:08:53
395,558,322
0
0
null
null
null
null
UTF-8
Python
false
false
7,290
py
import datetime from datetime import timedelta from django.shortcuts import render, redirect from django.views import View from django.views.generic import ( ListView, CreateView, DeleteView, UpdateView, TemplateView, ) from django.urls import reverse_lazy from django.utils.timezone import make_aware import plotly.graph_objects as go import pandas as pd import jpholiday from django_pandas.io import read_frame from .models import TimeModel from .forms import HomeForm # Create your views here. class StudyList(ListView): template_name = "list.html" model = TimeModel paginate_by = 100 queryset = model.objects.order_by("-starttime") def get_queryset(self): if self.request.GET.get("yearmonth") is None: year = datetime.date.today().year month = datetime.date.today().month else: year = self.request.GET.get("yearmonth").split("-")[0] month = self.request.GET.get("yearmonth").split("-")[1] object_list = self.model.objects.filter( starttime__year=year, starttime__month=month ).order_by("-starttime") return object_list class StudyCreate(CreateView): template_name = "create.html" model = TimeModel success_url = reverse_lazy("list") fields = ("item", "memo", "starttime", "endtime") class StudyDelete(DeleteView): template_name = "delete.html" model = TimeModel success_url = reverse_lazy("list") class StudyUpdate(UpdateView): template_name = "update.html" model = TimeModel success_url = reverse_lazy("list") fields = ("item", "memo", "starttime", "endtime", "duration") class StudyHome(CreateView): template_name = "home.html" model = TimeModel success_url = reverse_lazy("list") fields = ("item",) def post(self, request): form = HomeForm(request.POST) now = datetime.datetime.now() object = form.save(commit=False) object.starttime = make_aware(now) object.isactive = True object.save() return redirect("list") class StudyEnd(UpdateView): template_name = "end.html" model = TimeModel success_url = reverse_lazy("list") fields = ("item", "memo", "starttime", "endtime", "duration", "isactive") def post(self, request, pk): article = TimeModel.objects.get(pk=pk) form = HomeForm(request.POST, instance=article) now = datetime.datetime.now() object = form.save(commit=False) object.endtime = make_aware(now) object.isactive = False object.save() return redirect("list") def StudyTimer(request): return render(request, "timer.html") class StudyVis(TemplateView): template_name = "vis.html" def get_context_data(self, **kwargs): context = super(StudyVis, self).get_context_data(**kwargs) context["plot1"], context["plot2"] = self._create_graph() return context def _create_graph(self): # specify the date range if self.request.GET.get("yearmonth") is None: year = datetime.date.today().year month = datetime.date.today().month else: year = self.request.GET.get("yearmonth").split("-")[0] month = self.request.GET.get("yearmonth").split("-")[1] # read&create data qs = TimeModel.objects.filter(starttime__year=year, starttime__month=month) df = read_frame(qs, verbose=True) df["duration"] = df["duration"].apply(lambda x: x.total_seconds() / 3600) df["date"] = df["starttime"].apply(lambda x: x.date()) date_df = df.groupby("date").sum()[["duration"]] date_df = self._complement_date(date_df) task_num_gdf = df.groupby("item").sum()[["duration"]] _, holiday_index = self._create_biz_hol_index( date_df.index.min(), date_df.index.max() ) # create graph fig1 = go.Figure( go.Scatter( x=date_df.index, y=date_df["duration"].round(decimals=1), mode="lines+markers", marker=dict( size=7, ), name="all", ), layout=go.Layout( 
title=f"勉強時間の推移({year}/{month})", width=800, height=400, xaxis=dict( range=[ date_df.index.min() - timedelta(days=1), date_df.index.max() + timedelta(days=1), ], dtick="D", tickformat="%d", ), ), ) fig1.add_trace( go.Scatter( x=date_df.index[holiday_index], y=date_df["duration"][holiday_index].round(decimals=1), mode="markers", marker=dict( size=7, ), name="休日", ), ) fig2 = go.Figure( go.Bar( x=task_num_gdf.index, y=task_num_gdf["duration"].round(decimals=1), ), layout=go.Layout( title=f"項目ごとの勉強時間({year}/{month})", width=800, height=400, ), ) return fig1.to_html(include_plotlyjs=False), fig2.to_html( include_plotlyjs=False ) def _complement_date(self, s: pd.Series) -> pd.DataFrame: """ 日付がindexのSeriesを入力して、 欠けている日付をmin_dateからmax_dateの範囲で埋める """ str_min_date = s.index.min().strftime("%Y-%m-%d") str_max_date = s.index.max().strftime("%Y-%m-%d") dates_df = pd.DataFrame( index=pd.date_range(str_min_date, str_max_date, freq="D") ) return ( pd.DataFrame(s) .merge(dates_df, how="outer", left_index=True, right_index=True) .fillna(0) ) def _create_biz_hol_index( self, start_date: datetime.date, end_date: datetime.date ) -> pd.date_range: """ 平日と休日のindexを返す """ year = start_date.year holiday = [] holiday_dict = jpholiday.year_holidays(year) for i in range(len(holiday_dict)): holiday.append(holiday_dict[i][0]) holiday = holiday + [ datetime.date(year, 1, 1), datetime.date(year, 1, 2), datetime.date(year, 1, 3), datetime.date(year, 12, 31), ] # 年末年始追加 holiday = sorted(list(set(holiday))) # for uniqueness holiday = pd.to_datetime(holiday) calendar_full = pd.date_range(start_date, end_date, freq="D") business_index = [] holiday_index = [] for idx, calendar in enumerate(calendar_full): if ( (not calendar in holiday) and (calendar.weekday() >= 0) and (calendar.weekday() <= 4) ): business_index.append(idx) else: holiday_index.append(idx) return business_index, holiday_index
[ "cerezzodora2262@icloud.com" ]
cerezzodora2262@icloud.com
fbcd98a2b32c59fa1729e7b1bce3bb8639dc8ab9
51412575c66152170bfcbf91ee09954d162a4643
/arg.py
ca5cd4c062f6dfff6dbedd4034ba3e1274de78d0
[]
no_license
tladydrl/ch1.2
918f064efab84c771f17ea17503ac897d16f83ba
1acbc0d9c8195cb9bee7b3d40feff940ef764431
refs/heads/master
2020-05-20T13:03:30.806733
2019-05-09T12:55:01
2019-05-09T12:55:01
185,587,401
0
0
null
null
null
null
UTF-8
Python
false
false
202
py
import sys #print(sys.argv) # run this to see what comes in # the Python file itself? args = sys.argv[1:] # from the first element on; index 0 is the script's own file name. print(args) # args is a variable I created.
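A hypothetical invocation, for reference:

# $ python arg.py one two three
# ['one', 'two', 'three']      <- sys.argv[0], the script's own name, was sliced off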
[ "tladydrl12@naver.com" ]
tladydrl12@naver.com
298bdb7986c7ce282903098e71efc3e61ebde167
4b0c57dddf8bd98c021e0967b5d94563d15372e1
/run_MatrixElement/test/emptyPSets/emptyPSet_qqH125_cfg.py
1925d9eb5134f84222300788d85f42237860a66f
[]
no_license
aperloff/TAMUWW
fea6ed0066f3f2cef4d44c525ee843c6234460ba
c18e4b7822076bf74ee919509a6bd1f3cf780e11
refs/heads/master
2021-01-21T14:12:34.813887
2018-07-23T04:59:40
2018-07-23T04:59:40
10,922,954
0
1
null
null
null
null
UTF-8
Python
false
false
896
py
import FWCore.ParameterSet.Config as cms import os #! #! PROCESS #! process = cms.Process("MatrixElementProcess") #! #! SERVICES #! #process.load('Configuration.StandardSequences.Services_cff') process.load('FWCore.MessageLogger.MessageLogger_cfi') process.MessageLogger.cerr.FwkReport.reportEvery = 5000 process.load('CommonTools.UtilAlgos.TFileService_cfi') process.TFileService.fileName=cms.string('qqH125.root') #! #! INPUT #! inputFiles = cms.untracked.vstring( 'root://cmsxrootd.fnal.gov//store/user/aperloff/MatrixElement/Summer12ME8TeV/MEInput/qqH125.root' ) process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(10)) process.source = cms.Source("PoolSource", skipEvents = cms.untracked.uint32(0), fileNames = inputFiles ) process.options = cms.untracked.PSet( wantSummary = cms.untracked.bool(True) )
[ "aperloff@physics.tamu.edu" ]
aperloff@physics.tamu.edu
afbde151e2e1473b1d6aa573579299dc0eb3ce8d
18c03a43ce50ee0129f9f45ada1bdaa2ff4f5774
/epistasis/__init__.py
4f9536d756aca5c653b3e69bbff59937aa2ff678
[ "Unlicense" ]
permissive
harmsm/epistasis
acf7b5678b328527b2c0063f81d512fcbcd78ce1
f098700c15dbd93977d797a1a1708b4cfb6037b3
refs/heads/master
2022-04-30T13:09:49.106984
2022-03-19T05:29:37
2022-03-19T05:29:37
150,969,948
0
2
null
null
null
null
UTF-8
Python
false
false
1,105
py
"""\ A Python API for modeling statistical, high-order epistasis in genotype-phenotype maps. This library provides methods for: 1. Decomposing genotype-phenotype maps into high-order epistatic interactions 2. Finding nonlinear scales in the genotype-phenotype map 3. Calculating the contributions of different epistatic orders 4. Estimating the uncertainty of epistatic coefficients amd 5. Interpreting the evolutionary importance of high-order interactions. For more information about the epistasis models in this library, see our Genetics paper: `Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`_ .. _`Sailer, Z. R., & Harms, M. J. (2017). "Detecting High-Order Epistasis in Nonlinear Genotype-Phenotype Maps." Genetics, 205(3), 1079-1088.`: http://www.genetics.org/content/205/3/1079 Currently, this package works only as an API and there is no command-line interface. Instead, we encourage you use this package inside `Jupyter notebooks`_ . """ from .__version__ import __version__
[ "zachsailer@gmail.com" ]
zachsailer@gmail.com
d8e42f2ce2432b336adb63018b3a51e93aacef6d
1c0542cef2ac6a5fb691602887236bf70f9bf71f
/speed_test_sar/sfsi_speed/mmcls/models/backbones/utils/gumbel_sigmoid.py
6610270f02c80a91e8e61cd013f8b7dff68c6ba3
[ "Apache-2.0" ]
permissive
yizenghan/sarNet
683f45620013f906cb8a550713e786787074a8ae
d47a6e243677811b259a753233fbbaf86d2c9c97
refs/heads/master
2023-07-16T02:09:11.913765
2021-08-30T02:04:02
2021-08-30T02:04:02
299,276,627
11
1
null
null
null
null
UTF-8
Python
false
false
1,723
py
import torch from torch import nn class GumbelSigmoid(nn.Module): def __init__(self, max_T, decay_alpha, decay_method='exp', start_iter=0): super(GumbelSigmoid, self).__init__() self.max_T = max_T self.cur_T = max_T self.step = 0 self.decay_alpha = decay_alpha self.decay_method = decay_method self.softmax = nn.Softmax(dim=1) self.p_value = 1e-8 # self.cur_T = (self.decay_alpha ** start_iter) * self.cur_T assert self.decay_method in ['exp', 'step', 'cosine'] def forward(self, x): # Shape <x> : [N, C, H, W] # Shape <r> : [N, C, H, W] r = 1 - x x = (x + self.p_value).log() r = (r + self.p_value).log() # Generate Noise x_N = torch.rand_like(x) r_N = torch.rand_like(r) x_N = -1 * (x_N + self.p_value).log() r_N = -1 * (r_N + self.p_value).log() x_N = -1 * (x_N + self.p_value).log() r_N = -1 * (r_N + self.p_value).log() # Get Final Distribution x = x + x_N x = x / (self.cur_T + self.p_value) r = r + r_N r = r / (self.cur_T + self.p_value) x = torch.cat((x, r), dim=1) x = self.softmax(x) x = x[:, [0], :, :] if self.training: self.cur_T = self.cur_T * self.decay_alpha # if self.cur_T < 0.5 or not self.training: # print('cur_T:{0}'.format(self.cur_T)) # self.step += 1 # if self.step % 50 == 0: # print('cur_T:{0}'.format(self.cur_T)) # return x if __name__ == '__main__': pass # ToDo: Test Code Here. # _test_T = 0.6 # Block = GumbelSigmoid(_test_T, 1.0)
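A smoke test of the module above (a sketch; the shape and hyper-parameters are arbitrary). Two things worth noting: the doubled -log applied to the noise is intentional, since -log(-log(U)) is a Gumbel(0, 1) sample, and with a single input channel the softmax over the stacked [x, 1-x] pair is the standard binary Gumbel-softmax relaxation.

import torch

gate = GumbelSigmoid(max_T=0.6, decay_alpha=0.999)
x = torch.rand(2, 1, 8, 8)   # [N, C, H, W] probabilities in (0, 1)
y = gate(x)                  # same shape; values harden toward {0, 1} as cur_T decays
print(y.shape, float(y.min()), float(y.max()))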
[ "yizeng38@gmail.com" ]
yizeng38@gmail.com
d8e6d6bc745881e200737675ec2cd28b084d364d
68c003a526414fef3c23ad591982f1113ca8a72c
/api/urls.py
6287d8ae58d870352565ce7f626f9a3aa7037130
[]
no_license
pawanpaudel93/NepAmbulance
9d99ef3a3592b3a17091889d9db32aa952974400
b07dba43926c3f5a350b0acd75ac90b4842e3e32
refs/heads/master
2020-06-14T08:59:03.523102
2020-01-07T09:05:03
2020-01-07T09:05:03
194,965,063
0
0
null
null
null
null
UTF-8
Python
false
false
761
py
from django.contrib import admin from django.urls import path from .views import ListCreateAmbulance, RetrieveUpdateDeleteAmbulance, ListDistrict, ListProvince urlpatterns = [ path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/', ListCreateAmbulance.as_view(), name="list-create-api"), path('ambulance/<int:province>/<slug:district>/<slug:city>/<int:ward>/<int:pk>/', RetrieveUpdateDeleteAmbulance.as_view()), # path('get/wards/<slug:city>/', ListWard.as_view(), name="get-wards"), # path('get/cities/<slug:district>/', ListCity.as_view(), name='get-cities'), path('get/districts/<slug:province>/', ListDistrict.as_view(), name='get-districts'), path('get/provinces/', ListProvince.as_view(), name='get-provinces'), ]
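A sketch of resolving these names with Django's reverse() (the slug value is made up; the actual prefix depends on where this urls.py is included):

from django.urls import reverse

reverse('get-provinces')                                     # .../get/provinces/
reverse('get-districts', kwargs={'province': 'province-1'})  # .../get/districts/province-1/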
[ "pawanpaudel93@gmail.com" ]
pawanpaudel93@gmail.com
b684e9ee901a0010e93ec30efff577b9d58c68ef
d3ae9c6377109a4edea819c3d574e6ac529c98ba
/read_csv_pandas.py
25cdfa66408f6a0fb0f59558db628c2803ad4c55
[]
no_license
vmburbinamx/textParsing
9d8696f798217f7124501ac1756019a95166d79f
46d6f872de79879cd6129c2ace2684409f02851c
refs/heads/master
2023-01-02T20:50:45.745922
2020-10-26T02:40:29
2020-10-26T02:40:29
307,169,054
0
0
null
null
null
null
UTF-8
Python
false
false
265
py
import pandas as pd import os # Set path to the 'data' folder path_to_data_in_current_directory = os.getcwd() + '\\data\\' # Set full path name fullFileName = path_to_data_in_current_directory+'titanic_sub_just_numbers.csv' # Read the file data = pd.read_csv(fullFileName)
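The same path can be built portably with os.path.join, which is already imported above; a variant (same file name, just restructured) that avoids hard-coding Windows separators:

fullFileName = os.path.join(os.getcwd(), 'data', 'titanic_sub_just_numbers.csv')
data = pd.read_csv(fullFileName)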
[ "victormanueluj@gmail.com" ]
victormanueluj@gmail.com
49617de351135a141ec72527b03b3dde22f0125d
422dd5d3c48a608b093cbfa92085e95a105a5752
/students/nDruP/lesson06/calculator/calculator.py
d29df83130fa0fb445a7b6fea8d286044b6917c7
[]
no_license
UWPCE-PythonCert-ClassRepos/SP_Online_Course2_2018
a2052fdecd187d7dd6dbe6f1387b4f7341623e93
b1fea0309b3495b3e1dc167d7029bc9e4b6f00f1
refs/heads/master
2021-06-07T09:06:21.100330
2019-11-08T23:42:42
2019-11-08T23:42:42
130,731,872
4
70
null
2021-06-01T22:29:19
2018-04-23T17:24:22
Python
UTF-8
Python
false
false
1,658
py
""" InsufficientOperands Exception and the Calculator class """ class InsufficientOperands(Exception): """ Create the InsufficientOperands exception for use in Calculator. """ pass class Calculator(object): """ Create the calculator object. """ def __init__(self, adder, subtracter, multiplier, divider): """ Initialize the calculator """ self.adder = adder self.subtracter = subtracter self.multiplier = multiplier self.divider = divider self.stack = [] def enter_number(self, num): """ Insert the input to the front of self.stack """ self.stack.insert(0, num) def _do_calc(self, operator): """ Return result of the operation of the first 2 elements of self.stack """ try: result = operator.calc(self.stack[1], self.stack[0]) except IndexError: raise InsufficientOperands self.stack = [result] return result def subtract(self): """ Return the difference of the first 2 elements of self.stack """ return self._do_calc(self.subtracter) def add(self): """ Return the sum of the first 2 elements of self.stack """ return self._do_calc(self.adder) def multiply(self): """ Return the product of the first 2 elements of self.stack """ return self._do_calc(self.multiplier) def divide(self): """ Return the quotient of the first 2 elements of self.stack """ return self._do_calc(self.divider)
[ "apark46.work@gmail.com" ]
apark46.work@gmail.com
a25e040005de4ab4ceb6b75d24ad6378699d31d5
acb8e84e3b9c987fcab341f799f41d5a5ec4d587
/langs/0/xt.py
2cabdcd454cba0a0dfcd2847512439922cc7dc0c
[]
no_license
G4te-Keep3r/HowdyHackers
46bfad63eafe5ac515da363e1c75fa6f4b9bca32
fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2
refs/heads/master
2020-08-01T12:08:10.782018
2016-11-13T20:45:50
2016-11-13T20:45:50
73,624,224
0
1
null
null
null
null
UTF-8
Python
false
false
485
py
import sys def printFunction(lineRemaining): if lineRemaining[0] == '"' and lineRemaining[-1] == '"': if len(lineRemaining) > 2: #data to print lineRemaining = lineRemaining[1:-1] print ' '.join(lineRemaining) else: print def main(fileName): with open(fileName) as f: for line in f: data = line.split() if data[0] == 'XT': printFunction(data[1:]) else: print 'ERROR' return if __name__ == '__main__': main(sys.argv[1])
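The dialect this interpreter accepts is narrower than it looks: since whole whitespace-split tokens are compared against '"', the quotes must stand alone. A hypothetical input file and invocation (Python 2, matching the print statements above):

# file: hello.xt
#   XT " hello world "
# $ python2 xt.py hello.xt
# hello world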
[ "juliettaylorswift@gmail.com" ]
juliettaylorswift@gmail.com
b97c469f8a12dbf8a8265a2bb6073036cda1fc81
25fd32f0c46b5883a820fd62aeceff6e1f38af1a
/02_python_excercise/swampy/World.py
ed16b2e8ebaf4295cde888b8cb65aa2753356e28
[]
no_license
snowcool1/python_starter_basic
fb6572b1c3f378813a99ac23c3ef130143c9f340
73b7919f4c20f3de953d8ea50dd7a8a883b04657
refs/heads/master
2023-03-23T13:40:18.881335
2020-12-12T00:46:59
2020-12-12T00:46:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
7,936
py
#!/usr/bin/python """ This module is part of Swampy, a suite of programs available from allendowney.com/swampy. Copyright 2005 Allen B. Downey Distributed under the GNU General Public License at gnu.org/licenses/gpl.html. """ import math import random import time import threading import sys import tkinter from swampy.Gui import Gui class World(Gui): """Represents the environment where Animals live. A World usually includes a canvas, where animals are drawn, and sometimes a control panel. """ current_world = None def __init__(self, delay=0.5, *args, **kwds): Gui.__init__(self, *args, **kwds) self.delay = delay self.title('World') # keep track of the most recent world World.current_world = self # set to False when the user presses quit. self.exists = True # list of animals that live in this world. self.animals = [] # if the user closes the window, shut down cleanly self.protocol("WM_DELETE_WINDOW", self.quit) def wait_for_user(self): """Waits for user events and processes them.""" try: self.mainloop() except KeyboardInterrupt: print('KeyboardInterrupt') def quit(self): """Shuts down the World.""" # tell other threads that the world is gone self.exists = False # destroy closes the window self.destroy() # quit terminates mainloop (but since mainloop can get called # recursively, quitting once might not be enough!) Gui.quit(self) def sleep(self): """Updates the GUI and sleeps. Calling Tk.update from a function that might be invoked by an event handler is generally considered a bad idea. For a discussion, see http://wiki.tcl.tk/1255 However, in this case: 1) It is by far the simplest option, and I want to keep this code readable. 2) It is generally the last thing that happens in an event handler. So any changes that happen during the update won't cause problems when it returns. Sleeping is also a potential problem, since the GUI is unresponsive while sleeping. So it is probably a good idea to keep delay less than about 0.5 seconds. """ self.update() time.sleep(self.delay) def register(self, animal): """Adds a new animal to the world.""" self.animals.append(animal) def unregister(self, animal): """Removes an animal from the world.""" self.animals.remove(animal) def clear(self): """Undraws and removes all the animals. And deletes anything else on the canvas. """ for animal in self.animals: animal.undraw() self.animals = [] try: self.canvas.delete('all') except AttributeError: print('Warning: World.clear: World must have a canvas.') def step(self): """Invoke the step method on every animal.""" for animal in self.animals: animal.step() def run(self): """Invoke step intermittently until the user presses Quit or Stop.""" self.running = True while self.exists and self.running: self.step() self.update() def stop(self): """Stops running.""" self.running = False def map_animals(self, callable): """Apply the given callable to all animals. Args: callable: any callable object, including Gui.Callable """ return list(map(callable, self.animals)) def make_interpreter(self, gs=None): """Makes an interpreter for this world. Creates an attribute named inter. """ self.inter = Interpreter(self, gs) def run_text(self): """Executes the code from the TextEntry in the control panel. Precondition: self must have an Interpreter and a text entry. """ source = self.te_code.get(1.0, tkinter.END) self.inter.run_code(source, '<user-provided code>') def run_file(self): """Read the code from the filename in the entry and runs it. Precondition: self must have an Interpreter and a filename entry. 
""" filename = self.en_file.get() fp = open(filename) source = fp.read() self.inter.run_code(source, filename) class Interpreter(object): """Encapsulates the environment where user-provided code executes.""" def __init__(self, world, gs=None): self.world = world # if the caller didn't provide globals, use the current env if gs == None: self.globals = globals() else: self.globals = gs def run_code_thread(self, *args): """Runs the given code in a new thread.""" return MyThread(self.run_code, *args) def run_code(self, source, filename): """Runs the given code in the saved environment.""" code = compile(source, filename, 'exec') try: exec(code, self.globals) except KeyboardInterrupt: self.world.quit() except tkinter.TclError: pass class MyThread(threading.Thread): """Wrapper for threading.Thread. Improves the syntax for creating and starting threads. """ def __init__(self, target, *args): threading.Thread.__init__(self, target=target, args=args) self.start() class Animal(object): """Abstract class, defines the methods child classes need to provide. Attributes: world: reference to the World the animal lives in. x: location in Canvas coordinates y: location in Canvas coordinates """ def __init__(self, world=None): self.world = world or World.current_world if self.world: self.world.register(self) self.x = 0 self.y = 0 def set_delay(self, delay): """Sets delay for this animal's world. delay is made available as an animal attribute for backward compatibility; ideally it should be considered an attribute of the world, not an animal. Args: delay: float delay in seconds """ self.world.delay = delay delay = property(lambda self: self.world.delay, set_delay) def step(self): """Takes one step. Subclasses should override this method. """ pass def draw(self): """Draws the animal. Subclasses should override this method. """ pass def undraw(self): """Undraws the animal.""" if self.world.exists and hasattr(self, 'tag'): self.world.canvas.delete(self.tag) def die(self): """Removes the animal from the world and undraws it.""" self.world.unregister(self) self.undraw() def redraw(self): """Undraws and then redraws the animal.""" if self.world.exists: self.undraw() self.draw() def polar(self, x, y, r, theta): """Converts polar coordinates to cartesian. Args: x, y: location of the origin r: radius theta: angle in degrees Returns: tuple of x, y coordinates """ rad = theta * math.pi/180 s = math.sin(rad) c = math.cos(rad) return [ x + r * c, y + r * s ] def wait_for_user(): """Invokes wait_for_user on the most recent World.""" World.current_world.wait_for_user() if __name__ == '__main__': # make a generic world world = World() # create a canvas and put a text item on it ca = world.ca() ca.text([0,0], 'hello') # wait for the user wait_for_user()
[ "doan.nguyen@datalogic.com" ]
doan.nguyen@datalogic.com
8f8251d41d03992c97c4284cab8980b06dce2ee6
c36e8ac0ccfd34a7d4245068b3d4ed6199927f9b
/main.py
0359c0eb11ccf5be256bc113ac8c06421867203b
[]
no_license
avec140/project
332d9a87c09400ef52e90ca5b2f60c9643531591
d3e60766b81c8fcfff61dabdd5849ec10ce4fba0
refs/heads/master
2023-04-20T12:27:36.575993
2021-04-30T09:24:39
2021-04-30T09:24:39
363,083,929
0
0
null
null
null
null
UTF-8
Python
false
false
1,749
py
from tkinter import * from tkinter.colorchooser import askcolor DEFAULT_PEN_SIZE = 1.0 DEFAULT_COLOR = "black" mode = "pen" old_x = None old_y = None mycolor = DEFAULT_COLOR erase_on = False def use_pen(): global mode mode = "pen" def use_brush(): global mode mode = "brush" def choose_color(): global mycolor mycolor = askcolor(color=mycolor)[1] def use_eraser(): global mode mode = "erase" def paint(event): global var, erase_on, mode, old_x, old_y fill_color = 'white' if mode == "erase" else mycolor if old_x and old_y: canvas.create_line(old_x, old_y, event.x, event.y, capstyle=ROUND, width=var.get(), fill=fill_color) old_x = event.x old_y = event.y def reset(event): global old_x, old_y old_x, old_y = None, None def clear_all(): global canvas canvas.delete('all') window = Tk() var = DoubleVar() penButton = Button(window, text='Pen', command=use_pen) penButton.grid(row=0, column=0, sticky=W + E) brushButton = Button(window, text='Brush', command=use_brush) brushButton.grid(row=0, column=1, sticky=W + E) colorButton = Button(window, text='Choose Color', command=choose_color) colorButton.grid(row=0, column=2, sticky=W + E) eraseButton = Button(window, text='Eraser', command=use_eraser) eraseButton.grid(row=0, column=3, sticky=W + E) clearButton = Button(window, text='Clear All', command=clear_all) clearButton.grid(row=0, column=4, sticky=W + E) scale = Scale(window, variable=var, orient=VERTICAL) scale.grid(row=1, column=5, sticky=N + S) canvas = Canvas(window, bg='white', width=600, height=400) canvas.grid(row=1, columnspan=5) canvas.bind('<B1-Motion>', paint) canvas.bind('<ButtonRelease-1>', reset) window.mainloop()
[ "avec140@naver.com" ]
avec140@naver.com
d9378d480308166701e5c54976dd75940bd624ed
4f01aff7aaaa979e80ee6bd02d01b24dfbfc0e9d
/scripts/easy_update.py
e883650bdfb287e1ae42d467098920d8a2ad51ff
[]
no_license
jakevc/easybuild-life-sciences
8d49a923a5448690d890205e4a85308bebd718c0
fef9de66a7a08ac3492eb38fe881fc74745aec61
refs/heads/master
2020-05-05T04:38:36.415766
2019-04-04T18:21:35
2019-04-04T18:21:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
32,541
py
#!/usr/bin/env python import re import os import sys import argparse import imp import requests # from pprint import pprint # from pprint import pformat """EasyUpdate performs package version updating for EasyBuild easyconfig files. Automates the updating of version information for R, Python and bundles that extend R and Python. Package version information is updated for modules in exts_list. Use language-specific APIs to resolve the current version of each package. Release Notes 2.0.0 2019-02-26 New feature to resolve dependent packages for R and Python bundles. Read exts_list for R and Python listed in dependencies. Refactor code into two major classes: FrameWork and UpdateExts. Rename subclasses for R and Python: UpdateR and UpdatePython. This will help with migration into the EB FrameWork. Fix bug with pkg_update counter 1.3.2 2018-12-19 follow "LinkingTo" for BioConductor packages reported by Maxime Boissonneault 1.3.1 2018-11-28 fix bugs with pypi: easy_update was adding incorrect package names from requests_dist. Verify package names and update easyconfig with name corrections. Package names from pypi.requests_dist are not always correct. Pypi package names are changing from dashes to underscores ipython-genutils -> ipython_genutils jupyter-core -> jupyter_core pyncacl -> PyNaCl 1.3.0 July 2018 update to use pypi.org JSON API Project API: GET /pypi/<project_name>/json Release API: GET /pypi/<project_name>/<version>/json """ __version__ = '2.0.0' __maintainer__ = 'John Dey jfdey@fredhutch.org' __date__ = 'Feb 26, 2019' class FrameWork: """provide access to EasyBuild Config file variables name, version, toolchain, eb.exts_list, dependencies, modulename, biocver, methods: print_update() """ def __init__(self, args, filename, primary): self.debug = False self.code = None self.pyver = None self.search_pkg = None self.indent_n = 4 self.indent = ' ' * self.indent_n self.ptr_head = 0 self.modulename = None # update EasyConfig exts_list or check single package if args.easyconfig: eb = self.parse_eb(filename, primary=True) self.exts_list = eb.exts_list self.toolchain = eb.toolchain self.name = eb.name self.version = eb.version self.modulename = eb.name + '-' + eb.version self.modulename += '-' + eb.toolchain['name'] self.modulename += '-' + eb.toolchain['version'] self.interpolate = {'name': eb.name, 'namelower': eb.name.lower(), 'version': eb.version, 'pyver': None, 'rver': None} self.parse_dependencies(eb) # exts_defaultclass = 'PythonPackage' | 'RPackage' | 'PerlModule' try: self.versionsuffix = eb.versionsuffix self.modulename += eb.versionsuffix except (AttributeError, NameError): self.versionsuffix = None self.modulename = self.modulename % self.interpolate if self.debug: sys.stderr.write('debug - modulename: %s\n' % self.modulename) sys.stderr.write('debug - file: %s\n' % filename[:-3]) try: self.dependencies = eb.dependencies except (AttributeError, NameError): self.dependencies = None try: self.biocver = eb.biocver if self.debug: print('biocver: %s' % self.biocver) except (AttributeError, NameError): pass if primary: self.check_eb_package_name(args.easyconfig) self.out = open(args.easyconfig[:-3] + ".update", 'w') elif args.search_pkg: self.search_pkg = args.search_pkg if args.biocver: self.biocver = args.biocver if args.pyver: self.name = "Python" self.version = args.pyver elif args.rver: self.name = "R" self.version = args.rver else: print('Language and version must be specified with ' + '[--pyver x.x | --rver x.x | --biocver x.x]')
sea_pkg = {'name': args.search_pkg, 'version': 'x', 'type': 'add', 'spec': {}, 'meta': {} } self.search_ext = sea_pkg def parse_eb(self, file_name, primary): """ interpret EasyConfig file with 'exec'. Interpreting fails if constants are used that are not defined within the EasyConfig file. Add undefined constants to <header>. """ header = 'SOURCE_TGZ = "%(name)s-%(version)s.tgz"\n' header += 'SOURCE_TAR_GZ = "%(name)s-%(version)s.tar.gz"\n' header += 'SOURCELOWER_TAR_GZ = "%(namelower)s-%(version)s.tar.gz"\n' header += 'PYPI_SOURCE = "https://pypi.python.org/packages/source/%(nameletter)s/%(name)s"\n' header += 'SOURCEFORGE_SOURCE = "https://download.sourceforge.net/%(namelower)s"\n' eb = imp.new_module("EasyConfig") try: with open(file_name, "r") as f: code = f.read() except IOError as err: print("opening %s: %s" % (file_name, err)) sys.exit(1) try: exec (header + code, eb.__dict__) except Exception as err: print("interpreting EasyConfig error: %s" % err) sys.exit(1) if primary: # save original text of source code self.code = code return eb def parse_dependencies(self, eb): try: dependencies = eb.dependencies except NameError: return for dep in dependencies: if dep[0] == 'Python': self.interpolate['pyver'] = dep[1] if dep[0] == 'R': self.interpolate['rver'] = dep[1] def check_eb_package_name(self, easyconfig): """ check that the easybuild file name matches the package name easyconfig is the original filename """ f_name = os.path.basename(easyconfig)[:-3] name_classification = f_name.split('-') if f_name != self.modulename: sys.stderr.write("Warning: file name does not match easybuild " + "module name\n") if f_name != self.modulename or self.debug: sys.stderr.write(" file name: %s\n module name: %s\n" % ( f_name, self.modulename)) def write_chunk(self, indx): self.out.write(self.code[self.ptr_head:indx]) self.ptr_head = indx def rewrite_extension(self, pkg): name = pkg['name'] name_indx = self.code[self.ptr_head:].find(name) name_indx += self.ptr_head + len(name) + 1 indx = self.code[name_indx:].find("'") + name_indx + 1 self.write_chunk(indx) self.out.write("%s'" % pkg['version']) self.ptr_head = self.code[self.ptr_head:].find("'") + self.ptr_head + 1 indx = self.code[self.ptr_head:].find('),') + self.ptr_head + 3 self.write_chunk(indx) def output_module(self, lang, pkg): """write exts_list entry """ if lang == 'R': output = "%s('%s', '%s')," % (self.indent, pkg['name'], pkg['version']) elif lang == 'Python': pkg_fmt = self.indent + "('%s', '%s', {\n" item_fmt = self.indent + self.indent + "'%s': %s,\n" list_fmt = self.indent + self.indent + "'%s': ['%s'],\n" output = pkg_fmt % (pkg['name'], pkg['version']) for item in pkg.keys(): if item in ['name', 'version', 'action', 'type', 'orig_ver', 'processed', 'meta', 'spec']: continue output += item_fmt % (item, pkg[item]) for item in pkg['spec'].keys(): output += item_fmt % (item, pkg['spec'][item]) output += self.indent + "})," return output def print_update(self, lang, exts_list): """ this needs to be re-written in a Pythonesque manner if module name matches extension name then skip """ indx = self.code.find('exts_list') indx += self.code[indx:].find('[') indx += self.code[indx:].find('\n') + 1 self.write_chunk(indx) for extension in exts_list: name = extension['name'] if 'action' not in extension: sys.stderr.write('No action: %s\n' % name) extension['action'] = 'keep' if self.name.lower() == name.lower(): # special case for bundles, if "name" is used in exts_list indx = self.code[self.ptr_head:].find('),') + 2 indx += self.ptr_head self.write_chunk(indx) elif
extension['type'] == 'base': # base library with no version indx = self.code[self.ptr_head:].find(name) indx += self.ptr_head + len(name) + 2 self.write_chunk(indx) elif extension['action'] in ['keep', 'update']: self.rewrite_extension(extension) elif extension['action'] == 'duplicate': print('Duplicate: %s' % name) name_indx = self.code[self.ptr_head:].find(name) name_indx += self.ptr_head + len(name) indx = self.code[name_indx:].find('),') + name_indx + 3 self.ptr_head = indx continue elif extension['action'] in ['add', 'dep']: output = self.output_module(lang, extension) self.out.write("%s\n" % output) self.out.write(self.code[self.ptr_head:]) class UpdateExts: """ """ def __init__(self, args, eb, dep_eb): """ """ self.debug = False self.verbose = args.verbose self.meta = args.meta self.search_pkg = args.search_pkg self.ext_counter = 0 self.pkg_update = 0 self.pkg_new = 0 self.pkg_duplicate = 0 self.indent_n = 4 self.indent = ' ' * self.indent_n self.ext_list_len = 1 self.exts_dep = list() self.depend_exclude = list() if dep_eb: for exten in dep_eb.exts_list: if isinstance(exten, tuple): self.exts_dep.append(exten[0]) else: self.exts_dep.append(exten) if args.easyconfig: self.exts_orig = eb.exts_list self.interpolate = {'name': eb.name, 'namelower': eb.name.lower(), 'version': eb.version} if self.search_pkg: self.search_pkg = args.search_pkg if args.biocver: self.biocver = args.biocver if args.pyver: self.name = "Python" self.version = args.pyver elif args.rver: self.name = "R" self.version = args.rver else: print('Language and version must be specified with ' + '[--pyver x.x | --rver x.x | --biocver x.x]') sea_pkg = {'name': args.search_pkg, 'version': 'x', 'type': 'add', 'spec': {}, 'meta': {} } self.search_ext = sea_pkg self.exts_processed = list() def is_processed(self, pkg): """ check if a package has been previously processed if the package exists AND is in the original exts_list mark it as 'duplicate' updated July 2018 """ name = pkg['name'] found = False if name in self.exts_dep: found = True else: for p_pkg in self.exts_processed: if 'spec' in p_pkg and 'modulename' in p_pkg['spec']: modulename = p_pkg['spec']['modulename'] else: modulename = '' if (str(name) == str(p_pkg['name'])) or (name == modulename): found = True break if found: if pkg['type'] == 'orig': pkg['action'] = 'duplicate' self.pkg_duplicate += 1 self.processed(pkg) if self.verbose: self.print_status(pkg) return True return found def processed(self, pkg): """ save processed packages save a normalized version of the package name to <exts_search> for Python updated July 2018 """ pkg['processed'] = True pkg2 = dict(pkg) self.exts_processed.append(pkg2) def print_status(self, pkg): """ print one line of status for each package if --verbose updated July 2018 """ if pkg['action'] == 'update': version = '%s -> %s' % (pkg['orig_ver'], pkg['version']) else: version = pkg['version'] action = '(%s)' % pkg['action'] tmpl = "%20s : %-20s %12s [%2d, %d]" print(tmpl % (pkg['name'], version, action, self.ext_list_len, self.ext_counter)) def print_meta(self, info): """ print meta data from repository this is broken for R :param info: dict """ pass def check_package(self, pkg): """query package authority [Pypi, CRAN, Bio] to get the latest version information for a package. This is the heart of the program. input: pkg{} check that all dependencies are met for each package. check_package can be called recursively. pkg['type'] is used to track status.
- 'orig' is also used to track recursion - 'dep' package that is added as a result of a dependency - 'add' packages read from file pkg['action'] What action will be taken on exts_list. - 'add'; new package - 'keep'; no update required - 'update'; version change - 'duplicate' package appears twice """ if self.debug: print('check_package: %s' % pkg['name']) if self.is_processed(pkg): return status = self.get_package_info(pkg) if status in ["error", 'not found']: if pkg['type'] == 'orig': pkg['action'] = 'keep' self.processed(pkg) return else: msg = " Warning: %s is a dependency, but can't be found!" print(msg % pkg['name']) return if 'version' in pkg['meta']: version = pkg['meta']['version'] else: print('version not in %s' % pkg['name']) version = pkg['version'] if pkg['version'] == version: pkg['action'] = 'keep' else: pkg['orig_ver'] = pkg['version'] pkg['version'] = pkg['meta']['version'] if pkg['type'] == 'orig': pkg['action'] = 'update' self.pkg_update += 1 elif pkg['type'] in ['dep', 'add']: if self.debug: print('check_package; dep or add') pkg['action'] = 'add' self.pkg_new += 1 if 'requires' in pkg['meta'] and pkg['meta']['requires'] is not None: for depend in pkg['meta']['requires']: if depend not in self.depend_exclude: dep_pkg = {'name': depend, 'version': 'x', 'type': 'dep', 'spec': {}, 'meta': {}} self.check_package(dep_pkg) self.processed(pkg) self.ext_counter += 1 if self.search_pkg: output = self.output_module(pkg) print(output) if self.verbose: self.print_status(pkg) if self.meta: self.print_meta(pkg['meta']) def updateexts(self): """Loop through exts_list and check which packages need to be updated. this is an external method for the class """ if self.search_pkg: self.check_package(self.search_ext) else: self.ext_list_len = len(self.exts_orig) for ext in self.exts_orig: if isinstance(ext, tuple): name = ext[0] % self.interpolate version = ext[1] % self.interpolate pkg = {'name': name, 'version': version, 'type': 'orig'} if len(ext) > 2: pkg['spec'] = ext[2] pkg['meta'] = {} self.check_package(pkg) else: self.processed({'name': ext, 'type': 'base'}) if self.verbose: self.stats() def stats(self): sys.stderr.write("Updated Packages: %d\n" % self.pkg_update) sys.stderr.write("New Packages: %d\n" % self.pkg_new) sys.stderr.write("Dropped Packages: %d\n" % self.pkg_duplicate) def get_package_info(self, pkg): pass class UpdateR(UpdateExts): """extend UpdateExts class to update package names from CRAN and BioConductor """ def __init__(self, args, eb, deps_eb): UpdateExts.__init__(self, args, eb, deps_eb) self.debug = False self.bioc_data = {} self.depend_exclude = ['R', 'base', 'compiler', 'datasets', 'graphics', 'grDevices', 'grid', 'methods', 'parallel', 'splines', 'stats', 'stats4', 'tcltk', 'tools', 'utils', ] try: self.biocver = args.biocver except NameError: self.biocver = None try: self.biocver = eb.biocver except NameError: self.biocver = None print('BioConductor version: biocver not set') if self.biocver: self.read_bioconductor_packages() self.updateexts() if eb: eb.print_update('R', self.exts_processed) def read_bioconductor_packages(self): """ read the Bioconductor package list into the bioc_data dict """ base_url = 'https://bioconductor.org/packages/json/%s' % self.biocver bioc_urls = ['%s/bioc/packages.json' % base_url, '%s/data/annotation/packages.json' % base_url, '%s/data/experiment/packages.json' % base_url] for url in bioc_urls: resp = requests.get(url) if resp.status_code != 200: print('Error: %s %s' % (resp.status_code, url)) sys.exit(1) self.bioc_data.update(resp.json()) if
self.debug: print('reading Bioconductor package info: %s' % url) pkgcount = len(self.bioc_data.keys()) print('size: %s' % pkgcount) def get_cran_info(self, pkg): """ MD5sum, Description, Package, releases[] """ cran_list = "http://crandb.r-pkg.org/" resp = requests.get(url=cran_list + pkg['name']) if resp.status_code != 200: return "not found" cran_info = resp.json() pkg['meta']['version'] = cran_info['Version'] if u'License' in cran_info and u'Part of R' in cran_info[u'License']: return 'base package' pkg['meta']['requires'] = [] if u"LinkingTo" in cran_info: pkg['meta']['requires'].extend(cran_info[u"LinkingTo"].keys()) if u"Depends" in cran_info: pkg['meta']['requires'].extend(cran_info[u"Depends"].keys()) if u"Imports" in cran_info: pkg['meta']['requires'].extend(cran_info[u"Imports"].keys()) return 'ok' def get_bioc_info(self, pkg): """Extract <Depends> and <Imports> from BioConductor JSON metadata Example: bioc_data['pkg']['Depends'] [u'R (>= 2.10)', u'BiocGenerics (>= 0.3.2)', u'utils'] interesting fields from BioConductor: bioc_data['pkg']['Depends', 'Imports', 'Biobase', 'graphics', 'URL'] """ status = 'ok' if pkg['name'] in self.bioc_data: pkg['meta']['version'] = self.bioc_data[pkg['name']]['Version'] if 'LinkingTo' in self.bioc_data[pkg['name']]: pkg['meta']['requires'].extend( [re.split('[ (><=,]', s)[0] for s in self.bioc_data[pkg['name']]['LinkingTo']]) if 'Depends' in self.bioc_data[pkg['name']]: pkg['meta']['requires'].extend( [re.split('[ (><=,]', s)[0] for s in self.bioc_data[pkg['name']]['Depends']]) if 'Imports' in self.bioc_data[pkg['name']]: pkg['meta']['requires'].extend( [re.split('[ (><=,]', s)[0] for s in self.bioc_data[pkg['name']]['Imports']]) else: status = "not found" return status def print_depends(self, pkg): """ used for debugging """ for p in pkg['meta']['requires']: if p not in self.depend_exclude: print("%20s : requires %s" % (pkg['name'], p)) def get_package_info(self, pkg): """R version, check CRAN and BioConductor for version information """ if self.debug: print('get_package_info: %s' % pkg['name']) pkg['meta']['requires'] = [] status = self.get_bioc_info(pkg) if status == 'not found': status = self.get_cran_info(pkg) if self.debug: self.print_depends(pkg) return status def output_module(self, pkg): output = "%s('%s', '%s')," % (self.indent, pkg['name'], pkg['version']) return output class UpdatePython(UpdateExts): """extend ExtsList class to update package names from PyPI Python Issues There are many small inconsistencies with PyPI which make it difficult to fully automate building of EasyConfig files.
        - dependency checking
        - check for extras=='all'
        - PyPI project names do not always match module names and/or
          file names; project: liac-arff, module: arff,
          file name: liac_arff.zip
    """
    def __init__(self, args, eb, deps_eb):
        UpdateExts.__init__(self, args, eb, deps_eb)
        self.pkg_dict = None
        if eb:
            (nums) = eb.version.split('.')
        else:
            (nums) = args.pyver.split('.')
        self.python_version = "%s.%s" % (nums[0], nums[1])
        self.pymajornum = nums[0]
        self.pyminor = nums[1]
        # Python > 3.3 has additional built-in modules; compare as integers,
        # since split() yields strings
        if int(nums[0]) == 3 and int(nums[1]) > 3:
            self.depend_exclude.extend(['argparse', 'asyncio'])
        if self.debug and self.search_pkg:
            print('Python search PyPI: %s' % self.search_pkg)
        self.updateexts()
        if eb:
            eb.print_update('Python', self.exts_processed)

    def get_pypi_pkg_data(self, pkg, version=None):
        """ return meta data from pypi.org """
        if version:
            req = 'https://pypi.org/pypi/%s/%s/json' % (pkg['name'], version)
        else:
            req = 'https://pypi.org/pypi/%s/json' % pkg['name']
        resp = requests.get(req)
        if resp.status_code != 200:
            msg = "API error: %s GET release %s\n"
            sys.stderr.write(msg % (resp.status_code, pkg['name']))
            return 'not found'
        project = resp.json()
        # verify that package name is correct
        if pkg['name'] != project['info']['name']:
            sys.stderr.write('package name mismatch: %s -> %s\n' % (
                pkg['name'], project['info']['name']))
            pkg['name'] = project['info']['name']
        return project

    def check_package_name(self, pkg_name):
        """ verify that the package name from the EasyConfig
        matches the package name from PyPI
        """
        pkg = {}
        pkg['name'] = pkg_name
        response = self.get_pypi_pkg_data(pkg)
        if response == 'not found':
            return response
        else:
            return response['info']['name']

    def parse_pypi_requires(self, requires):
        """requires_dist entries follow the dependency-specifier format of
        PEP 508 (metadata fields originally defined in PEP 345). The project
        name must be as specified at pypi.org.
        requires_dist: <name> <version>[; Environment Markers]

        Only install the latest version, so ignore all version information.
        input: 'numpy (>=1.7.1)' output: 'numpy'

        Test that <python_version> and <sys_platform> conform.
        If <extra> is present and required, check that extra is contained
        in "exts_list".
        wincertstore (==0.2); sys_platform=='win32' and extra == 'ssl'
        futures (>=3.0); (python_version=='2.7' or python_version=='2.6')
        requests-kerberos (>=0.6); extra == 'kerberos'
        trollius; python_version == "2.7" and extra == 'asyncio'
        asyncio; python_version == "3.3" and extra == 'asyncio'
        """
        if requires is None:
            return []
        dists = []
        sys_platform = 'Linux'
        python_version = self.python_version
        platform_python_implementation = 'CPython'
        extra_re = re.compile(r"and\sextra\s==\s'([A-Za-z0-9_\-\.]+)'")
        for req in requires:
            pkg_name = req.split()[0]
            # test for Environment Marker (stuff after ;)
            fields = req.split(';')
            if len(fields) > 1:
                # strip the trailing "and extra == '...'" clause, then
                # evaluate the remaining marker in the context above
                env = re.sub(extra_re, '', fields[1])
                if len(env) > 1:
                    try:
                        if eval(env):
                            name = self.check_package_name(pkg_name)
                            if name != 'not found':
                                dists.append(name)
                    except NameError:
                        msg = 'Error: Unable to evaluate: <%s> '
                        msg += 'for requirement: %s\n'
                        sys.stderr.write(msg % (env, pkg_name))
            else:
                # only add pkg_name if found in PyPI
                name = self.check_package_name(pkg_name)
                if name != 'not found':
                    dists.append(name)
        return dists

    def print_meta(self, meta):
        """ Display meta from pypi.org """
        tags = ['filename', 'packagetype', 'url', 'python_version',
                'requires_dist', 'summary', 'requires_python']
        for tag in tags:
            if tag in meta:
                print("%s'%s': '%s'" % (self.indent, tag, meta[tag]))

    def get_package_info(self, pkg):
        """Python version.
        Get package meta data via pypi.org.
        pkg is a dict; {'name', 'version', 'spec'}
        return metadata as dict in pkg['meta']['version'].
        If the source package is not found, look for a whl:
        if pyver in ['3.5', '3.6', '3.7']:
            arch = 'linux' ['manylinux', 'anylinux', 'linux']
        """
        status = self.get_pypi_info(pkg)
        return status

    def get_pypi_release(self, pkg, version):
        """ if a source dist is not available from PyPI, search the release
        for a whl file.
        """
        release = self.get_pypi_pkg_data(pkg, version)
        if release == 'not found':
            return 'not found'
        cplist = ['cp35', 'cp36', 'cp37']
        for rel in release['releases'][version]:
            if any(cver in rel['python_version'] for cver in cplist):
                if 'manylinux' in rel['filename']:
                    pkg['meta'].update(rel)
                    return 'ok'
        return 'not found'

    def get_pypi_info(self, pkg):
        """get version information from PyPI. If <pkg_name> is not
        processed, search PyPI.
        pkg_name is now case sensitive and must match
        info['digests']['sha256'], 'summary', 'url', 'filename', 'home_page'
        """
        project = self.get_pypi_pkg_data(pkg)
        if project == 'not found':
            return 'not found'
        status = 'not found'
        pkg['meta'] = {}
        pkg['meta'].update(project['info'])
        new_version = pkg['meta']['version']
        requires = project['info']['requires_dist']
        pkg['meta']['requires'] = self.parse_pypi_requires(requires)
        for ver in project['releases'][new_version]:
            if 'packagetype' in ver and ver['packagetype'] == 'sdist':
                pkg['meta']['url'] = ver['url']
                pkg['meta']['filename'] = ver['filename']
                status = 'ok'
                break
        # one last try to find package release data
        if status != 'ok':
            status = self.get_pypi_release(pkg, new_version)
        # only set this if not set
        if 'source_urls' not in pkg['spec'] and new_version != pkg['version']:
            url = "['https://pypi.io/packages/source/%s/%s']"
            pkg['spec']['source_urls'] = url % (pkg['name'][0], pkg['name'])
        return status


def help():
    print("usage: easy_update EasyConfig.eb [flags]")
    print("easy_update updates the exts_list information of"
          " EasyBuild EasyConfig files")
    print("easy_update works with R, Python and R-bioconductor"
          " EasyConfig files")
    print("  --verbose          display status for each package")
    print("  --add [filename]   filename contains a list of package"
          " names to add")
    sys.exit()


def main():
    """ main """
    parser = argparse.ArgumentParser(description='Update EasyConfig extslist')
    parser.add_argument(
        '--version', action='version', version='%(prog)s ' + __version__)
    parser.add_argument(
        '-v', '--verbose', dest='verbose', required=False,
        action='store_true',
        help='Verbose; print lots of extra stuff, (default: false)')
    parser.add_argument(
        '--rver', dest='rver', required=False, action='store',
        help='Set R version (major.minor) example 3.4')
    bioc_help = 'Set BioConductor version (major.minor) example 3.6. '
    bioc_help += 'Use with --rver'
    parser.add_argument('--biocver', dest='biocver', required=False,
                        action='store', help=bioc_help)
    parser.add_argument(
        '--pyver', dest='pyver', required=False, action='store',
        help='Set Python version [2.7 or 3.6]')
    search_help = 'Search for single package. requires --rver or --pyver'
    parser.add_argument(
        '--search', dest='search_pkg', required=False, action='store',
        help=search_help)
    parser.add_argument(
        '--meta', dest='meta', required=False, action='store_true',
        help='output all meta data keys from PyPI, (default: false)')
    parser.add_argument('easyconfig', nargs='?')
    args = parser.parse_args()

    lang = None
    dep_eb = None
    if args.easyconfig:
        eb_name = os.path.basename(args.easyconfig)
        eb = FrameWork(args, eb_name, True)
    elif args.search_pkg:
        eb_name = ''
        eb = None
    else:
        print('If no EasyConfig is given, a module name must be '
              'specified with --search pkg_name')
        sys.exit()
    if args.rver or eb_name[:3] == 'R-3':
        lang = 'R'
    elif args.pyver or eb_name[:7] == 'Python-':
        lang = 'Python'
    elif eb and eb.dependencies:
        for x in eb.dependencies:
            if x[0] == 'R' or x[0] == 'Python':
                if lang is None:
                    lang = x[0]
                    dep_filename = '%s-%s-%s-%s.eb' % (
                        x[0], x[1], eb.toolchain['name'],
                        eb.toolchain['version'])
                    dep_eb = FrameWork(args, dep_filename, False)
    if lang is None:
        print('Could not determine language [R, Python]')
        sys.exit(1)
    if lang == 'R':
        module = UpdateR(args, eb, dep_eb)
    elif lang == 'Python':
        module = UpdatePython(args, eb, dep_eb)


if __name__ == '__main__':
    main()
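The environment-marker filtering inside parse_pypi_requires is easiest to see in isolation. Below is a reduced, standalone sketch of the same idea, reusing requirement strings from the docstring examples; like the method above it relies on eval() over a small context, and it is not a general PEP 508 parser.

import re

python_version, sys_platform = '3.6', 'Linux'   # evaluation context, as in the class
extra_re = re.compile(r"and\sextra\s==\s'([A-Za-z0-9_\-\.]+)'")

for req in ["wincertstore (==0.2); sys_platform=='win32' and extra == 'ssl'",
            "requests-kerberos (>=0.6); extra == 'kerberos'",
            "numpy (>=1.7.1)"]:
    name = req.split()[0]
    fields = req.split(';')
    verdict = 'keep'
    if len(fields) > 1:
        env = extra_re.sub('', fields[1])   # drop the trailing "and extra == '...'" clause
        try:
            verdict = 'keep' if len(env) > 1 and eval(env) else 'skip'
        except NameError:                   # bare "extra == ..." markers cannot be evaluated here
            verdict = 'skip'
    print(name, '->', verdict)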
[ "john@fuzzdog.com" ]
john@fuzzdog.com
f7af79f56a51603282e3089490b050ca604d2712
1410d7722dd22c1ecd2aee0f4c59cf482846f445
/models/rbm.py
05e4c797eace5d0b7ec5a60c952e55268a115ae6
[]
no_license
funzi-son/DRBM
95a1cb3d504746836d5d8dc2d9fb7b7eeae3fc8c
1a7c40d46b86ed4d4a8610f3979e94e5e297429b
refs/heads/master
2021-07-19T16:17:56.423241
2017-10-24T03:23:58
2017-10-24T03:23:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,384
py
"""The restricted Boltzmann machine""" # Author: Srikanth Cherla # City University London (2014) # Contact: abfb145@city.ac.uk from models import np from models import theano from models import T theano.config.exception_verbosity = 'high' def build_model(n_input, n_class, hypers, init_params): """Function to build the Theano graph for the RBM. Input ----- n_input : integer Dimensionality of input features to the model. n_class : integer Number of class-labels. hypers : dict Model hyperparameters. init_params : list A list of initial values for the model parameters. Output ------ x : T.matrix Input matrix (with number of data points as first dimension). y : T.ivector Class labels corresponding to x. p_y_given_x : T.nnet.softmax Posterior probability of y given x. cost: ??? Cost function of the DRBM which is to be optimized. params: list(T.shared) A list containing the parameters of the model. grads: list(T.grad) A list containing the gradients of the parameters of the model. """ n_visible = n_input + n_class n_hidden = int(hypers['n_hidden']) L1_decay = float(hypers['weight_decay']) L2_decay = float(hypers['weight_decay']) n_gibbs = int(hypers['n_gibbs']) activation = str(hypers['activation']) # Random number generators T_RNG = T.shared_randomstreams.RandomStreams(hypers['seed']) N_RNG = np.random.RandomState(hypers['seed']) # 1. Initialize visible layer, inputs and targets x = T.matrix(name='x', dtype=theano.config.floatX) y = T.ivector(name='y') # XXX: What should be the type of this? Y = T.eye(n_class)[y] v = T.concatenate((x, Y), axis=1) # Initialize model parameters if init_params is None: W_init = np.asarray( N_RNG.normal(size=(n_visible, n_hidden), scale=0.01), dtype=theano.config.floatX) bv_init = np.zeros((n_visible,), dtype=theano.config.floatX) bh_init = np.zeros((n_hidden,), dtype=theano.config.floatX) else: W_init = init_params[0] bv_init = init_params[1] bh_init = init_params[2] W = theano.shared(W_init, name='W') # RBM weight matrix bv = theano.shared(bv_init, name='bv') # Visible biases bh = theano.shared(bh_init, name='bh') # Hidden biases params = [W, bv, bh] # Build Gibbs chain and graph to compute the cost function v_sample, cost, updates_train = build_chain(v, n_input, n_class, W, bv, bh, k=n_gibbs, activation=activation, T_RNG=T_RNG) # Add weight decay (regularization) to cost. L1 = abs(W).sum() L2_sqr = (W**2).sum() cost += (L1_decay*L1 + L2_decay*L2_sqr) grads = T.grad(cost, params, consider_constant=v_sample) # Expressions to compute conditional distribution. p_y_given_x = drbm_fprop(x, params, n_class, activation) return (x, y, p_y_given_x, cost, params, grads) def build_chain(v, n_input, n_class, W, bv, bh, k=1, activation='sigmoid', T_RNG=None): """Construct a k-step Gibbs chain starting at v for an RBM. Input ----- v : T.matrix or T.vector If a matrix, multiple chains will be run in parallel (batch). n_input : int Dimensionality of input feature. n_class : int Number of output classes. W : T.matrix Weight matrix of the RBM. bv : T.vector Visible bias vector of the RBM. bh : T.vector Hidden bias vector of the RBM. k : int Length of the Gibbs chain (number of sampling steps). activation : str Type of activation function. T_RNG : T.streams.RandomStreams Theano random number generator. Output ------ v_sample : Theano vector or matrix with the same shape as `v` Corresponds to the generated sample(s). cost : Theano scalar Expression whose gradient with respect to W, bv, bh is the CD-k approximation to the log-likelihood of `v` (training example) under the RBM. 
        The cost is averaged in the batch case.
    updates : dictionary of Theano variable -> Theano variable
        The `updates` object returned by scan.
    """
    if T_RNG is None:
        T_RNG = T.shared_randomstreams.RandomStreams(860331)

    # One iteration of the Gibbs sampler.
    def gibbs_step(v):
        """One step of Gibbs sampling in the RBM."""
        # Compute hidden layer activations given visible layer
        if activation == 'sigmoid':
            mean_h = T.nnet.sigmoid(T.dot(v, W) + bh)
            h = T_RNG.binomial(size=mean_h.shape, n=1, p=mean_h,
                               dtype=theano.config.floatX)
        elif activation == 'tanh':
            raise NotImplementedError
        elif activation == 'relu':  # XXX: Not working
            mean_h = T.maximum(0, T.dot(v, W) + bh)
            h = T.maximum(0, mean_h + T_RNG.normal(
                size=mean_h.shape, avg=0.0, std=T.nnet.sigmoid(mean_h)))
        else:
            raise NotImplementedError

        # Compute visible layer activations given hidden layer
        acts_v = T.dot(h, W.T) + bv

        # # Multinomial visible units sampling (equally sized)
        # # TODO: Make this an if-else section based on an input hyperparameter
        # acts_in = acts_v[:, :n_input]
        # probs_in = T.nnet.softmax(acts_in)
        # v_in = T_RNG.multinomial(n=1, pvals=probs_in,
        #                          dtype=theano.config.floatX)
        # acts_out = acts_v[:, -n_class:]
        # probs_out = T.nnet.softmax(acts_out)
        # v_out = T_RNG.multinomial(n=1, pvals=probs_out,
        #                           dtype=theano.config.floatX)
        # mean_v = T.concatenate((probs_in, probs_out), axis=1)
        # v = T.concatenate((v_in, v_out), axis=1)

        # Binomial visible units sampling
        mean_v = T.nnet.sigmoid(acts_v)
        v = T_RNG.binomial(size=mean_v.shape, n=1, p=mean_v,
                           dtype=theano.config.floatX)

        return mean_v, v

    # k-step Gibbs sampling loop
    chain, updates = theano.scan(lambda v: gibbs_step(v)[1],
                                 outputs_info=[v], non_sequences=[],
                                 n_steps=k)
    v_sample = chain[-1]

    def free_energy(v):
        """Free energy of RBM visible layer."""
        return -(v * bv).sum() - T.log(1 + T.exp(T.dot(v, W) + bh)).sum()
    cost = (free_energy(v) - free_energy(v_sample)) / v.shape[0]

    return v_sample, cost, updates


def drbm_fprop(x, params, n_class, activation):
    """Posterior probability of classes given inputs and model parameters.

    Input
    -----
    x : T.matrix (of type theano.config.floatX)
        Input data matrix.
    params : list
        A list containing the parameters of the DRBM (see class definition).
    n_class : integer
        Number of classes.

    Output
    ------
    p_y_given_x : T.nnet.softmax
        Posterior class probabilities of the targets given the inputs.
    """
    # Initialize DRBM parameters and binary class-labels.
    U = params[0][-n_class:, :]  # or, U = W[n_input:, :]
    W = params[0][:-n_class, :]  # or, W = W[:n_input, :]
    d = params[1][-n_class:]     # or, d = bv[n_input:]
    c = params[2]
    Y_class = theano.shared(np.eye(n_class, dtype=theano.config.floatX),
                            name='Y_class')

    # Compute hidden state activations and energies.
    s_hid = T.dot(x, W) + c
    energies, _ = theano.scan(
        lambda y_class, U, s_hid: s_hid + T.dot(y_class, U),
        sequences=[Y_class], non_sequences=[U, s_hid])

    # Compute log-posteriors and then posteriors.
    if activation == 'sigmoid':
        log_p, _ = theano.scan(
            lambda d_i, e_i: d_i + T.sum(T.log(1+T.exp(e_i)), axis=1),
            sequences=[d, energies], non_sequences=[])
    elif activation == 'tanh':
        raise NotImplementedError
    elif activation == 'relu':
        raise NotImplementedError
    else:
        raise NotImplementedError
    p_y_given_x = T.nnet.softmax(log_p.T)  # XXX: Can the transpose be avoided?

    return p_y_given_x
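The log-posterior computed by drbm_fprop is, for sigmoid hidden units, log p(y|x) proportional to d_y + sum_j log(1 + exp(c_j + x.W[:,j] + U[y,j])). A NumPy re-derivation of that same expression is a convenient sanity check for the Theano graph; this is a sketch, not part of the original module, with shapes assumed to match the parameter split above.

import numpy as np

def drbm_posterior_np(x, W, U, d, c):
    # x: (batch, n_input), W: (n_input, n_hidden), U: (n_class, n_hidden),
    # d: (n_class,), c: (n_hidden,)
    s_hid = x @ W + c                                # hidden pre-activations
    energies = s_hid[None, :, :] + U[:, None, :]     # (n_class, batch, n_hidden)
    log_p = d[:, None] + np.logaddexp(0.0, energies).sum(axis=2)
    log_p -= log_p.max(axis=0, keepdims=True)        # stabilized softmax over classes
    p = np.exp(log_p)
    return (p / p.sum(axis=0, keepdims=True)).T      # (batch, n_class)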
[ "Son.Tran@csiro.au" ]
Son.Tran@csiro.au
6810448a2a2f895bb4d8c9a6ddda997f4967d5d2
99b8b8f06f2248a8ef940c0b5ba90d05f0362ba0
/src/python/strelka/scanners/scan_pe.py
626e9df031e01b48ea3c146b00d52c99f1d0d331
[ "Apache-2.0", "LicenseRef-scancode-unknown-license-reference" ]
permissive
thezedwards/strelka
b5d794198791f04a9473ae4b7b2f8a75b7ccac9b
9791ec50354459b4c80df6e95887e0d6bd58729a
refs/heads/master
2020-05-24T12:34:15.926932
2019-05-16T20:51:40
2019-05-16T20:51:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
9,150
py
import binascii
from datetime import datetime
import hashlib
import struct

import pefile

from strelka import strelka

IMAGE_MAGIC_LOOKUP = {
    0x10b: '32_BIT',
    0x20b: '64_BIT',
    0x107: 'ROM_IMAGE',
}


class ScanPe(strelka.Scanner):
    """Collects metadata from PE files."""
    def scan(self, data, file, options, expire_at):
        self.event['total'] = {'sections': 0}

        try:
            pe = pefile.PE(data=data)
            pe_dict = pe.dump_dict()

            self.event['total']['sections'] = pe.FILE_HEADER.NumberOfSections
            self.event['warnings'] = pe.get_warnings()
            self.event['timestamp'] = datetime.utcfromtimestamp(pe.FILE_HEADER.TimeDateStamp).isoformat()

            machine = pe.FILE_HEADER.Machine
            self.event['machine'] = {
                'id': machine,
                'type': pefile.MACHINE_TYPE.get(machine),
            }

            # Reference: http://msdn.microsoft.com/en-us/library/windows/desktop/ms680339%28v=vs.85%29.aspx
            self.event['image_magic'] = IMAGE_MAGIC_LOOKUP.get(pe.OPTIONAL_HEADER.Magic, 'Unknown')
            subsystem = pe.OPTIONAL_HEADER.Subsystem
            self.event['subsystem'] = pefile.SUBSYSTEM_TYPE.get(subsystem)
            self.event['stack_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfStackReserve
            self.event['stack_commit_size'] = pe.OPTIONAL_HEADER.SizeOfStackCommit
            self.event['heap_reserve_size'] = pe.OPTIONAL_HEADER.SizeOfHeapReserve
            self.event['heap_commit_size'] = pe.OPTIONAL_HEADER.SizeOfHeapCommit
            self.event['image_base'] = pe.OPTIONAL_HEADER.ImageBase
            self.event['entry_point'] = pe.OPTIONAL_HEADER.AddressOfEntryPoint
            self.event['image_characteristics'] = pe_dict.get('Flags')
            self.event['dll_characteristics'] = pe_dict.get('DllCharacteristics')

            try:
                self.event['imphash'] = pe.get_imphash()
            except AttributeError:
                self.flags.append('no_imphash')

            self.event.setdefault('export_functions', [])
            export_symbols = pe_dict.get('Exported symbols', [])
            for symbols in export_symbols:
                name = symbols.get('Name')
                if name is not None and isinstance(name, bytes) and name not in self.event['export_functions']:
                    self.event['export_functions'].append(name)

            import_cache = {}
            self.event.setdefault('imports', [])
            import_symbols = pe_dict.get('Imported symbols', [])
            for symbol in import_symbols:
                for import_ in symbol:
                    dll = import_.get('DLL')
                    if dll is not None:
                        if dll not in self.event['imports']:
                            self.event['imports'].append(dll)
                        import_cache.setdefault(dll, [])
                        ordinal = import_.get('Ordinal')
                        if ordinal is not None:
                            ordinal = pefile.ordlookup.ordLookup(dll.lower(), ordinal, make_name=True)
                            import_cache[dll].append(ordinal)
                        name = import_.get('Name')
                        if name is not None:
                            import_cache[dll].append(name)

            self.event.setdefault('import_functions', [])
            for (import_, functions) in import_cache.items():
                import_entry = {'import': import_, 'functions': functions}
                if import_entry not in self.event['import_functions']:
                    self.event['import_functions'].append(import_entry)

            self.event.setdefault('resources', [])
            try:
                for resource in pe.DIRECTORY_ENTRY_RESOURCE.entries:
                    res_type = pefile.RESOURCE_TYPE.get(resource.id, 'Unknown')
                    for entry in resource.directory.entries:
                        for e_entry in entry.directory.entries:
                            sublang = pefile.get_sublang_name_for_lang(
                                e_entry.data.lang,
                                e_entry.data.sublang,
                            )
                            offset = e_entry.data.struct.OffsetToData
                            size = e_entry.data.struct.Size
                            r_data = pe.get_data(offset, size)
                            language = pefile.LANG.get(e_entry.data.lang, 'Unknown')
                            data = {
                                'type': res_type,
                                'id': e_entry.id,
                                'name': e_entry.data.struct.name,
                                'offset': offset,
                                'size': size,
                                'sha256': hashlib.sha256(r_data).hexdigest(),
                                'sha1': hashlib.sha1(r_data).hexdigest(),
                                'md5': hashlib.md5(r_data).hexdigest(),
                                'language': language,
                                'sub_language': sublang,
                            }
                            if data not in self.event['resources']:
                                self.event['resources'].append(data)
            except AttributeError:
                self.flags.append('no_resources')

            if hasattr(pe, 'DIRECTORY_ENTRY_DEBUG'):
                debug = dict()
                for e in pe.DIRECTORY_ENTRY_DEBUG:
                    rawData = pe.get_data(e.struct.AddressOfRawData, e.struct.SizeOfData)
                    if rawData.find(b'RSDS') != -1 and len(rawData) > 24:
                        pdb = rawData[rawData.find(b'RSDS'):]
                        debug['guid'] = b'%s-%s-%s-%s' % (
                            binascii.hexlify(pdb[4:8]),
                            binascii.hexlify(pdb[8:10]),
                            binascii.hexlify(pdb[10:12]),
                            binascii.hexlify(pdb[12:20]),
                        )
                        debug['age'] = struct.unpack('<L', pdb[20:24])[0]
                        debug['pdb'] = pdb[24:].rstrip(b'\x00')
                        self.event['rsds'] = debug
                    elif rawData.find(b'NB10') != -1 and len(rawData) > 16:
                        pdb = rawData[rawData.find(b'NB10') + 8:]
                        debug['created'] = struct.unpack('<L', pdb[0:4])[0]
                        debug['age'] = struct.unpack('<L', pdb[4:8])[0]
                        debug['pdb'] = pdb[8:].rstrip(b'\x00')
                        self.event['nb10'] = debug

            self.event.setdefault('sections', [])
            sections = pe_dict.get('PE Sections', [])
            for section in sections:
                section_entry = {
                    'name': section.get('Name', {}).get('Value', '').replace('\\x00', ''),
                    'flags': section.get('Flags', []),
                    'structure': section.get('Structure', ''),
                }
                if section_entry not in self.event['sections']:
                    self.event['sections'].append(section_entry)

            security = pe.OPTIONAL_HEADER.DATA_DIRECTORY[pefile.DIRECTORY_ENTRY['IMAGE_DIRECTORY_ENTRY_SECURITY']]
            digital_signature_virtual_address = security.VirtualAddress
            if security.Size > 0:
                extract_data = pe.write()[digital_signature_virtual_address + 8:]
                if len(extract_data) > 0:
                    self.flags.append('signed')
                    extract_file = strelka.File(
                        name='digital_signature',
                        source=self.name,
                    )
                    for c in strelka.chunk_string(extract_data):
                        self.upload_to_cache(
                            extract_file.pointer,
                            c,
                            expire_at,
                        )
                    self.files.append(extract_file)
                else:
                    self.flags.append('empty_signature')

            if hasattr(pe, 'FileInfo'):
                self.event.setdefault('version_info', [])
                for structure in pe.FileInfo:
                    for fileinfo in structure:
                        if fileinfo.Key.decode() == 'StringFileInfo':
                            for block in fileinfo.StringTable:
                                for name, value in block.entries.items():
                                    fixedinfo = {
                                        'name': name.decode(),
                                        'value': value.decode(),
                                    }
                                    if fixedinfo not in self.event['version_info']:
                                        self.event['version_info'].append(fixedinfo)
            else:
                self.flags.append('no_version_info')

        except IndexError:
            self.flags.append('index_error')
        except pefile.PEFormatError:
            self.flags.append('pe_format_error')
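The scanner above is driven by a handful of pefile calls; a standalone sketch of those calls against a PE file on disk ('sample.exe' is a hypothetical path):

import pefile

with open('sample.exe', 'rb') as f:        # hypothetical input path
    pe = pefile.PE(data=f.read())          # same entry point the scanner uses
print(pe.FILE_HEADER.NumberOfSections)
print(hex(pe.OPTIONAL_HEADER.AddressOfEntryPoint))
print(pe.get_imphash())                    # import hash, stored above as event['imphash']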
[ "liburdi.joshua@gmail.com" ]
liburdi.joshua@gmail.com
b2dc7e238687297569b877965faf69acc67f19ed
37413580d8f2402068fc9658fbe3df7b897fb728
/admm1.py
d442db9484c2325200152a67832e3ab97555e760
[]
no_license
johnston-jeremy/mmv_cvx
b104b3afafe7a3a3ae2e4ebcf814c2b78c751ac1
8938a42e69f98da6b6b208114422770eddeaca47
refs/heads/main
2023-08-21T19:10:33.897425
2021-10-20T04:12:35
2021-10-20T04:12:35
411,396,714
0
1
null
null
null
null
UTF-8
Python
false
false
778
py
import numpy as np

from proximal_operators import prox_l2_norm_batch


def admm_problem1(Y, p):
    """ADMM solver for an MMV sparse-recovery problem: find row-sparse X with A X + E = Y."""
    # Unpack problem dimensions and ADMM step-size parameters.
    N, L, M, mu, beta, taux, gamma = p.N, p.L, p.M, p.mu, p.beta, p.taux, p.gamma
    X = np.zeros((N, M), dtype=complex)  # primal variable (row-sparse signal)
    E = np.zeros_like(Y)                 # residual term
    T = np.zeros_like(Y)                 # dual variable
    A = p.A
    AtA = np.matmul(A.T.conj(), A)
    AtY = np.matmul(np.conj(A.T), Y)
    for t in range(p.maxiter):
        Xprev = X
        # E-update: closed-form shrinkage of the residual.
        E = mu*beta/(1 + mu*beta) * (-np.matmul(A, X) + Y - 1/beta * T)
        # X-update: gradient step followed by the proximal operator.
        G = 2*(np.matmul(AtA, X) + np.matmul(np.conj(A.T), E + (1/beta)*T) - AtY)
        D = X - taux/2 * G
        X = prox_l2_norm_batch(taux/beta, D)
        # Dual ascent on the constraint A X + E = Y.
        T = T + gamma*beta*(np.matmul(A, X) + E - Y)
        # Stop once the relative change in X falls below the tolerance.
        if t > 10:
            if np.linalg.norm(X - Xprev) <= p.tol*np.linalg.norm(Xprev):
                break
    return X
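A minimal smoke test for admm_problem1, under stated assumptions: the parameter object is emulated with SimpleNamespace, and the stand-in prox below documents the assumed behavior of proximal_operators.prox_l2_norm_batch (row-wise group soft-thresholding); all sizes and step sizes are illustrative.

import numpy as np
from types import SimpleNamespace

def prox_l2_norm_batch(tau, D):
    # Stand-in: row-wise soft-thresholding of the l2 norm (assumption about the real operator).
    norms = np.maximum(np.linalg.norm(D, axis=1, keepdims=True), 1e-12)
    return D * np.maximum(0.0, 1.0 - tau / norms)

rng = np.random.default_rng(0)
N, L, M, K = 64, 32, 4, 5      # rows of X, measurements, snapshots, active rows
A = (rng.standard_normal((L, N)) + 1j*rng.standard_normal((L, N))) / np.sqrt(L)
X_true = np.zeros((N, M), dtype=complex)
X_true[rng.choice(N, K, replace=False)] = rng.standard_normal((K, M))
Y = A @ X_true

p = SimpleNamespace(N=N, L=L, M=M, A=A, mu=1.0, beta=1.0, taux=0.1,
                    gamma=1.0, maxiter=500, tol=1e-6)
X_hat = admm_problem1(Y, p)    # run with the real proximal_operators module on the path
print(np.linalg.norm(X_hat - X_true) / np.linalg.norm(X_true))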
[ "jjohnston1994@gmail.com" ]
jjohnston1994@gmail.com
d6f9c06998c30989b694c28b3da3ce04272f062f
61939b14aefb49057ac6aa93ea2b33c2a967988b
/actvision/config/urls.py
70223914199c62ea7aa80dce37b2bb1ee64987bd
[]
no_license
ninanonansilo/actvision826
7e237608703e58e7bb3ea21e34044c790f07bc12
fdd7852ce2a92199919f58836a81675122842e7a
refs/heads/master
2023-07-05T01:06:57.149773
2021-08-26T11:51:14
2021-08-26T11:51:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,067
py
"""Actvision URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home0 . 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include import loginapp.views import home.views import movie.views import settings.views import inform.views import register.views import imgn.views urlpatterns = [ path('admin/', admin.site.urls), path('', loginapp.views.login, name='login.html'), path('login/', loginapp.views.login_success, name='login_success.html'), path('home', home.views.home, name='home.html'), path('home/movie', movie.views.movie, name='movie.html'), path('home/movie/video_list', movie.views.video_list, name='video_list'), path('home/movie/upload_list', movie.views.upload_list, name='upload_list'), path('home/movie/upload_video', movie.views.upload_video, name='upload_video'), path('home/movie/delete_play_list', movie.views.delete_play_list, name='delete_play_list'), path('home/movie/delete_video', movie.views.delete_video, name='delete_video'), #path('home/setting', include('settings.urls')), path('home/setting', settings.views.settings, name='settings.html'), path('home/setting/check', settings.views.check, name='check'), path('home/setting/check_pattern', settings.views.check_pattern, name='check_pattern'), path('home/setting/check_Brightness_mode', settings.views.check_Brightness_mode, name='check_Brightness_mode'), path('home/setting/update_Brightness', settings.views.update_Brightness, name='update_Brightness'), path('home/setting/update_CDS_Value', settings.views.update_CDS_Value, name='update_CDS_Value'), path('home/setting/update_min_max', settings.views.update_min_max, name='update_min_max'), path('home/setting/power_mode', settings.views.power_mode, name='power_mode'), path('home/setting/manual_control', settings.views.manual_control, name='manual_control'), path('home/setting/update_on_off', settings.views.update_on_off, name='update_on_off'), path('home/inform', inform.views.inform, name='inform.html'), path('home/register', register.views.register, name='register.html'), path('home/register/users_list', register.views.users_list, name='users_list'), path('home/imgn', imgn.views.imgn, name='image.html'), path('home/imgn/upload_img', imgn.views.upload_img, name='upload_img'), path('home/imgn/save_letter', imgn.views.save_letter, name='save_letter'), path('home/imgn/event_trans', imgn.views.event_trans, name='event_trans'), ]
[ "ckdgl@DESKTOP-6NQFU1P" ]
ckdgl@DESKTOP-6NQFU1P
e9a1e970d4704ef0445f93aed0cd5162806488f7
ca7aa979e7059467e158830b76673f5b77a0f5a3
/Python_codes/p03273/s702731643.py
a626a36c61e3c295dfc6c90d75e2a4adb265c98f
[]
no_license
Aasthaengg/IBMdataset
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
refs/heads/main
2023-04-22T10:22:44.763102
2021-05-13T17:27:22
2021-05-13T17:27:22
367,112,348
0
0
null
null
null
null
UTF-8
Python
false
false
745
py
from collections import defaultdict
import itertools
import copy

def readInt():
    return int(input())

def readInts():
    return list(map(int, input().split()))

def readChar():
    return input()

def readChars():
    return input().split()

def p(arr, b="\n", e="\n"):
    print(b, end="")
    for i in arr:
        for j in i:
            print(j, end="")
        print()
    print(e, end="")

h, w = readInts()
a = [list(input()) for i in range(h)]
for i in range(h-1, -1, -1):
    boo = 1
    for j in range(w-1, -1, -1):
        if a[i][j] == "#":
            boo = 0
    if boo == 1:
        del a[i]
for i in range(len(a[0])-1, -1, -1):
    boo = 1
    for j in range(len(a)-1, -1, -1):
        if a[j][i] == "#":
            boo = 0
    if boo == 1:
        for j in range(len(a)-1, -1, -1):
            del a[j][i]
p(a, b="", e="")
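The solution deletes all-white rows and then all-white columns (grid compression). The same cropping written with comprehensions, on a hypothetical 3x4 grid:

grid = ['...#',
        '....',
        '#...']                                     # hypothetical input
rows = [r for r in grid if '#' in r]                # drop all-white rows
cols = [c for c in range(len(rows[0]))
        if any(r[c] == '#' for r in rows)]          # keep columns containing '#'
print('\n'.join(''.join(r[c] for c in cols) for r in rows))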
[ "66529651+Aastha2104@users.noreply.github.com" ]
66529651+Aastha2104@users.noreply.github.com
a11679509d39df99aea016f09d46980e5ad22918
d02e279c61c111d250812946f299828330947ed6
/easy/remove element.py
e5afce3b88516929d4662fde1ee3670b0a0f3576
[]
no_license
wlawt/leetcode
b1599528e027bd8bfd2581f3bc56bb3680118c4b
c00fdce2f5f1ed1acc15f74f98c99b7139fedb50
refs/heads/master
2023-02-20T05:42:34.307169
2021-01-22T01:48:30
2021-01-22T01:48:30
326,090,331
1
0
null
null
null
null
UTF-8
Python
false
false
275
py
from typing import List

class Solution:
    def removeElement(self, nums: List[int], val: int) -> int:
        i = 0
        # sorting is not required for correctness here; the problem allows
        # the kept elements to appear in any order
        nums.sort()
        for j in range(len(nums)):
            if nums[j] != val:
                nums[i] = nums[j]  # compact non-val elements to the front
                i += 1
        return i
[ "williamlaw.wtl@gmail.com" ]
williamlaw.wtl@gmail.com
296215af5720028bb4c81737ec6fe46db989de0c
4648437f4004530142e8aaf7bc65ffc9df121058
/asgi.py
b3e33727dc5c089f91996f06e4e404eb7442c29d
[]
no_license
YashDilipShah/KC_random_forest
ebca680198b6db27888f1b315153e85149050b06
5cfea708450f09569e7f93cc74ec9e4d38bfc312
refs/heads/master
2020-12-27T18:30:22.352301
2020-02-27T21:08:19
2020-02-27T21:08:19
238,005,735
0
0
null
null
null
null
UTF-8
Python
false
false
413
py
""" ASGI config for houseprice project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'houseprice.settings') application = get_asgi_application()
[ "noreply@github.com" ]
YashDilipShah.noreply@github.com
f7cf518c9adba372fba54eac9d1c3ca7dbadeeac
44aa5314f0291f6a5579214ba2d57b894ddcd1ec
/backend/dashboard/models.py
82b3d803f623cfda4b862f4232d35504c23846ec
[]
no_license
dimnl/modum
f81fef16a599d79f2083ac72484857aadc52a87d
b31f80ac4c1e77ddbcd11d4eb2b1937f1c9215d7
refs/heads/master
2022-06-28T02:30:19.190845
2020-05-03T04:55:39
2020-05-03T04:55:39
260,448,626
0
0
null
null
null
null
UTF-8
Python
false
false
585
py
from django.db import models


# Country information.
class Country(models.Model):
    name = models.CharField(max_length=120)
    description = models.TextField()
    focus = models.CharField(max_length=120, default="")

    def __str__(self):
        return self.name


# Sectors information.
class Sector(models.Model):
    name = models.CharField(max_length=120)
    description = models.TextField()

    def __str__(self):
        return self.name


# Measures information.
class Measure(models.Model):
    description = models.TextField()

    def __str__(self):
        # Measure has no name field, so use its description
        return self.description
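With __str__ defined (the single-underscore _str_ in the original is never called by Django), model instances get readable representations. A hedged sketch of a hypothetical Django shell session:

# >>> Country.objects.create(name='Norway', description='...', focus='energy')
# <Country: Norway>      # repr now goes through __str__
# With _str_ Django fell back to the default "Country object (1)" instead.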
[ "alexandru.neculai96@gmail.com" ]
alexandru.neculai96@gmail.com
d090e08a3de3c1ee882e0a3704be94198a57b77b
70134d55728500641c6edc422bb34159c0816fb4
/fi/migrations/0002_blog.py
4ebecd9c47fff0dcc69bebaed2d594afd74790f5
[]
no_license
leenamkyoung/forfor
9f3df218b67f16eb68f0c928ec5566e9f94bc183
bbc725941bb5543871f27cf4783b6f986d83c6a9
refs/heads/master
2022-12-11T19:31:23.561523
2019-10-19T11:44:44
2019-10-19T11:44:44
205,156,784
0
0
null
2022-11-22T04:13:04
2019-08-29T12:16:33
HTML
UTF-8
Python
false
false
626
py
# Generated by Django 2.2.3 on 2019-08-16 05:02

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('fi', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Blog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
                ('body', models.TextField()),
            ],
        ),
    ]
[ "729497@likelion.org" ]
729497@likelion.org
6e94570e6231536349f4848a253e2446a7657101
e1e4bf7539269bf5f34b9ea02b2841f287e047e0
/test/test_graph.py
daeb7856a893c70921723f3d5964754dc0e57945
[]
no_license
melifluos/twitter_age_detection
2b39d0477eef193e71dbd07b81a553ca7010d4cd
280165c597849e59089b461f2f51c3604f16bb94
refs/heads/master
2020-04-11T05:57:16.942127
2017-05-18T09:44:54
2017-05-18T09:44:54
68,198,226
1
2
null
null
null
null
UTF-8
Python
false
false
2,291
py
__author__ = 'benchamberlain'

from ..graph import Graph
from scipy.sparse import csr_matrix
import numpy as np

data = csr_matrix(np.array([[0, 1, 0, 1],
                            [1, 0, 1, 1],
                            [0, 1, 0, 1],
                            [1, 1, 1, 0]]))
data1 = csr_matrix(np.array([[0, 1],
                             [1, 0]]))
edges = np.array([[1, 3, 0], [0, 2, 3], [1, 3, 0], [0, 1, 2]])
degs = np.array([2, 3, 2, 3])
walks = np.array([[0, 2, 3], [1, 3, 1]])

def test_number_of_vertices():
    g = Graph(data)
    assert g.n_vertices == 4

def test_input_degree():
    g = Graph(data)
    assert np.array_equal(degs, g.deg)

def test_input_edge_shape():
    g = Graph(data)
    truth = (4, 3)
    assert truth == g.edges.shape

def test_input_edges():
    g = Graph(data)
    g.build_edge_array()
    assert np.array_equal(edges, g.edges)

def test_initialise_walk_array():
    g = Graph(data)
    num_walks = 10
    walk_length = 20
    walks = g.initialise_walk_array(num_walks=num_walks, walk_length=walk_length)
    assert walks.shape == (40, 20)
    assert np.array_equal(walks[:, 0], np.array([0, 1, 2, 3] * 10))

def test_sample_next_vertices():
    """
    In the test graph the vertex with index 2 is only connected to
    vertices 1 and 3
    """
    g = Graph(data)
    current_vertices = np.array([2, 2, 2, 2])
    for idx in range(10):
        next_vertex_indices = g.sample_next_vertices(current_vertices, degs)
        for elem in next_vertex_indices:
            assert (elem == 0) | (elem == 1)
        assert next_vertex_indices.shape == current_vertices.shape

def test_walks_to_list_of_strings():
    walks_str = walks.astype(str)
    walk_list = walks_str.tolist()
    for walk in walk_list:
        assert len(walk) == 3
        for elem in walk:
            assert type(elem) == str

def test_oscillating_random_walk_1walk():
    g = Graph(data1)
    g.build_edge_array()
    walks = g.generate_walks(1, 10)
    walk1 = [0, 1] * 5
    walk2 = [1, 0] * 5
    truth = np.array([walk1, walk2])
    print(walks)
    assert np.array_equal(walks, truth)

def test_oscillating_random_walk_2walks():
    g = Graph(data1)
    g.build_edge_array()
    walks = g.generate_walks(2, 10)
    walk1 = [0, 1] * 5
    walk2 = [1, 0] * 5
    truth = np.array([walk1, walk2, walk1, walk2])
    print(walks)
    assert np.array_equal(walks, truth)
[ "ben@starcount.com" ]
ben@starcount.com
78a12d8bff14792b00e4507e76858d1a178bc660
c60ef27fe285c73fad4076122bb3d6f2fe05f111
/fragscapy/modifications/ipv4_frag.py
02fb8dd3eaf1d8ea14cfed0938fc1dfb0fb6c079
[ "MIT" ]
permissive
daeon/Fragscapy
be88d8b3c6fc309515ecf5f06939f43ddf8022a5
3ee7f5c73fc6c7eb64858e197c0b8d2b313734e0
refs/heads/master
2023-08-31T09:27:31.931466
2021-05-27T20:01:11
2021-05-27T20:01:11
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,743
py
"""Fragments the IPv4 packets at the L3-layer.""" import scapy.layers.inet import scapy.packet from fragscapy.modifications.mod import Mod from fragscapy.packetlist import PacketList class Ipv4Frag(Mod): """Fragments the IPv4 packets at the L3-layer. Fragment each IPv4 packet. the fragmentation size must be specified. It represents the maximum size of each packet (including headers). It uses the scapy's fragmentation function. Args: *args: The arguments of the mods. Attributes: fragsize: The fragmentation size (maximum length of a fragment). Raises: ValueError: Unrecognized or incorrect number of parameters. Examples: >>> Ipv4Frag(32).fragsize 32 """ name = "Ipv4Frag" doc = ("Fragments the IPv4 packets at the L3-layer\n" "ipv4_frag <size>") _nb_args = 1 def parse_args(self, *args): """See base class.""" try: self.fragsize = int(args[0]) except ValueError: raise ValueError("Parameter 1 unrecognized. " "Got {}".format(args[0])) def apply(self, pkt_list): """Fragment each IPv6 packet. See `Mod.apply` for more details.""" new_pl = PacketList() for pkt in pkt_list: if pkt.pkt.haslayer('IP'): fragments = scapy.layers.inet.fragment(pkt.pkt, self.fragsize) index = len(new_pl) - 1 for fragment in fragments: new_pl.add_packet(fragment) new_pl.edit_delay(index, pkt.delay) else: # Not IPv4 so no fragmentation new_pl.add_packet(fragment, pkt.delay) return new_pl
[ "frederic.guihery@amossys.fr" ]
frederic.guihery@amossys.fr
57f3fd827be2d763a94518ae48d40d7b18419a79
6a275ce8642562f93c659b58f5c47bc5cf84f85c
/luffy_django/x.py
ca687f2cd7b3383d3e77b26de20b5d659e1fde9f
[]
no_license
LXianB/school_city
ec2d5080ae13bc3e0279fe9f0cee55a60269efa1
7c06deb063d3ed1c20c6b26275f51111e280bb79
refs/heads/master
2020-07-28T19:38:50.257633
2019-09-19T09:28:28
2019-09-19T09:28:28
209,513,987
0
0
null
null
null
null
UTF-8
Python
false
false
86
py
import json

v = {
    1: 'db',
    2: 'sb',
}
s = json.dumps(v)
# JSON object keys are strings, so the round-trip yields {'1': 'db', '2': 'sb'}
print(json.loads(s))
[ "1135487517A392C3588D2B7E9075EE33@i-search.com.cn" ]
1135487517A392C3588D2B7E9075EE33@i-search.com.cn
c82bef90561c2f2fe7c38bf3d5bde226a9fd1930
33f448e2d3315f758675c852e5853a16813a211b
/melon/crawler/forms.py
34a1d217bbc00cff1c396a272af7ed993f140571
[]
no_license
CuCTeMeH/image_scrapper
f400c4b67934fa17dc48eeaf2355f91bd379b540
567e301840d648a4ca912dfa1eb3228cc973838e
refs/heads/master
2020-06-08T14:25:15.572146
2013-08-15T20:25:36
2013-08-15T20:25:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
164
py
from django import forms
from crawler.models import site_url, site_image
from django.http import HttpResponse


class UrlForm(forms.Form):
    url = forms.URLField()
[ "kutinchev87@gmail.com" ]
kutinchev87@gmail.com
a341ad35c0cc89c48ba55282eb9191fa23f561e4
af2b4fbb514468846b2d96f5010292d9b973b819
/tests/test_cli.py
2f1eeee3b88c87c7ac853abcc6124c52ef2a6a1d
[ "MIT" ]
permissive
rdimaio/parsa
350f4f2b8bb07d11ce2f3edf359b758208c8f79e
9ce45235efb702cea0aa4254ed2f2c91d56d34a8
refs/heads/master
2021-07-25T17:46:29.169755
2020-04-13T11:38:44
2020-04-13T11:38:44
146,991,563
1
0
MIT
2018-10-11T21:11:34
2018-09-01T11:08:47
Python
UTF-8
Python
false
false
1,690
py
"""Tests for utils/cli.py. Tests: parse_arguments: empty_args no_output_arg_passed output_arg_passed _set_arguments: tested implicitly in the parse_arguments test """ import unittest import os import sys try: from unittest import mock except ImportError: import mock sys.path.append(os.path.abspath('..')) from parsa.utils import cli class CLITest(unittest.TestCase): def test_parse_arguments_empty_args(self): """When sys.argvs is empty, the function should exit with SystemExit: 2.""" testargs = [''] with mock.patch.object(sys, 'argv', testargs): # https://stackoverflow.com/a/13491726 with self.assertRaises(SystemExit) as sys_e: cli.parse_arguments() self.assertEqual(sys_e.exception.code, 2) def test_parse_arguments_no_output_arg_passed(self): """Only the input argument is passed.""" cli_input_arg = 'foo' testargs = ['', cli_input_arg] with mock.patch.object(sys, 'argv', testargs): args = vars(cli.parse_arguments()) self.assertEqual(args['input'], cli_input_arg) def test_parse_arguments_output_arg_passed(self): """Both the input and output arguments are passed.""" cli_input_arg = 'foo' cli_output_arg = 'bar' testargs = ['', '-o', cli_output_arg, cli_input_arg] with mock.patch.object(sys, 'argv', testargs): args = vars(cli.parse_arguments()) self.assertEqual(args['input'], cli_input_arg) self.assertEqual(args['output'], cli_output_arg)
[ "riccardodimaio11@gmail.com" ]
riccardodimaio11@gmail.com
e8ba2a98ff92412f2246fd72b4c6ec99a9424125
4c0a2efb54a87e8419c530e49173484660021c16
/src/demo_hic_et_nunc/types/hen_minter/storage.py
f5033f16e1f986ad59cd0840b788ee2872f06481
[ "MIT" ]
permissive
jellybazil/dipdup-py
7cc6641b7a25379034be401626d91d17d2493f43
950b086effbfce78080461ecc2f959ba7a8ba998
refs/heads/master
2023-08-12T06:50:01.445161
2021-10-16T20:52:29
2021-10-16T20:52:29
null
0
0
null
null
null
null
UTF-8
Python
false
false
751
py
# generated by datamodel-codegen:
#   filename:  storage.json

from __future__ import annotations

from typing import Dict

from pydantic import BaseModel, Extra


class Royalties(BaseModel):
    class Config:
        extra = Extra.forbid

    issuer: str
    royalties: str


class Swaps(BaseModel):
    class Config:
        extra = Extra.forbid

    issuer: str
    objkt_amount: str
    objkt_id: str
    xtz_per_objkt: str


class HenMinterStorage(BaseModel):
    class Config:
        extra = Extra.forbid

    curate: str
    genesis: str
    hdao: str
    locked: bool
    manager: str
    metadata: Dict[str, str]
    objkt: str
    objkt_id: str
    royalties: Dict[str, Royalties]
    size: str
    swap_id: str
    swaps: Dict[str, Swaps]
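A hedged usage sketch in pydantic v1 style, matching the generated Extra.forbid classes; every sample value below is made up:

sample = {
    'curate': 'KT1...', 'genesis': 'tz1...', 'hdao': 'KT1...',
    'locked': True, 'manager': 'tz1...',
    'metadata': {'': 'tezos-storage:content'},
    'objkt': 'KT1...', 'objkt_id': '152',
    'royalties': {'42': {'issuer': 'tz1...', 'royalties': '100'}},
    'size': '0', 'swap_id': '500123',
    'swaps': {'500000': {'issuer': 'tz1...', 'objkt_amount': '1',
                         'objkt_id': '42', 'xtz_per_objkt': '1000000'}},
}
storage = HenMinterStorage.parse_obj(sample)   # pydantic v1 API
print(storage.royalties['42'].royalties)       # -> '100'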
[ "noreply@github.com" ]
jellybazil.noreply@github.com
558d7cf4a36cfe878990521818b11026bcc8b7b2
bba60e8fdde48cdcd07b0fef80e85f931b9377af
/labs/week5/perspective_lab.py
8dd0c967dd763a09d6bcf4c90c1ea411892f1af2
[]
no_license
atomminder/Coursera_Brown_Coding_the_matrix
dc233e2ecfc7025a15f7c5b72c2c2b3501e13249
5e5e26cff4db4b39e63acf003c00350c1d83a5d7
refs/heads/master
2016-09-06T02:59:04.266312
2013-08-26T04:52:42
2013-08-26T04:52:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,462
py
from image_mat_util import *
from mat import Mat
from matutil import *
from vec import Vec
from solver import solve

## Task 1
def move2board(v):
    '''
    Input:
        - v: a vector with domain {'y1','y2','y3'}, the coordinate
          representation of a point q.
    Output:
        - A {'y1','y2','y3'}-vector z, the coordinate representation in
          whiteboard coordinates of the point p such that the line through
          the origin and q intersects the whiteboard plane at p.
    '''
    result_vec = Vec({'y1','y2','y3'}, {})
    result_vec['y1'] = v['y1'] / v['y3']
    result_vec['y2'] = v['y2'] / v['y3']
    result_vec['y3'] = 1
    return result_vec

## Task 2
def make_equations(x1, x2, w1, w2):
    '''
    Input:
        - x1 & x2: photo coordinates of a point on the board
        - w1 & w2: whiteboard coordinates of the same point
    Output:
        - List [u,v] where u*h = 0 and v*h = 0
    '''
    domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
    u = Vec(domain, {})
    u[('y3','x1')] = w1 * x1
    u[('y3','x2')] = w1 * x2
    u[('y3','x3')] = w1
    u[('y1','x1')] = -x1
    u[('y1','x2')] = -x2
    u[('y1','x3')] = -1
    v = Vec(domain, {})
    v[('y3','x1')] = w2 * x1
    v[('y3','x2')] = w2 * x2
    v[('y3','x3')] = w2
    v[('y2','x1')] = -x1
    v[('y2','x2')] = -x2
    v[('y2','x3')] = -1
    return [u, v]

## Task 3
# calculate
u1,v1 = make_equations(329,597,0,1)
u2,v2 = make_equations(358,36,0,0)
u3,v3 = make_equations(592,157,1,0)
u4,v4 = make_equations(580,483,1,1)
domain = {(a, b) for a in {'y1', 'y2', 'y3'} for b in {'x1', 'x2', 'x3'}}
last_vec = Vec(domain, {})
last_vec[('y1','x1')] = 1
vector_list = [u1,v1,u2,v2,u3,v3,u4,v4,last_vec]
L = rowdict2mat(vector_list)
#print(L)
b = Vec({0,1,2,3,4,5,6,7,8},{8:1})
#print(b)
h = solve(L,b)
#residual = b - L*h
#if residual * residual < 10e-14:
#    print(True)
#else:
#    print(False)
#print(h)

#H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}),{})
#H[('y1','x1')] = 1
#H[('y1','x2')] = 0.0517
#H[('y1','x3')] = -360
#H[('y2','x1')] = -0.382
#H[('y2','x2')] = 0.738
#H[('y2','x3')] = 110
#H[('y3','x1')] = -0.722
#H[('y3','x2')] = -0.0117
#H[('y3','x3')] = 669
#H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}),
#        {('y1','x1'):1,('y1','x2'):0.0517,('y1','x3'):-360,
#         ('y2','x1'):-0.382,('y2','x2'):0.738,('y2','x3'):110,
#         ('y3','x1'):-0.722,('y3','x2'):-0.0117,('y3','x3'):669})
H = Mat(({'y1', 'y3', 'y2'}, {'x2', 'x3', 'x1'}), h.f)

## Task 4
def mat_move2board(Y):
    '''
    Input:
        - Y: Mat instance, each column of which is a 'y1', 'y2', 'y3'
          vector giving the whiteboard coordinates of a point q.
    Output:
        - Mat instance, each column of which is the corresponding point in
          the whiteboard plane (the point of intersection with the
          whiteboard plane of the line through the origin and q).
    '''
    for i in Y.D[1]:
        Y['y1',i] = Y['y1',i] / Y['y3',i]
        Y['y2',i] = Y['y2',i] / Y['y3',i]
        Y['y3',i] = 1
    return Y

# test
#(X_pts, colors) = file2mat('board.png', ('x1','x2','x3'))
#Y_pts = H * X_pts
#print(Y_pts.D[0])
#print(Y_pts.D[1])
#Y_in = Mat(({'y1', 'y2', 'y3'}, {0,1,2,3}),
#           {('y1',0):2, ('y2',0):4, ('y3',0):8,
#            ('y1',1):10, ('y2',1):5, ('y3',1):5,
#            ('y1',2):4, ('y2',2):25, ('y3',2):2,
#            ('y1',3):5, ('y2',3):10, ('y3',3):4})
#print(Y_in)
#print(mat_move2board(Y_in))
#print(Y)
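A quick numeric check of the recovered camera-to-whiteboard map, done in plain NumPy with the approximate H values from the commented-out matrix above: pixel (358, 36) was pinned to board corner (0, 0) in Task 3, so after the perspective divide its image should land near the origin.

import numpy as np

H = np.array([[1.0, 0.0517, -360.0],
              [-0.382, 0.738, 110.0],
              [-0.722, -0.0117, 669.0]])   # approximate solution from the comments above

q = H @ np.array([358.0, 36.0, 1.0])       # map the pixel, then divide by y3
print(q[:2] / q[2])                        # expect values close to (0, 0)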
[ "ice.sagittarius@gmail.com" ]
ice.sagittarius@gmail.com
526bfabd8d4add6feced315ac34c366676ccfe3d
69864d3134f224fb00c46327f7cd4f34c8749f3f
/bongo/apps/frontend/tests/view_tests.py
1fbb400ab0d3d6551e735cfffb3308c990d2cd84
[ "MIT" ]
permissive
BowdoinOrient/bongo
4298e2f1a353be94640ceb2b24da1178abaf92ec
3a78dd8a8f9d853661ba9f0b7df900ec497940a1
refs/heads/develop
2021-01-17T17:07:18.582716
2015-10-25T15:33:47
2015-10-25T15:33:47
18,506,776
3
1
null
2015-10-25T15:33:48
2014-04-07T04:57:02
Python
UTF-8
Python
false
false
3,185
py
from django.test import TestCase
from bongo.apps.bongo.tests import factories


class ArticleViewTestCase(TestCase):
    def test_by_slug(self):
        """Test that you can route to an article by using its slug"""
        post = factories.PostFactory.create()
        response = self.client.get("/article/{}/".format(post.slug))
        self.assertEqual(response.status_code, 200)

    def test_by_id(self):
        """Test that you can route to an article by using its ID"""
        post = factories.PostFactory.create()
        response = self.client.get("/article/{}/".format(post.id))
        self.assertEqual(response.status_code, 301)
        self.assertEqual(response.url, "http://testserver/article/{}/".format(post.slug))


class HomeViewTestCase(TestCase):
    pass


class AuthorViewTestCase(TestCase):
    def test_creator_view_context(self):
        creator = factories.CreatorFactory.create()
        posts = [factories.PostFactory.create() for x in range(5)]
        for post in posts:
            article = post.text.first()
            article.creators.add(creator)
            article.save()
            post.save(auto_dates=False)
        res = self.client.get('/author/{}/'.format(creator.pk))
        self.assertEqual(creator, res.context['creator'])
        self.assertEqual(set(posts), set(res.context['posts']))

    def test_author_view_route(self):
        creator = factories.CreatorFactory.create()
        self.assertEqual(self.client.get('/author/{}/'.format(creator.pk)).status_code, 200)
        self.assertEqual(self.client.get('/author/').status_code, 404)
        self.assertEqual(self.client.get('/author/0/').status_code, 404)


class SeriesViewTestCase(TestCase):
    def test_series_view_context(self):
        series = factories.SeriesFactory.create()
        posts = [factories.PostFactory.create() for x in range(5)]
        for post in posts:
            post.series.add(series)
            post.save(auto_dates=False)
        res = self.client.get('/series/{}/'.format(series.pk))
        self.assertEqual(series, res.context['series'])
        self.assertEqual(set(posts), set(res.context['posts']))

    def test_series_view_route(self):
        series = factories.SeriesFactory.create()
        self.assertEqual(self.client.get('/series/{}/'.format(series.pk)).status_code, 200)
        self.assertEqual(self.client.get('/series/').status_code, 404)
        self.assertEqual(self.client.get('/series/0/').status_code, 404)


class StaticViewsTestCase(TestCase):
    def test_about_view(self):
        res = self.client.get('/about/')
        self.assertEqual(res.status_code, 200)

    def test_ethics_view(self):
        res = self.client.get('/ethics/')
        self.assertEqual(res.status_code, 200)

    def test_subscribe_view(self):
        res = self.client.get('/subscribe/')
        self.assertEqual(res.status_code, 200)

    def test_advertise_view(self):
        res = self.client.get('/advertise/')
        self.assertEqual(res.status_code, 200)

    def test_contact_view(self):
        res = self.client.get('/contact/')
        self.assertEqual(res.status_code, 200)
[ "bjacobel@localytics.com" ]
bjacobel@localytics.com
0f06f71767bd30c22c6e18ec01e9a0c89ee695ce
3dfaf9d28b59c23ee3432c26dbf499589645a0b7
/timed_io.py
7326af92a0c9a56229bd514e916cbf0b46495fc0
[]
no_license
wielgusm/timed_new
c9b82575c2cc77a83fd94b1ee7a085748f8b726f
d0017af0985bad0fd5cb98f675382f05824650d8
refs/heads/master
2020-03-29T01:46:21.353351
2018-11-05T17:14:15
2018-11-05T17:14:15
149,405,524
0
0
null
null
null
null
UTF-8
Python
false
false
28,272
py
import sys, os, itertools import numpy as np import pandas as pd import matplotlib.pyplot as plt #import qmetric from timed_new import qmetric from astropy.time import Time import datetime as datetime try: import ehtim as eh except ModuleNotFoundError: sys.path.append('/Volumes/DATAPEN/Shared/EHT/EHTIM/eht-imaging_polrep/eht-imaging/') import ehtim as eh nam2lett = {'ALMA':'A','AA':'A','A':'A', 'APEX':'X','AP':'X','X':'X', 'LMT':'L','LM':'L','L':'L', 'PICOVEL':'P','PICO':'P','PV':'P','P':'P','IRAM30':'P', 'SMTO':'Z','SMT':'Z','AZ':'Z','Z':'Z', 'SPT':'Y','SP':'Y','Y':'Y', 'JCMT':'J','JC':'J','J':'J', 'SMAP':'S','SMA':'S','SM':'S','S':'S', 'SMAR':'R','R':'R','SR':'R', 'B':'B','C':'C','D':'D'} pol_dic={'LL':'ll','ll':'ll','L':'ll', 'RR':'rr','rr':'rr','R':'rr', 'RL':'rl','rl':'rl', 'LR':'lr','lr':'lr'} def load_uvfits(path_to_data,tcoh=-1,single_letter=True,polrep='circ',polar=None): if polar=='LL':polar='L' if polar=='RR':polar='R' try: obs = eh.obsdata.load_uvfits(path_to_data,polrep=polrep,force_singlepol=polar) except TypeError: obs = eh.obsdata.load_uvfits(path_to_data,force_singlepol=polar) #if full_polar: obs.df = make_df_full_cp(obs) #else: obs.df = eh.statistics.dataframes.make_df(obs) obs.df = eh.statistics.dataframes.make_df(obs) if (type(tcoh)!=str): if (tcoh > 0): obs = obs.avg_coherent(inttime=tcoh) else: if tcoh=='scan': try: foo = len(obs.scan) except: print('Adding scans automatically') obs.add_scans() obs = obs.avg_coherent(inttime=1,scan_avg=True) tobs=tobsdata(obs,single_letter=single_letter) return tobs #def load_csv(path_to_data, product, columns=None): class tobsdata: def __init__(self,obs,single_letter=True): try: self.df=obs.df except AttributeError: obs.df = eh.statistics.dataframes.make_df(obs) if single_letter: if np.mean([len(x) for x in np.asarray(obs.df['baseline'])]) > 2.5: obs.df['baseline'] = [nam2lett[x.split('-')[0]]+nam2lett[x.split('-')[1]] for x in list(obs.df['baseline'])] self.source = obs.source self.df=obs.df self.ra=obs.ra self.dec=obs.dec self.data=obs.data self.mjd=obs.mjd try: self.polrep=obs.polrep except AttributeError: pass try: self.scans=obs.scans except: pass def get_tseries(self,ident,product='',polar='none'): return tseries(self,ident,product=product,polar=polar) class fake_tobs: def __init__(self,**kwargs): for key in kwargs: setattr(self, key, kwargs[key]) try: foo = self.source except AttributeError: self.source='source' try: foo = self.ra except AttributeError: self.ra=0 try: foo = self.dec except AttributeError: self.dec=0 class tseries: def __init__(self,tobs,ident,product='',polar='none',csv_path='',csv_columns=None,csv_product=None,**kwargs): if product=='csv': tobs = fake_tobs(**kwargs) foo = pd.read_csv(csv_path,names=csv_columns) if product=='': if len(ident)==2: product='amp' elif len(ident)==3: product='cphase' elif len(ident)==4: product='lcamp' self.product=product if product=='csv': self.type=csv_product else: self.type=product self.ident = ident self.polarization = polar self.source = tobs.source self.ra=tobs.ra self.dec=tobs.dec if product=='amp': foo = tobs.df[(tobs.df.baseline==ident) | (tobs.df.baseline==ident[1]+ident[0])] if polar != 'none': polamp=pol_dic[polar]+'amp' polsigma=pol_dic[polar]+'sigma' else: polamp='amp'; polsigma='sigma' foo=foo[foo[polamp]==foo[polamp]].copy() self.mjd = np.asarray(foo.mjd) self.time = np.asarray(foo.time) self.amp = np.asarray(foo[polamp]) self.sigma = np.asarray(foo[polsigma]) self.data = foo elif product=='cphase': foo = get_cphase(tobs,ident,polar=polar) 
foo=foo[foo.cphase==foo.cphase].copy() self.mjd = np.asarray(foo.mjd) self.time = np.asarray(foo.time) self.cphase = np.asarray(foo.cphase) self.sigmaCP = np.asarray(foo.sigmaCP) self.data = foo elif product=='lcamp': foo = get_lcamp(tobs,ident,polar=polar) #if polar!='none': foo = foo.dropna(subset=[polamp]) foo=foo[foo.lcamp==foo.lcamp].copy() self.mjd = np.asarray(foo.mjd) self.time = np.asarray(foo.time) self.lcamp = np.asarray(foo.lcamp) self.sigmaLCA = np.asarray(foo.sigmaLCA) self.data = foo elif product=='lcfrac': foo = get_lcfrac(tobs,ident) #if polar!='none': foo = foo.dropna(subset=[polamp]) foo=foo[foo.lcfrac==foo.lcfrac].copy() self.mjd = np.asarray(foo.mjd) self.time = np.asarray(foo.time) self.lcfrac = np.asarray(foo.lcfrac) self.sigmaLCF = np.asarray(foo.sigmaLCF) self.data = foo elif product=='cfrac': foo = get_cfrac(tobs,ident) #if polar!='none': foo = foo.dropna(subset=[polamp]) foo=foo[foo.cfrac==foo.cfrac].copy() self.mjd = np.asarray(foo.mjd) self.time = np.asarray(foo.time) self.cfrac = np.asarray(foo.cfrac) self.sigmaCF = np.asarray(foo.sigmaCF) self.data = foo elif product=='csv': for col in csv_columns: setattr(self, col, foo[col]) self.data = foo try: goo=self.time except AttributeError: self.time=self.mjd def plot(self,line=False,figsize='',errorscale=1.,add_title=''): if figsize=='': plt.figure(figsize=(10,5)) else: plt.figure(figsize=figsize) if line: fmt='o-' else: fmt='o' plt.title(self.ident+' '+self.type+' '+add_title) if self.type=='cphase': plt.errorbar(self.time,self.cphase,errorscale*self.sigmaCP,fmt=fmt,capsize=5) plt.ylabel('cphase [deg]') elif self.type=='amp': plt.errorbar(self.time,self.amp,errorscale*self.sigma,fmt=fmt,capsize=5) plt.ylabel('amp') elif self.type=='lcamp': plt.errorbar(self.time,self.lcamp,errorscale*self.sigmaLCA,fmt=fmt,capsize=5) plt.ylabel('log camp') elif self.type=='lcfrac': plt.errorbar(self.time,self.lcfrac,errorscale*self.sigmaLCF,fmt=fmt,capsize=5) plt.ylabel('log cfracpol') elif self.type=='cfrac': plt.errorbar(self.time,self.cfrac,errorscale*self.sigmaCF,fmt=fmt,capsize=5) plt.ylabel('cfracpol') plt.grid() plt.xlabel('time [h]') plt.show() def plot_compare(self,tser,line=False,figsize='',errorscale=1.,add_title=''): if figsize=='': plt.figure(figsize=(10,5)) else: plt.figure(figsize=figsize) if line: fmt='o-' else: fmt='o' plt.title(self.ident+' '+self.type+' '+add_title) if self.type=='cphase': plt.errorbar(self.time,self.cphase,errorscale*self.sigmaCP,fmt=fmt,capsize=5,label=self.ident) plt.errorbar(tser.time,tser.cphase,errorscale*tser.sigmaCP,fmt=fmt,capsize=5,label=tser.ident) plt.ylabel('cphase [deg]') elif self.type=='amp': plt.errorbar(self.time,self.amp,errorscale*self.sigma,fmt=fmt,capsize=5,label=self.ident) plt.errorbar(tser.time,tser.amp,errorscale*tser.sigma,fmt=fmt,capsize=5,label=tser.ident) plt.ylabel('amp') elif self.type=='lcamp': plt.errorbar(self.time,self.lcamp,errorscale*self.sigmaLCA,fmt=fmt,capsize=5,label=self.ident) plt.errorbar(tser.time,tser.lcamp,errorscale*tser.sigmaLCA,fmt=fmt,capsize=5,label=tser.ident) plt.ylabel('log camp') elif self.type=='lcfrac': plt.errorbar(self.time,self.lcfrac,errorscale*self.sigmaLCF,fmt=fmt,capsize=5,label=self.ident) plt.errorbar(tser.time,tser.lcfrac,errorscale*tser.sigmaLCF,fmt=fmt,capsize=5,label=tser.ident) plt.ylabel('log cfracpol') elif self.type=='cfrac': plt.errorbar(self.time,self.cfrac,errorscale*self.sigmaCF,fmt=fmt,capsize=5,label=self.ident) plt.errorbar(tser.time,tser.cfrac,errorscale*tser.sigmaCF,fmt=fmt,capsize=5,label=tser.ident) 
            plt.ylabel('cfracpol')
        plt.grid()
        plt.xlabel('time [h]')
        plt.legend()
        plt.show()

    def plot_compare_list(self,tserL,line=False,figsize='',errorscale=1.,add_title='',labelsL=None,err_cut=1e5,xrange=None,yrange=None):
        if figsize=='':
            plt.figure(figsize=(10,5))
        else:
            plt.figure(figsize=figsize)
        if line: fmt='o-'
        else: fmt='o'
        if labelsL is None:
            labelsL0 = [tser.ident for tser in tserL]
            labelsL = [self.ident] + labelsL0
        plt.title(self.ident+' '+self.type+' '+add_title)
        if self.type=='cphase':
            plt.errorbar(self.time[self.sigmaCP<err_cut],self.cphase[self.sigmaCP<err_cut],errorscale*self.sigmaCP[self.sigmaCP<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
            for cou,tser in enumerate(tserL):
                plt.errorbar(tser.time[tser.sigmaCP<err_cut],tser.cphase[tser.sigmaCP<err_cut],errorscale*tser.sigmaCP[tser.sigmaCP<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
            plt.ylabel('cphase [deg]')
        elif self.type=='amp':
            plt.errorbar(self.time[self.sigma<err_cut],self.amp[self.sigma<err_cut],errorscale*self.sigma[self.sigma<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
            for cou,tser in enumerate(tserL):
                plt.errorbar(tser.time[tser.sigma<err_cut],tser.amp[tser.sigma<err_cut],errorscale*tser.sigma[tser.sigma<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
            plt.ylabel('amp')
        elif self.type=='lcamp':
            plt.errorbar(self.time[self.sigmaLCA<err_cut],self.lcamp[self.sigmaLCA<err_cut],errorscale*self.sigmaLCA[self.sigmaLCA<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
            for cou,tser in enumerate(tserL):
                plt.errorbar(tser.time[tser.sigmaLCA<err_cut],tser.lcamp[tser.sigmaLCA<err_cut],errorscale*tser.sigmaLCA[tser.sigmaLCA<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
            plt.ylabel('log camp')
        elif self.type=='lcfrac':
            plt.errorbar(self.time[self.sigmaLCF<err_cut],self.lcfrac[self.sigmaLCF<err_cut],errorscale*self.sigmaLCF[self.sigmaLCF<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
            for cou,tser in enumerate(tserL):
                plt.errorbar(tser.time[tser.sigmaLCF<err_cut],tser.lcfrac[tser.sigmaLCF<err_cut],errorscale*tser.sigmaLCF[tser.sigmaLCF<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
            plt.ylabel('log cfracpol')
        elif self.type=='cfrac':
            plt.errorbar(self.time[self.sigmaCF<err_cut],self.cfrac[self.sigmaCF<err_cut],errorscale*self.sigmaCF[self.sigmaCF<err_cut],fmt=fmt,capsize=5,label=labelsL[0])
            for cou,tser in enumerate(tserL):
                plt.errorbar(tser.time[tser.sigmaCF<err_cut],tser.cfrac[tser.sigmaCF<err_cut],errorscale*tser.sigmaCF[tser.sigmaCF<err_cut],fmt=fmt,capsize=5,label=labelsL[cou+1])
            plt.ylabel('cfracpol')
        plt.grid()
        plt.xlabel('time [h]')
        if yrange is not None: plt.ylim(yrange)
        if xrange is not None: plt.xlim(xrange)
        plt.legend()
        plt.show()

    def hist(self,figsize='',perc=2.,show_normal=True):
        if figsize=='':
            plt.figure(figsize=(10,5))
        else:
            plt.figure(figsize=figsize)
        if self.type=='cphase':
            x = self.cphase
            err = self.sigmaCP
            rel_cl = self.cphase/self.sigmaCP
            plt.xlabel('(closure phase) / (estimated error)')
        elif self.type=='lcamp':
            x = self.lcamp
            err = self.sigmaLCA
            rel_cl = self.lcamp/self.sigmaLCA
            plt.xlabel('(log closure amp) / (estimated error)')
        elif self.type=='amp':
            x = (self.amp - np.mean(self.amp))
            err = self.sigma
            rel_cl = (self.amp - np.mean(self.amp))/self.sigma
            plt.xlabel('(amp - mean amp) / (estimated error)')
        binL = np.percentile(rel_cl,perc)
        binR = np.percentile(rel_cl,100.-perc)
        binDist = np.abs(binR-binL)
        binR = binR + 0.1*binDist
        binL = binL - 0.1*binDist
        bins = np.linspace(binL,binR,int(1.2*np.sqrt(len(rel_cl))))
        plt.hist(rel_cl,bins=bins,density=True)  # normalized histogram
        if show_normal:
            plt.axvline(0,color='k',linestyle='--')
            xg = np.linspace(binL, binR,128)
    def qmetric(self):
        if self.type=='amp':
            x = self.amp
            err_x = self.sigma
        if self.type=='cphase':
            x = self.cphase
            err_x = self.sigmaCP
        if self.type=='lcamp':
            x = self.lcamp
            err_x = self.sigmaLCA
        q,dq = qmetric.qmetric(self.time,x,err_x,product=self.type)
        return q,dq

    def save_csv(self,name_out,columns='default',sep=',',header=False):
        if columns=='default':
            if self.type=='amp':
                columns = ['mjd','amp','sigma']
            elif self.type=='cphase':
                columns = ['mjd','cphase','sigmaCP']
            elif self.type=='lcamp':
                columns = ['mjd','lcamp','sigmaLCA']
            elif self.type=='lcfrac':
                columns = ['mjd','lcfrac','sigmaLCF']
            elif self.type=='cfrac':
                columns = ['mjd','cfrac','sigmaCF']
        self.data[columns].to_csv(name_out,index=False,header=header,sep=sep)


def get_cphase(tobs,triangle,polar='none'):
    if polar != 'none':
        polvis = pol_dic[polar]+'vis'
        polsnr = pol_dic[polar]+'snr'
    else:
        polvis = 'vis'
        polsnr = 'snr'
    baseL = list(tobs.df.baseline.unique())
    # baselines of the triangle, consistently oriented: AB, BC, CA
    b = [triangle[0]+triangle[1],triangle[1]+triangle[2],triangle[2]+triangle[0]]
    sign = [0,0,0]
    baseT = b
    for cou in range(3):
        if (b[cou] in baseL)&(b[cou][::-1] not in baseL):
            sign[cou] = 1
        elif (b[cou] not in baseL)&(b[cou][::-1] in baseL):
            # data contain the reversed baseline; its visibility gets conjugated
            sign[cou] = -1
            baseT[cou] = b[cou][::-1]
    foo = tobs.df[list(map(lambda x: x in baseT, tobs.df.baseline))]
    # keep only epochs where all three baselines are present
    foo = foo.groupby('mjd').filter(lambda x: len(x)==3)
    fooB0 = foo[foo.baseline==baseT[0]].sort_values('mjd').copy()
    fooB1 = foo[foo.baseline==baseT[1]].sort_values('mjd').copy()
    fooB2 = foo[foo.baseline==baseT[2]].sort_values('mjd').copy()
    foo_out = fooB0[['time','datetime','mjd']].copy()
    foo_out['u1'] = np.asarray(fooB0['u'])
    foo_out['v1'] = np.asarray(fooB0['v'])
    foo_out['vis1'] = np.asarray(fooB0[polvis])
    if sign[0]==-1:
        foo_out['vis1'] = np.asarray(foo_out['vis1']).conj()
    foo_out['snr1'] = np.asarray(fooB0[polsnr])
    foo_out['u2'] = np.asarray(fooB1['u'])
    foo_out['v2'] = np.asarray(fooB1['v'])
    foo_out['vis2'] = np.asarray(fooB1[polvis])
    if sign[1]==-1:
        foo_out['vis2'] = np.asarray(foo_out['vis2']).conj()
    foo_out['snr2'] = np.asarray(fooB1[polsnr])
    foo_out['u3'] = np.asarray(fooB2['u'])
    foo_out['v3'] = np.asarray(fooB2['v'])
    foo_out['vis3'] = np.asarray(fooB2[polvis])
    if sign[2]==-1:
        foo_out['vis3'] = np.asarray(foo_out['vis3']).conj()
    foo_out['snr3'] = np.asarray(fooB2[polsnr])
    # closure phase = argument of the bispectrum, in degrees
    foo_out['cphase'] = (180./np.pi)*np.angle(foo_out['vis1']*foo_out['vis2']*foo_out['vis3'])
    foo_out['sigmaCP'] = (180./np.pi)*np.sqrt(1./foo_out['snr1']**2 + 1./foo_out['snr2']**2 + 1./foo_out['snr3']**2)
    return foo_out
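# A minimal sketch of calling get_cphase directly; the file name and the
# 3-letter triangle code 'AXY' are hypothetical, and the load_uvfits keywords
# mirror the defaults used by save_all_products. The returned DataFrame carries
# the bispectrum phase 'cphase' and its high-SNR error estimate
# sigmaCP = (180/pi)*sqrt(1/snr1^2 + 1/snr2^2 + 1/snr3^2).
def _example_cphase_table(path_uvfits='obs.uvfits'):
    tobs = load_uvfits(path_uvfits, tcoh=-1, polar='LL', polrep='circ')
    cp = get_cphase(tobs, 'AXY')
    return cp[['mjd', 'cphase', 'sigmaCP']]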
def get_lcamp(tobs,quadrangle,polar='none'):
    if polar != 'none':
        polvis = pol_dic[polar]+'vis'
        polsnr = pol_dic[polar]+'snr'
    else:
        polvis = 'vis'
        polsnr = 'snr'
    baseL = list(tobs.df.baseline.unique())
    # quadrangle ABCD -> numerator baselines AB, CD; denominator baselines AC, BD
    b = [quadrangle[0]+quadrangle[1],quadrangle[2]+quadrangle[3],quadrangle[0]+quadrangle[2],quadrangle[1]+quadrangle[3]]
    baseQ = b
    for cou in range(4):
        if (b[cou] not in baseL)&(b[cou][::-1] in baseL):
            baseQ[cou] = b[cou][::-1]
    foo = tobs.df[list(map(lambda x: x in baseQ, tobs.df.baseline))]
    # keep only epochs where all four baselines are present
    foo = foo.groupby('mjd').filter(lambda x: len(x)==4)
    fooB0 = foo[foo.baseline==baseQ[0]].sort_values('mjd').copy()
    fooB1 = foo[foo.baseline==baseQ[1]].sort_values('mjd').copy()
    fooB2 = foo[foo.baseline==baseQ[2]].sort_values('mjd').copy()
    fooB3 = foo[foo.baseline==baseQ[3]].sort_values('mjd').copy()
    foo_out = fooB0[['time','datetime','mjd']].copy()
    foo_out['u1'] = np.asarray(fooB0['u'])
    foo_out['v1'] = np.asarray(fooB0['v'])
    foo_out['vis1'] = np.asarray(fooB0[polvis])
    foo_out['snr1'] = np.asarray(fooB0[polsnr])
    foo_out['u2'] = np.asarray(fooB1['u'])
    foo_out['v2'] = np.asarray(fooB1['v'])
    foo_out['vis2'] = np.asarray(fooB1[polvis])
    foo_out['snr2'] = np.asarray(fooB1[polsnr])
    foo_out['u3'] = np.asarray(fooB2['u'])
    foo_out['v3'] = np.asarray(fooB2['v'])
    foo_out['vis3'] = np.asarray(fooB2[polvis])
    foo_out['snr3'] = np.asarray(fooB2[polsnr])
    foo_out['u4'] = np.asarray(fooB3['u'])
    foo_out['v4'] = np.asarray(fooB3['v'])
    foo_out['vis4'] = np.asarray(fooB3[polvis])
    foo_out['snr4'] = np.asarray(fooB3[polsnr])
    # log closure amplitude: log(|V1||V2|/(|V3||V4|))
    foo_out['lcamp'] = np.log(np.abs(foo_out['vis1'])) + np.log(np.abs(foo_out['vis2'])) - np.log(np.abs(foo_out['vis3'])) - np.log(np.abs(foo_out['vis4']))
    foo_out['sigmaLCA'] = np.sqrt(1./foo_out['snr1']**2 + 1./foo_out['snr2']**2 + 1./foo_out['snr3']**2 + 1./foo_out['snr4']**2)
    return foo_out


def get_lcfrac(tobs,baseline):
    baseL = list(tobs.df.baseline.unique())
    if baseline not in baseL:
        if baseline[1]+baseline[0] in baseL:
            print('Using transposed baseline')
            baseline = baseline[1]+baseline[0]
        else:
            print('No such baseline')
    foo = tobs.df[tobs.df.baseline==baseline].copy()
    if tobs.polrep=='circ':
        foo.dropna(axis=0, subset=['rrvis','rlvis','llvis','lrvis','rrsigma','llsigma','lrsigma','rlsigma','rrsnr','llsnr','lrsnr','rlsnr'], inplace=True)
    foo_out = foo[['time','datetime','mjd']].copy()
    foo_out['u'] = np.asarray(foo['u'])
    foo_out['v'] = np.asarray(foo['v'])
    debias = True
    if debias:
        # debias amplitudes: |V| -> sqrt(|V|^2 - sigma^2), keeping the phase
        foo['rlvis'] = foo['rlvis']*np.sqrt(np.abs(foo['rlvis'])**2 - np.abs(foo['rlsigma'])**2)/np.abs(foo['rlvis'])
        foo['lrvis'] = foo['lrvis']*np.sqrt(np.abs(foo['lrvis'])**2 - np.abs(foo['lrsigma'])**2)/np.abs(foo['lrvis'])
        foo['rrvis'] = foo['rrvis']*np.sqrt(np.abs(foo['rrvis'])**2 - np.abs(foo['rrsigma'])**2)/np.abs(foo['rrvis'])
        foo['llvis'] = foo['llvis']*np.sqrt(np.abs(foo['llvis'])**2 - np.abs(foo['llsigma'])**2)/np.abs(foo['llvis'])
    # log closure fractional polarization: log(|RL||LR|/(|RR||LL|))
    foo_out['lcfrac'] = np.log(np.abs(foo['rlvis'])) + np.log(np.abs(foo['lrvis'])) - np.log(np.abs(foo['rrvis'])) - np.log(np.abs(foo['llvis']))
    foo_out['sigmaLCF'] = np.sqrt(1./foo['llsnr']**2 + 1./foo['rrsnr']**2 + 1./foo['lrsnr']**2 + 1./foo['rlsnr']**2)
    return foo_out
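# A minimal sketch of the fractional polarization products; these require data
# in the circular polarization representation ('circ'), since both parallel-
# and cross-hand visibilities enter the closure quantity. The path, baseline
# code 'AX', and keyword values are placeholder assumptions.
def _example_lcfrac_table(path_uvfits='obs.uvfits'):
    tobs = load_uvfits(path_uvfits, tcoh=-1, polar='LL', polrep='circ')
    return get_lcfrac(tobs, 'AX')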
def get_cfrac(tobs,baseline):
    baseL = list(tobs.df.baseline.unique())
    if baseline not in baseL:
        if baseline[1]+baseline[0] in baseL:
            print('Using transposed baseline')
            baseline = baseline[1]+baseline[0]
        else:
            print('No such baseline')
    foo = tobs.df[tobs.df.baseline==baseline].copy()
    if tobs.polrep=='circ':
        foo.dropna(axis=0, subset=['rrvis','rlvis','llvis','lrvis','rrsigma','llsigma','lrsigma','rlsigma','rrsnr','llsnr','lrsnr','rlsnr'], inplace=True)
    foo_out = foo[['time','datetime','mjd']].copy()
    foo_out['u'] = np.asarray(foo['u'])
    foo_out['v'] = np.asarray(foo['v'])
    debias = True
    if debias:
        # debias amplitudes: |V| -> sqrt(|V|^2 - sigma^2), keeping the phase
        foo['rlvis'] = foo['rlvis']*np.sqrt(np.abs(foo['rlvis'])**2 - np.abs(foo['rlsigma'])**2)/np.abs(foo['rlvis'])
        foo['lrvis'] = foo['lrvis']*np.sqrt(np.abs(foo['lrvis'])**2 - np.abs(foo['lrsigma'])**2)/np.abs(foo['lrvis'])
        foo['rrvis'] = foo['rrvis']*np.sqrt(np.abs(foo['rrvis'])**2 - np.abs(foo['rrsigma'])**2)/np.abs(foo['rrvis'])
        foo['llvis'] = foo['llvis']*np.sqrt(np.abs(foo['llvis'])**2 - np.abs(foo['llsigma'])**2)/np.abs(foo['llvis'])
    # closure fractional polarization: sqrt(|RL||LR|/(|RR||LL|))
    foo_out['cfrac'] = np.sqrt(np.abs(foo['rlvis'])*np.abs(foo['lrvis'])/(np.abs(foo['rrvis'])*np.abs(foo['llvis'])))
    foo_out['sigmaCF'] = 0.5*foo_out['cfrac']*np.sqrt(1./foo['llsnr']**2 + 1./foo['rrsnr']**2 + 1./foo['lrsnr']**2 + 1./foo['rlsnr']**2)
    return foo_out


def make_df_full_cp(obs,round_s=0.1):
    """converts visibilities from obs.data to DataFrame format,
    with a separate row for each polarization product (RR, LL, LR, RL)

    Args:
        obs: ObsData object
        round_s: accuracy of datetime object in seconds

    Returns:
        df: observation visibility data in DataFrame format
    """
    sour = obs.source
    df = pd.DataFrame(data=obs.data)
    df['fmjd'] = df['time']/24.
    df['mjd'] = obs.mjd + df['fmjd']
    telescopes = list(zip(df['t1'],df['t2']))
    telescopes = [(x[0],x[1]) for x in telescopes]
    df['baseline'] = [x[0]+'-'+x[1] for x in telescopes]
    df['amp'] = list(map(np.abs,df['vis']))
    df['phase'] = list(map(lambda x: (180./np.pi)*np.angle(x),df['vis']))
    df['datetime'] = Time(df['mjd'], format='mjd').datetime
    df['datetime'] = list(map(lambda x: round_time(x,round_s=round_s),df['datetime']))
    df['jd'] = Time(df['mjd'], format='mjd').jd
    quantities = ['llamp','rramp','rlamp','lramp','llsigma','rrsigma','rlsigma','lrsigma','rrphase','llphase','rlphase','lrphase']
    for quantity in quantities:
        df[quantity] = [x[0] for x in obs.unpack(quantity)]
    df['source'] = sour
    df['baselength'] = np.sqrt(np.asarray(df.u)**2+np.asarray(df.v)**2)
    # one sub-frame per polarization product, stacked into a single DataFrame
    basic_columns = list(set(df.columns)-set(quantities))
    dfrr = df[basic_columns+['rramp','rrphase','rrsigma']].copy()
    dfrr['amp'] = dfrr['rramp']
    dfrr['phase'] = dfrr['rrphase']
    dfrr['sigma'] = dfrr['rrsigma']
    dfrr = dfrr[basic_columns]
    dfrr['polarization'] = 'RR'
    dfll = df[basic_columns+['llamp','llphase','llsigma']].copy()
    dfll['amp'] = dfll['llamp']
    dfll['phase'] = dfll['llphase']
    dfll['sigma'] = dfll['llsigma']
    dfll = dfll[basic_columns]
    dfll['polarization'] = 'LL'
    dflr = df[basic_columns+['lramp','lrphase','lrsigma']].copy()
    dflr['amp'] = dflr['lramp']
    dflr['phase'] = dflr['lrphase']
    dflr['sigma'] = dflr['lrsigma']
    dflr = dflr[basic_columns]
    dflr['polarization'] = 'LR'
    dfrl = df[basic_columns+['rlamp','rlphase','rlsigma']].copy()
    dfrl['amp'] = dfrl['rlamp']
    dfrl['phase'] = dfrl['rlphase']
    dfrl['sigma'] = dfrl['rlsigma']
    dfrl = dfrl[basic_columns]
    dfrl['polarization'] = 'RL'
    df = pd.concat([dfrr,dfll,dflr,dfrl],ignore_index=True)
    return df


def round_time(t,round_s=0.1):
    """rounds time to a given accuracy

    Args:
        t: time (datetime object)
        round_s: delta time to round to, in seconds

    Returns:
        round_t: rounded time
    """
    t0 = datetime.datetime(t.year,1,1)
    foo = t - t0
    foo_s = foo.days*24*3600 + foo.seconds + foo.microseconds*(1e-6)
    foo_s = np.round(foo_s/round_s)*round_s
    days = np.floor(foo_s/24/3600)
    seconds = np.floor(foo_s - 24*3600*days)
    microseconds = int(1e6*(foo_s - days*3600*24 - seconds))
    round_t = t0 + datetime.timedelta(days,seconds,microseconds)
    return round_t
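# A quick illustration of round_time (the timestamp below is just an example):
# with round_s=0.1 the instant is snapped to the nearest 0.1 s, so
# 03:14:15.926535 becomes approximately 03:14:15.900000.
def _example_round_time():
    t = datetime.datetime(2017, 4, 5, 3, 14, 15, 926535)
    return round_time(t, round_s=0.1)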
def save_all_products(pathf,path_out,special_name,get_what=['AMP','CP','LCA','CF'],get_pol=['LL','RR'],min_elem=100.,cadence=-1,polrep='circ',columns='default'):
    if get_pol is None:
        get_pol = [None]
    for pol in get_pol:
        tobs = load_uvfits(pathf,tcoh=cadence,polar=pol,polrep=polrep)
        if pol is None:
            pol = ''
        stations = list(set(''.join(tobs.df.baseline)))
        stations = [x for x in stations if x!='R']
        if 'AMP' in get_what:
            print('Saving visibility amplitudes time series...')
            if not os.path.exists(path_out+'AMP'):
                os.makedirs(path_out+'AMP')
            baseL = tobs.df.baseline.unique()
            for base in baseL:
                tser = tseries(tobs,base,product='amp')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'AMP/'+special_name+'_'+tser.source+'_'+base+'_'+pol+'.csv',columns=columns)
        if 'CP' in get_what:
            print('Saving closure phase time series...')
            if not os.path.exists(path_out+'CP'):
                os.makedirs(path_out+'CP')
            triangleL = sorted([x[0]+x[1]+x[2] for x in itertools.combinations(stations,3)])
            for tri in triangleL:
                tser = tseries(tobs,tri,product='cphase')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'CP/'+special_name+'_'+tser.source+'_'+tri+'_'+pol+'.csv',columns=columns)
        if 'LCA' in get_what:
            print('Saving log closure amplitude time series...')
            if not os.path.exists(path_out+'LCA'):
                os.makedirs(path_out+'LCA')
            quadrangleL1 = sorted([x[0]+x[1]+x[2]+x[3] for x in itertools.combinations(stations,4)])
            quadrangleL2 = sorted([x[0]+x[3]+x[1]+x[2] for x in itertools.combinations(stations,4)])
            quadrangleL = quadrangleL1+quadrangleL2
            for quad in quadrangleL:
                tser = tseries(tobs,quad,product='lcamp')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'LCA/'+special_name+'_'+tser.source+'_'+quad+'_'+pol+'.csv',columns=columns)
        if 'LCF' in get_what:
            print('Saving log closure fracpol time series...')
            if not os.path.exists(path_out+'LCF'):
                os.makedirs(path_out+'LCF')
            baseL = tobs.df.baseline.unique()
            baseL = [base for base in baseL if 'R' not in base]
            for base in baseL:
                tser = tseries(tobs,base,product='lcfrac')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'LCF/'+special_name+'_'+tser.source+'_'+base+'.csv',columns=columns)
        if 'CF' in get_what:
            print('Saving closure fracpol time series...')
            if not os.path.exists(path_out+'CF'):
                os.makedirs(path_out+'CF')
            baseL = tobs.df.baseline.unique()
            baseL = [base for base in baseL if 'R' not in base]
            for base in baseL:
                tser = tseries(tobs,base,product='cfrac')
                if len(tser.mjd)>min_elem:
                    tser.save_csv(path_out+'CF/'+special_name+'_'+tser.source+'_'+base+'.csv',columns=columns)
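# A sketch of a full export run; the input path, output folder, and tag below
# are placeholders. Products land in per-type subfolders (AMP/, CP/, LCA/,
# CF/), one csv file per baseline/triangle/quadrangle and polarization; note
# that path_out is concatenated with the subfolder names, so it should end
# with a '/'.
def _example_export_all(path_uvfits='obs.uvfits', out_dir='./products/'):
    save_all_products(path_uvfits, out_dir, 'myexp',
                      get_what=['AMP', 'CP', 'LCA', 'CF'],
                      get_pol=['LL', 'RR'], min_elem=100.)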
[ "maciek.wielgus@gmail.com" ]
maciek.wielgus@gmail.com