hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67e6967f9057bb9fe14cc5543b93fd2036edcf8d | 2,662 | py | Python | 8/star2.py | nfitzen/advent-of-code-2020 | 774b7db35aaf31b0e72a569b3441343d50f4d079 | [
"CC0-1.0",
"MIT"
] | null | null | null | 8/star2.py | nfitzen/advent-of-code-2020 | 774b7db35aaf31b0e72a569b3441343d50f4d079 | [
"CC0-1.0",
"MIT"
] | null | null | null | 8/star2.py | nfitzen/advent-of-code-2020 | 774b7db35aaf31b0e72a569b3441343d50f4d079 | [
"CC0-1.0",
"MIT"
] | null | null | null | #!/usr/bin/env python3
# SPDX-FileCopyrightText: 2020 Nathaniel Fitzenrider <https://github.com/nfitzen>
#
# SPDX-License-Identifier: CC0-1.0
# Jesus Christ this was overengineered to Hell and back.
from typing import List, Tuple, Union
with open('input.txt') as f:
instructions = f.readlines()
# It's not a universal solution; it only works for jmp.
# I just got lucky.
console = Console()
instructions = console.compile(instructions)
positions = {i[0] if i[1][0] == 'jmp' else None for i in enumerate(console.compile(instructions))}
positions -= {None}
for pos in positions:
console.__init__()
tmpInstruct = instructions.copy()
tmpInstruct[pos] = ('nop', tmpInstruct[pos][1])
acc, status = console.process(tmpInstruct)
if status == 0:
print(acc)
| 31.690476 | 98 | 0.583396 |
67e77f21e80bffc6d63b3d609643ba3804770c10 | 1,010 | py | Python | projects/20151163/api/api.py | universe3306/WebStudio2019 | f6827875c449e762bae21e0d4d4fc76187626930 | [
"MIT"
] | 14 | 2019-03-06T10:32:40.000Z | 2021-11-18T01:44:28.000Z | projects/20151163/api/api.py | universe3306/WebStudio2019 | f6827875c449e762bae21e0d4d4fc76187626930 | [
"MIT"
] | 35 | 2019-03-13T07:04:02.000Z | 2019-10-08T06:26:45.000Z | projects/20151163/api/api.py | universe3306/WebStudio2019 | f6827875c449e762bae21e0d4d4fc76187626930 | [
"MIT"
] | 22 | 2019-03-11T11:00:24.000Z | 2019-09-14T06:53:30.000Z | from flask import Flask, request, jsonify
from flask_restful import Api, Resource
from flask_cors import CORS
import json, os
from models import db, User
from UserList import UserList
from PicturesList import Picture, PicturesList, Uploader
basedir = os.path.dirname(os.path.abspath(__file__))
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'app.db')
app = Flask(__name__)
app.config.update({
'SQLALCHEMY_TRACK_MODIFICATIONS': True,
"SQLALCHEMY_DATABASE_URI": SQLALCHEMY_DATABASE_URI,
})
cors = CORS(app)
api = Api(app)
db.init_app(app)
api.add_resource(UserList, '/api/users')
api.add_resource(PicturesList, '/api/pictures')
api.add_resource(Picture, '/api/pictures/<name>')
api.add_resource(Uploader, '/api/pictures/new')
if __name__ == '__main__':
with app.app_context():
db.create_all()
app.run(host='0.0.0.0', port=5000, debug=True)
| 26.578947 | 72 | 0.725743 |
67e793c1f1db4accdabd37b5f3ae0c798f19a953 | 40,518 | py | Python | app.py | sharonytlau/dash-loan-calculator | b789d30953c8836cc5e861f36a66e73aace24e2c | [
"Apache-2.0"
] | 1 | 2021-10-30T14:41:15.000Z | 2021-10-30T14:41:15.000Z | app.py | sharonytlau/dash-loan-calculator | b789d30953c8836cc5e861f36a66e73aace24e2c | [
"Apache-2.0"
] | null | null | null | app.py | sharonytlau/dash-loan-calculator | b789d30953c8836cc5e861f36a66e73aace24e2c | [
"Apache-2.0"
] | null | null | null | # Ying Tung Lau - sharonlau@brandeis.edu
# Jiaying Yan - jiayingyan@brandeis.edu
# <editor-fold desc="import modules">
import pandas as pd
import numpy as np
import json
import os
import re
import dash
import dash_table
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from dash.dependencies import State, Input, Output
from dash.exceptions import PreventUpdate
import plotly.graph_objects as go
from algorithms.Helper import *
from algorithms.LoanImpacts import *
# </editor-fold>
# <editor-fold desc="dash app">
external_stylesheets = [dbc.themes.BOOTSTRAP,
'https://cdnjs.cloudflare.com/ajax/libs/font-awesome/4.7.0/css/font-awesome.min.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
app.config.suppress_callback_exceptions = True
# <editor-fold desc="app-components">
loan_input_card = dbc.Card(
[
dbc.CardHeader(
[
html.Div(
[
html.H1('LOAN SPECS'),
],
className='w-fit d-flex align-items-center text-nowrap'),
html.Div(
[
html.Div(
[
"Loan Number",
html.Div(
[
dbc.Button('-', color='light', id='decrease-loan',
className='symbol-style offset-2',
n_clicks=0),
dbc.Button('+', color='light', id='increase-loan',
className='symbol-style mr-1',
n_clicks=0),
], className='increment-btn'),
], className='number-widget pl-3'),
html.Div(
[
'Contribution Number',
dbc.Button('+', color='light', id='contribution-button',
className='symbol-style mr-1 increment-btn',
n_clicks=0, ),
], className='number-widget'),
]
, className="d-flex flex-column align-items-end"),
],
className='d-inline-flex justify-content-between'),
dbc.CardBody(
[
individual_loan_input(1, {'display': 'block'}),
individual_loan_input(2),
individual_loan_input(3),
], id="loan-card", className='input-card-body'),
], className='input-card'
)
# </editor-fold>
# <editor-fold desc="app-callbacks">
# %% alter input panel
# %%
# %% store input loan data
# %%
# %% Reset input
# %%
# %% Show checklist
# %% Show schedule figure
# Define functions for use of shedule figure
def get_Bar_principal(index, df_schedule):
palette = [dict(color='rgba(163, 201, 199, 1)', line=dict(color='rgba(163, 201, 199, 1)')),
dict(color='rgba(163, 201, 199, 0.7)', line=dict(color='rgba(163, 201, 199, 0.7)')),
dict(color='rgba(163, 201, 199, 0.4)', line=dict(color='rgba(163, 201, 199, 0.4)')),
]
fig = go.Bar(name='Loan{} Principal'.format(index + 1),
x=df_schedule['Payment Number'],
y=df_schedule['Applied Principal'],
marker=palette[index],
legendgroup=index,
)
return fig
def get_Bar_interest(index, df_schedule):
palette = [dict(color='rgba(236, 197, 76, 1)', line=dict(color='rgba(236, 197, 76, 1)')),
dict(color='rgba(236, 197, 76, 0.7)', line=dict(color='rgba(236, 197, 76, 0.7)')),
dict(color='rgba(236, 197, 76, 0.4)', line=dict(color='rgba(236, 197, 76, 0.4)')),
]
fig = go.Bar(name='Loan{} Interest'.format(index + 1),
x=df_schedule['Payment Number'],
y=df_schedule['Applied Interest'],
marker=palette[index],
legendgroup=index,
)
return fig
# %% Show contribution
def get_contribution_fig(df_impact):
fig = go.Figure()
trace_interest = go.Bar(
name="Total Interest Paid",
x=df_impact['Index'],
y=df_impact['InterestPaid'],
yaxis='y',
offsetgroup=1,
marker=dict(color='rgba(236, 197, 76, 1)')
)
trace_duration = go.Bar(
name="Loan Term",
x=df_impact['Index'],
y=df_impact['Duration'],
yaxis='y2',
offsetgroup=2,
marker=dict(color='rgba(163, 161, 161, 1)')
)
fig.add_trace(trace_interest)
fig.add_trace(trace_duration)
fig['layout'].update(
margin=dict(l=0, r=0, b=0, t=30),
paper_bgcolor='rgba(0,0,0,0)',
plot_bgcolor='rgba(0,0,0,0)',
legend=dict(xanchor='left', x=0, y=-0.25, orientation='h'), # , bordercolor = 'Black', borderwidth = 1
xaxis=dict(title="<b>Contributor</b>"),
yaxis=dict(title="<b>Total Interest Paid</b>",
range=[0.5 * max(df_impact['InterestPaid']), 1.1 * max(df_impact['InterestPaid'])], showgrid=False),
yaxis2=dict(title="<b>Loan Term</b>", anchor='x', overlaying='y', side='right', showgrid=False),
)
return fig
# %% Schedule Table
# %%
# </editor-fold>
# <editor-fold desc="app-layout">
app.layout = html.Div(
[
dcc.Store(id="apply-store"),
dcc.Store(id='loan-number'),
dcc.Store(id='store_df_impact'),
dbc.Alert(id='apply-alert', is_open=False, duration=4000, className='apply-alert'),
dbc.Row(
[
html.P('', className='bar-title title-icon'),
html.Div([
html.P('MULTI-LOAN CALCULATOR', className='bar-title'),
html.P('\u00a0\u00a0\u00a0- by Jiaying Yan, Ying Tung Lau', className='bar-author'),
], className='d-flex flex-column align-items-end'),
dbc.Tooltip(
'Need help on loan terminology? Click to see web article on loan amortization by Investopedia.',
target='info-button', placement='right'),
html.A([dbc.Button(html.I(className="fa fa-question"), className='info-button', color='dark',
outline=True, id='info-button')],
href='https://www.investopedia.com/terms/a/amortization_schedule.asp', target='_blank',
rel="noopener noreferrer", className='info-button-wrapper'),
],
className='bar'),
dbc.Row([
loan_input_card,
html.Div(
[
html.H1('Multi-loan', className='display-1 m-0 text-nowrap'),
html.H1('Calculator', className='display-1 text-nowrap mb-3'),
html.P(
'Our smart tool helps you manage multiple loans with ease, allowing calculation for '
'up to three loans and three contributions.',
className='pb-0 pt-3 m-0'),
html.P('Enter your loan specs on the left and click submit right now to see your loan schedules!',
className='pt-0 pb-2 m-0'),
html.Div([
dbc.Button("SUBMIT", color="primary", outline=True, id='apply-button', n_clicks=0,
className='apply-button'),
dbc.Button('Reset', color='danger', outline=True, id='reset-button', className='reset-button',
n_clicks=0)
], className="apply-btn-group"),
],
className='app-title'),
html.A(html.I(className="fa fa-chevron-down"), href='#row-2-target', style={'display': 'none'},
className='go-row-2', id='go-row-2')
], className='app-row-1'),
dbc.Row(
[
html.A(id='row-2-target', className='anchor-target'),
html.A(html.I(className="fa fa-chevron-up"), href='#top', className='return-to-top'),
html.Div(
[
html.H6('Amortization Schedule and Contribution Impact', className='display-4 row-2-title'),
html.P(
"See the interactive chart for amortization schedule of your loan portfolio. "),
html.P(
'Receiving contributions for repaying loans? Check or uncheck the contributor boxes to see changes'
' of your loan schedules under different combination of contributions, and compare the impact'
' on total interest and loan term among contributors.'),
dbc.Button([html.Span('Switch Chart\u00a0'), html.Span(html.I(className="fa fa-caret-right"))],
id='graph-switch-btn', className='switch-btn', n_clicks=0, color='dark',
outline=True)
], className='row-2-text'),
html.Div([
html.Div(
[
html.Div(id='impact_banner', className='impact_banner'),
dbc.Checklist(id='contribution_checklist'),
dcc.Graph(id='schedule', figure=go.Figure(), className='graph-schedule')
], style={'display': 'flex'}, id='graph-schedule', className='graph-schedule-wrapper'
),
dcc.Graph(id='contribution', figure=go.Figure(), className='graph-contribution', style={'display': 'none'}),
], className='graph-container')
],
className='app-row-2', id='row-2', style={'display': 'none'}),
dbc.Row(
[
html.A(id='row-3-target', className='anchor-target'),
html.A(html.I(className="fa fa-chevron-up"), href='#top', className='return-to-top'),
html.H6('Amortization Table', className='display-4 row-3-title'),
html.Div(
[
dcc.RadioItems(id='dropdown_schedule'),
html.Div(dash_table.DataTable(
id='table_schedule',
style_table={'overflowY': 'auto'},
style_cell={'textOverflow': 'ellipsis', },
style_header={'bacgroundColor': 'white', 'fontWeight': 'bold'},
style_as_list_view=True,
), className="table-wrapper"),
], className='schedule-table-group'),
],
className='app-row-3', id='row-3', style={'display': 'none'}),
], className='app-body'
)
app.run_server(debug=False, use_reloader=False)
# </editor-fold>
# </editor-fold> | 45.88675 | 145 | 0.542722 |
67e7da06bf5b0c480be1e68da30d3dd8280232f5 | 2,888 | py | Python | examples/advanced-topics/IIR-FIR/delay_channels.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 21 | 2021-05-21T08:23:34.000Z | 2022-03-25T11:30:55.000Z | examples/advanced-topics/IIR-FIR/delay_channels.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 9 | 2021-05-13T19:56:00.000Z | 2021-12-21T05:11:04.000Z | examples/advanced-topics/IIR-FIR/delay_channels.py | qua-platform/qua-libs | 805a3b1a69980b939b370b3ba09434bc26dc45ec | [
"BSD-3-Clause"
] | 2 | 2021-06-21T10:56:40.000Z | 2021-12-19T14:21:33.000Z | import scipy.signal as sig
import numpy as np
from qm.qua import *
import matplotlib.pyplot as plt
import warnings
from qm.QuantumMachinesManager import (
SimulationConfig,
QuantumMachinesManager,
LoopbackInterface,
)
ntaps = 40
delays = [0, 22, 22.25, 22.35]
for delay in delays:
delay_gaussian(delay, ntaps)
plt.legend(delays)
plt.axis([270, 340, -0.01, 0.26])
| 28.594059 | 88 | 0.480956 |
67e7dfe8a3a11d78c472c0f64358e33daa1e6979 | 1,696 | py | Python | listener.py | chrismarget/ios-icmp-channel | b2a09f1c345816f525a3f7aed6a562631b0fc7e6 | [
"Apache-2.0"
] | 1 | 2018-01-30T01:53:20.000Z | 2018-01-30T01:53:20.000Z | listener.py | chrismarget/ios-icmp-channel | b2a09f1c345816f525a3f7aed6a562631b0fc7e6 | [
"Apache-2.0"
] | null | null | null | listener.py | chrismarget/ios-icmp-channel | b2a09f1c345816f525a3f7aed6a562631b0fc7e6 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import sys
s = open_icmp_sniffer()
messages = {}
while True:
p = s.recvfrom(65565)
sender = p[1][0]
sequence = ord(p[0][-2])
payload = p[0][-1]
if sender not in messages.keys():
messages[sender] = message(sender, sequence, payload)
else:
messages[sender].add(sequence, payload)
| 27.803279 | 71 | 0.56191 |
67e9dd76bdad3ed45018c88774b6229ebe78a253 | 12,780 | py | Python | hapiclient/util.py | hbatta/client-python | 1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4 | [
"BSD-3-Clause"
] | null | null | null | hapiclient/util.py | hbatta/client-python | 1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4 | [
"BSD-3-Clause"
] | null | null | null | hapiclient/util.py | hbatta/client-python | 1c1d32fce9e84bc1a4938ae7adc30cef8d682aa4 | [
"BSD-3-Clause"
] | null | null | null | def setopts(defaults, given):
"""Override default keyword dictionary options.
kwargs = setopts(defaults, kwargs)
A warning is shown if kwargs contains a key not found in default.
"""
# Override defaults
for key, value in given.items():
if type(given[key]) == dict:
setopts(defaults[key], given[key])
continue
if key in defaults:
defaults[key] = value
else:
warning('Ignoring invalid keyword option "%s".' % key)
return defaults
def log(msg, opts):
"""Print message to console or file."""
import os
import sys
if not 'logging' in opts:
opts = opts.copy()
opts['logging'] = False
pre = sys._getframe(1).f_code.co_name + '(): '
if isinstance(opts['logging'], bool) and opts['logging']:
if pythonshell() == 'jupyter-notebook':
# Don't show full path information.
msg = msg.replace(opts['cachedir'] + os.path.sep, '')
msg = msg.replace(opts['cachedir'], '')
print(pre + msg)
elif hasattr(opts['logging'], 'write'):
opts['logging'].write(pre + msg + "\n")
opts['logging'].flush()
else:
pass # TODO: error
def jsonparse(res, url):
"""Try/catch of json.loads() function with short error message."""
from json import loads
try:
return loads(res.read().decode('utf-8'))
except:
error('Could not parse JSON from %s' % url)
def pythonshell():
"""Determine python shell
pythonshell() returns
'shell' if started python on command line using "python"
'ipython' if started ipython on command line using "ipython"
'ipython-notebook' if running in Spyder or started with "ipython qtconsole"
'jupyter-notebook' if running in a Jupyter notebook started using executable
named jupyter-notebook
On Windows, jupyter-notebook cannot be detected and ipython-notebook
will be returned.
See also https://stackoverflow.com/a/37661854
"""
import os
env = os.environ
program = ''
if '_' in env:
program = os.path.basename(env['_'])
shell = 'shell'
try:
shell_name = get_ipython().__class__.__name__
if shell_name == 'TerminalInteractiveShell':
shell = 'ipython'
elif shell_name == 'ZMQInteractiveShell':
if 'jupyter-notebook' in program:
shell = 'jupyter-notebook'
else:
shell = 'ipython-notebook'
# Not needed, but could be used
#if 'spyder' in sys.modules:
# shell = 'spyder-notebook'
except:
pass
return shell
def warning_test():
"""For testing warning function."""
# Should show warnings in order and only HAPIWarning {1,2} should
# have a different format
from warnings import warn
warn('Normal warning 1')
warn('Normal warning 2')
warning('HAPI Warning 1')
warning('HAPI Warning 2')
warn('Normal warning 3')
warn('Normal warning 4')
def warning(*args):
"""Display a short warning message.
warning(message) raises a warning of type HAPIWarning and displays
"Warning: " + message. Use for warnings when a full stack trace is not
needed.
"""
import warnings
from os import path
from sys import stderr
from inspect import stack
message = args[0]
if len(args) > 1:
fname = args[1]
else:
fname = stack()[1][1]
#line = stack()[1][2]
fname = path.basename(fname)
# Custom warning format function
# Copy default showwarning function
showwarning_default = warnings.showwarning
# Use custom warning function instead of default
warnings.showwarning = _warning
# Raise warning
warnings.warn(message, HAPIWarning)
def error(msg, debug=False):
"""Display a short error message.
error(message) raises an error of type HAPIError and displays
"Error: " + message. Use for errors when a full stack trace is not needed.
If debug=True, full stack trace is shown.
"""
import sys
from inspect import stack
from os import path
debug = False
if pythonshell() != 'shell':
try:
from IPython.core.interactiveshell import InteractiveShell
except:
pass
sys.stdout.flush()
fname = stack()[1][1]
fname = path.basename(fname)
#line = stack()[1][2]
if pythonshell() == 'shell':
sys.excepthook = exception_handler
else:
try:
# Copy default function
showtraceback_default = InteractiveShell.showtraceback
# TODO: Use set_custom_exc
# https://ipython.readthedocs.io/en/stable/api/generated/IPython.core.interactiveshell.html
InteractiveShell.showtraceback = exception_handler_ipython
except:
# IPython over-rides this, so this does nothing in IPython shell.
# https://stackoverflow.com/questions/1261668/cannot-override-sys-excepthook
# Don't need to copy default function as it is provided as sys.__excepthook__.
sys.excepthook = exception_handler
raise HAPIError(msg)
def head(url):
"""HTTP HEAD request on URL."""
import urllib3
http = urllib3.PoolManager()
try:
res = http.request('HEAD', url, retries=2)
if res.status != 200:
raise Exception('Head request failed on ' + url)
return res.headers
except Exception as e:
raise e
return res.headers
def urlopen(url):
"""Wrapper to request.get() in urllib3"""
import sys
from json import load
# https://stackoverflow.com/a/2020083
import urllib3
c = " If problem persists, a contact email for the server may be listed "
c = c + "at http://hapi-server.org/servers/"
try:
http = urllib3.PoolManager()
res = http.request('GET', url, preload_content=False, retries=2)
if res.status != 200:
try:
jres = load(res)
if 'status' in jres:
if 'message' in jres['status']:
error('\n%s\n %s\n' % (url, jres['status']['message']))
error("Problem with " + url + \
". Server responded with non-200 HTTP status (" \
+ str(res.status) + \
") and invalid HAPI JSON error message in response body." + c)
except:
error("Problem with " + url + \
". Server responded with non-200 HTTP status (" + \
str(res.status) + \
") and no HAPI JSON error message in response body." + c)
except urllib3.exceptions.NewConnectionError:
error('Connection error for : ' + url + c)
except urllib3.exceptions.ConnectTimeoutError:
error('Connection timeout for: ' + url + c)
except urllib3.exceptions.MaxRetryError:
error('Failed to connect to: ' + url + c)
except urllib3.exceptions.ReadTimeoutError:
error('Read timeout for: ' + url + c)
except urllib3.exceptions.LocationParseError:
error('Could not parse URL: ' + url)
except urllib3.exceptions.LocationValueError:
error('Invalid URL: ' + url)
except urllib3.exceptions.HTTPError as e:
error('Exception ' + get_full_class_name(e) + " for: " + url)
except Exception as e:
error(type(sys.exc_info()[1]).__name__ + ': ' \
+ str(e) + ' for URL: ' + url)
return res
def urlretrieve(url, fname, check_last_modified=False, **kwargs):
"""Download URL to file
urlretrieve(url, fname, check_last_modified=False, **kwargs)
If check_last_modified=True, `fname` is found, URL returns Last-Modfied
header, and `fname` timestamp is after Last-Modfied timestamp, the URL
is not downloaded.
"""
import shutil
from os import path, utime, makedirs
from time import mktime, strptime
if check_last_modified:
if modified(url, fname, **kwargs):
log('Downloading ' + url + ' to ' + fname, kwargs)
res = urlretrieve(url, fname, check_last_modified=False)
if "Last-Modified" in res.headers:
# Change access and modfied time to match that on server.
# TODO: Won't need if using file.head in modified().
urlLastModified = mktime(strptime(res.headers["Last-Modified"],
"%a, %d %b %Y %H:%M:%S GMT"))
utime(fname, (urlLastModified, urlLastModified))
else:
log('Local version of ' + fname + ' is up-to-date; using it.', kwargs)
dirname = path.dirname(fname)
if not path.exists(dirname):
makedirs(dirname)
with open(fname, 'wb') as out:
res = urlopen(url)
shutil.copyfileobj(res, out)
return res
def modified(url, fname, **kwargs):
"""Check if timestamp on file is later than Last-Modifed in HEAD request"""
from os import stat, path
from time import mktime, strptime
debug = False
if not path.exists(fname):
return True
# HEAD request on url
log('Making head request on ' + url, kwargs)
headers = head(url)
# TODO: Write headers to file.head
if debug:
print("Header:\n--\n")
print(headers)
print("--")
# TODO: Get this from file.head if found
fileLastModified = stat(fname).st_mtime
if "Last-Modified" in headers:
urlLastModified = mktime(strptime(headers["Last-Modified"],
"%a, %d %b %Y %H:%M:%S GMT"))
if debug:
print("File Last Modified = %s" % fileLastModified)
print("URL Last Modified = %s" % urlLastModified)
if urlLastModified > fileLastModified:
return True
return False
else:
if debug:
print("No Last-Modified header. Will re-download")
# TODO: Read file.head and compare etag
return True
def urlquote(url):
"""Python 2/3 urlquote compatability function.
If Python 3, returns
urllib.parse.quote(url)
If Python 2, returns
urllib.quote(url)
"""
import sys
if sys.version_info[0] == 2:
from urllib import quote
return quote(url)
import urllib.parse
return urllib.parse.quote(url)
| 30.356295 | 103 | 0.590141 |
67ea232a964b415b5c48734cb2b31e366146e901 | 269 | py | Python | docs/examples/combine-configs/convert.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 19 | 2021-03-18T16:48:03.000Z | 2022-03-02T13:09:21.000Z | docs/examples/combine-configs/convert.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 3 | 2021-04-23T23:03:29.000Z | 2021-05-11T14:09:16.000Z | docs/examples/combine-configs/convert.py | Mbompr/fromconfig | eb34582c79a9a9e3b9e60d41fec2ac6a619e9c27 | [
"Apache-2.0"
] | 3 | 2021-04-19T22:05:34.000Z | 2022-02-21T11:32:16.000Z | """Convert file format."""
import fire
import fromconfig
def convert(path_input, path_output):
"""Convert input into output with load and dump."""
fromconfig.dump(fromconfig.load(path_input), path_output)
if __name__ == "__main__":
fire.Fire(convert)
| 17.933333 | 61 | 0.717472 |
67eb8e7c17780b803858f13f5e39eadc802e465d | 11,257 | py | Python | pyfibot/modules/module_rss.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | pyfibot/modules/module_rss.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | pyfibot/modules/module_rss.py | aapa/pyfibot | a8a4330d060b05f0ce63cbcfc6915afb8141955f | [
"BSD-3-Clause"
] | null | null | null | from __future__ import unicode_literals, print_function, division
import feedparser
import dataset
from twisted.internet.reactor import callLater
from threading import Thread
import twisted.internet.error
import logging
logger = logging.getLogger('module_rss')
DATABASE = None
updater = None
botref = None
config = {}
def init(bot, testing=False):
''' Initialize updater '''
global DATABASE
global config
global botref
global updater
global logger
if testing:
DATABASE = dataset.connect('sqlite:///:memory:')
else:
DATABASE = dataset.connect('sqlite:///databases/rss.db')
logger.info('RSS module initialized')
botref = bot
config = bot.config.get('rss', {})
finalize()
# As there's no signal if this is a rehash or restart
# update feeds in 30 seconds
updater = callLater(30, update_feeds)
def finalize():
''' Finalize updater (rehash etc) so we don't leave an updater running '''
global updater
global logger
logger.info('RSS module finalized')
if updater:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = None
def get_feeds(**kwargs):
''' Get feeds from database '''
return [
Feed(f['network'], f['channel'], f['id'])
for f in list(DATABASE['feeds'].find(**kwargs))
]
def find_feed(network, channel, **kwargs):
''' Find specific feed from database '''
f = DATABASE['feeds'].find_one(network=network, channel=channel, **kwargs)
if not f:
return
return Feed(f['network'], f['channel'], f['id'])
def add_feed(network, channel, url):
''' Add feed to database '''
f = Feed(network=network, channel=channel, url=url)
return (f.initialized, f.read())
def remove_feed(network, channel, id):
''' Remove feed from database '''
f = find_feed(network=network, channel=channel, id=int(id))
if not f:
return
DATABASE['feeds'].delete(id=f.id)
DATABASE['items_%i' % (f.id)].drop()
return f
def update_feeds(cancel=True, **kwargs):
# from time import sleep
''' Update all feeds in the DB '''
global config
global updater
global logger
logger.info('Updating RSS feeds started')
for f in get_feeds(**kwargs):
Thread(target=f.update).start()
# If we get a cancel, cancel the existing updater
# and start a new one
# NOTE: Not sure if needed, as atm cancel isn't used in any command...
if cancel:
try:
updater.cancel()
except twisted.internet.error.AlreadyCalled:
pass
updater = callLater(5 * 60, update_feeds)
if __name__ == '__main__':
f = Feed('ircnet', '#pyfibot', 'http://feeds.feedburner.com/ampparit-kaikki?format=xml')
f.read()
for i in f.get_new_items(True):
print(i)
| 32.819242 | 111 | 0.587634 |
67ec5c96d81577346cea04b4409e2275d4e56466 | 15,335 | py | Python | main.py | omidsakhi/progressive_introvae | 8f052ca7202196fe214ea238afe60e806660d6d4 | [
"MIT"
] | 5 | 2018-10-19T03:30:27.000Z | 2019-03-25T06:01:27.000Z | main.py | omidsakhi/progressive_introvae | 8f052ca7202196fe214ea238afe60e806660d6d4 | [
"MIT"
] | 1 | 2019-03-27T08:39:55.000Z | 2019-03-27T08:39:55.000Z | main.py | omidsakhi/progressive_introvae | 8f052ca7202196fe214ea238afe60e806660d6d4 | [
"MIT"
] | null | null | null | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os, ops, utils
# Standard Imports
from absl import flags
import absl.logging as _logging # pylint: disable=unused-import
import numpy as np
import tensorflow as tf
from PIL import Image
import input_pipelines
import models
from tensorflow.contrib.tpu.python.tpu import tpu_config # pylint: disable=E0611
from tensorflow.contrib.tpu.python.tpu import tpu_estimator # pylint: disable=E0611
from tensorflow.contrib.tpu.python.tpu import tpu_optimizer # pylint: disable=E0611
from tensorflow.python.estimator import estimator # pylint: disable=E0611
FLAGS = flags.FLAGS
global dataset
dataset = input_pipelines
USE_TPU = False
DRY_RUN = False
# Cloud TPU Cluster Resolvers
flags.DEFINE_string(
'tpu', default='omid-sakhi',
help='The Cloud TPU to use for training. This should be either the name '
'used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.')
flags.DEFINE_string(
'gcp_project', default=None,
help='Project name for the Cloud TPU-enabled project. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string(
'tpu_zone', default=None,
help='GCE zone where the Cloud TPU is located in. If not specified, we '
'will attempt to automatically detect the GCE project from metadata.')
flags.DEFINE_string('data_dir', 'gs://os_celeba/dataset' if USE_TPU else 'C:/Projects/datasets/tfr-celeba128',
'Bucket/Folder that contains the data tfrecord files')
flags.DEFINE_string(
'model_dir', 'gs://os_celeba/output1' if USE_TPU else './output', 'Output model directory')
flags.DEFINE_integer('noise_dim', 256,
'Number of dimensions for the noise vector')
flags.DEFINE_integer('batch_size', 128 if USE_TPU else 32,
'Batch size for both generator and discriminator')
flags.DEFINE_integer('start_resolution', 8,
'Starting resoltuion')
flags.DEFINE_integer('end_resolution', 128,
'Ending resoltuion')
flags.DEFINE_integer('resolution_steps', 10000 if not DRY_RUN else 60,
'Resoltuion steps')
flags.DEFINE_integer('num_shards', 8, 'Number of TPU chips')
flags.DEFINE_integer('train_steps', 500000, 'Number of training steps')
flags.DEFINE_integer('train_steps_per_eval', 5000 if USE_TPU else (200 if not DRY_RUN else 20) ,
'Steps per eval and image generation')
flags.DEFINE_integer('iterations_per_loop', 500 if USE_TPU else (50 if not DRY_RUN else 5) ,
'Steps per interior TPU loop. Should be less than'
' --train_steps_per_eval')
flags.DEFINE_float('learning_rate', 0.001, 'LR for both D and G')
flags.DEFINE_boolean('eval_loss', False,
'Evaluate discriminator and generator loss during eval')
flags.DEFINE_boolean('use_tpu', True if USE_TPU else False,
'Use TPU for training')
flags.DEFINE_integer('num_eval_images', 100,
'Number of images for evaluation')
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
tf.app.run(main)
| 44.708455 | 159 | 0.637105 |
67ecb4f05375d9a4dfbfec0d8b5a28b3678e0e4e | 172 | py | Python | docs/examples/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 3,425 | 2016-11-10T17:12:42.000Z | 2022-03-31T19:07:49.000Z | docs/examples/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | docs/examples/timer.py | vlcinsky/nameko | 88d7e5211de4fcc1c34cd7f84d7c77f0619c5f5d | [
"Apache-2.0"
] | 420 | 2016-11-17T05:46:42.000Z | 2022-03-23T12:36:06.000Z | from nameko.timer import timer
| 17.2 | 38 | 0.627907 |
67ed812b563acfcc4e10ecbff190182561180c0d | 752 | py | Python | app/controllers/config/system/slack.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 152 | 2020-12-07T13:26:53.000Z | 2022-03-23T02:00:04.000Z | app/controllers/config/system/slack.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 16 | 2020-12-07T17:04:36.000Z | 2022-03-10T11:12:52.000Z | app/controllers/config/system/slack.py | grepleria/SnitchDNS | 24f98b01fd5fca9aa2c660d6ee15742f2e44915c | [
"MIT"
] | 36 | 2020-12-09T13:04:40.000Z | 2022-03-12T18:14:36.000Z | from .. import bp
from flask import request, render_template, flash, redirect, url_for
from flask_login import current_user, login_required
from app.lib.base.provider import Provider
from app.lib.base.decorators import admin_required
| 26.857143 | 85 | 0.743351 |
67edef8325e323ad0e7a7ee375973574e5b9dbb3 | 845 | py | Python | setup.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | 1 | 2021-07-07T06:54:43.000Z | 2021-07-07T06:54:43.000Z | setup.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | null | null | null | setup.py | 7AM7/Arabic-dialects-segmenter-with-flask | a69e060fa25a5905864dae7d500c4f46436e0c40 | [
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name='FarasaPy3',
version='3.0.0',
packages=find_packages(exclude=['tests*']),
license='MIT',
description='Farasa (which means insight in Arabic), is a fast and accurate text processing toolkit for Arabic text.',
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=['requests', 'json'],
url='https://github.com/ahmed451/SummerInternship2020-PyPIFarasa/tree/master/7AM7',
author='AM7',
author_email='ahmed.moorsy798@gmail.com',
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 32.5 | 124 | 0.673373 |
67eef460ddcba049717ee205dce3da7ab1a62a5b | 45,026 | py | Python | oldversion/crystIT_v0.1.py | GKieslich/crystIT | 2632b544b3ec0f4893f84aa6bb73f03a7f3c0890 | [
"MIT"
] | 4 | 2020-10-14T04:35:40.000Z | 2022-03-31T08:11:40.000Z | oldversion/crystIT_v0.1.py | GKieslich/crystIT | 2632b544b3ec0f4893f84aa6bb73f03a7f3c0890 | [
"MIT"
] | null | null | null | oldversion/crystIT_v0.1.py | GKieslich/crystIT | 2632b544b3ec0f4893f84aa6bb73f03a7f3c0890 | [
"MIT"
] | null | null | null | import ase
from ase.spacegroup import crystal
from ase.units import kB,mol,kJ
import spglib
import pyxtal
from pyxtal.symmetry import Group
import numpy # arrays
import math # log
import os.path # isfile, isdir
import copy # copy dictionary
import glob # iterate through dir
import time # for batch processing
import io # creating file from string
import multiprocessing # for batch mode
import warnings # control warning output
import traceback # detailed error messages
warningCache = '' #
# Default Settings
symmetryTolerance = 5e-3 # distance tolerance in cartesian coordinates to find crystal symmetry
occupancy = False # show menu to correct occupancy values
maxThreads = multiprocessing.cpu_count() # maximum no of parallel threads
decimalSeparator = '.'
entropyOptions = False # calculation of entropy values from Krivovichev (2016)
recursive = False # subdirectory scanning in batch mode
# except for userMenu() these settings are usually forwarded through function parameters, as nested functions sometimes do not realize that global variables have been changed
# Program Information
programName = 'crystIT'
paper = 'Kauler, Kieslich (2020): unpublished'
versionNumber = '0.1'
releaseDate = '2020-09-22'
authors = 'Clemens Kauler and Gregor Kieslich'
institution = 'Technical University of Munich'
def getComplexity(structure, pathToCif, verbose, entropy, sym):
"""
calculates complexity of crystal structure based on an ASE Atoms object (including tags, storing CIF data-block)
Parameters:
arg1 (Atoms): ASE Atoms object, including CIF data tags (store_tags = True)
arg2 (string): path to CIF
arg3 (bool): output results to console (True) or suppress console output and return result array (False)
arg4 (bool): entropy options
arg5 (float): symmetry tolerance value in cartesian coordinates
Returns: if (arg3 == False): array will be returned; most important variables given below:
if (arg4 == True): values in {brackets} are returned additionally
array:
warningCache, errors and warnings
chemical_formula, chemical formula composed from CIF-entry, ignoring dummy entries
aSG, spacegroup assumed by spglib
SG, spacegroup given in CIF
atomsPerUnitCell, number of atoms per crystallographic unit cell (vacancies do not count as atoms)
atomsPerPrimitiveUnitCell, number of atoms per primitive unit cell (vacancies do not count as atoms)
positionsPerPrimitiveUnitCell, amount of positions per primitive unit cell, corresponding to the sum over the crystallographic orbits' multiplicities
uniqueSpecies, number of unique species, defined by combination of element (vacancies count as elements too) and crystallographic orbit
aritySum, number of coordinational degrees of freedom per reduced unit cell
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density, {S_comb_max_molar, Delta_S_comb_molar,} combinatorial information, as defined by S. Krivovichev in 2014 (corresponding to I_G, I_G,max, I_G,norm, I_G,total, rho_inf, S_cfg_max, Delta S), but extended by partial occupancies
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density, {S_coor_max_molar, Delta_S_coor_molar,} coordinational information, as defined by W. Hornfeck in 2020, NOTE: sum over unique Wyckoff positions
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density, {S_conf_max_molar, Delta_S_conf_molar} configurational information, as defined by W. Hornfeck in 2020
"""
if not verbose:
global warningCache
# direct input of ASE Atoms object into spglib is deprecated!
cell = (
structure.get_cell(),
structure.get_scaled_positions(),
structure.get_atomic_numbers()
)
# find reduced unit cell
primitiveCell = spglib.find_primitive(cell, symprec = sym)
# get symmetry from reduced unit cell
primitiveDataset = spglib.get_symmetry_dataset(primitiveCell, symprec = sym)
primitiveCrystallographicOrbits = primitiveDataset['crystallographic_orbits']
primitiveWyckoff = primitiveDataset['wyckoffs']
# compare spacegroup set in CIF (SG) with assumed spacegroup (aSG)
cifTags = structure.info.copy()
try:
iSG = cifTags['_symmetry_space_group_name_h-m']
except:
try:
iSG = cifTags['_space_group_name_h-m_alt']
except:
iSG = 'not set'
try:
iSGNo = str(cifTags['_symmetry_int_tables_number'])
except:
try:
iSGNo = str(cifTags['_space_group_it_number'])
except:
iSGNo = 'not set'
SG = iSG + ' (' + iSGNo + ')'
aSG = spglib.get_spacegroup(cell, symprec = sym)
groupnumber = aSG[aSG.index('(')+1:aSG.index(')')]
if not iSGNo == 'not set' and not iSGNo == groupnumber:
if verbose:
print(f'Wrong space group detected by spglib: {groupnumber} vs. {iSGNo} given in CIF. Try to alter the symmetry tolerance value. Continuing with fingers crossed.')
else:
warningCache += f'Wrong space group detected by spglib: {groupnumber} vs. {iSGNo} given in CIF. Try to alter the symmetry tolerance value. Continuing with fingers crossed. '
# gather some more info about publication (batch documentation)
try:
journal = str(cifTags['_journal_name_full']).replace('\n', ' ').replace(';', ',')
except:
journal = ''
try:
year = str(cifTags['_journal_year'])
except:
year = ''
try:
doi = str(cifTags['_journal_paper_doi']).replace(';', '')
except:
doi = ''
# compose matrix of wyckoff letters, multiplicities and arities for all crystallographic orbits
g = Group(int(groupnumber))
iCrystallographicOrbits = {}
equivalenceClassNumber = 0
for x in numpy.unique(primitiveCrystallographicOrbits):
iCrystallographicOrbits[equivalenceClassNumber, 0] = numpy.count_nonzero(primitiveCrystallographicOrbits == x) # 0 - multiplicity (in context of red uc)
wyckoffLetter = primitiveWyckoff[list(primitiveCrystallographicOrbits).index(x)]
iCrystallographicOrbits[equivalenceClassNumber, 1] = wyckoffLetter #1 - wyckoff letter
iCrystallographicOrbits[equivalenceClassNumber, 2] = getArity(g[wyckoffLetter]) #2 - arity
equivalenceClassNumber += 1
arityArray = []
for x in numpy.unique(primitiveWyckoff):
arityArray.append(getArity(g[str(x)]))
# identify duplicate atoms (same x,y,z coordinates = same cryst orbit) from structure in order to condense occupancyDict for all entries with identical coordinates!
try:
atomSiteTypeSymbol = []
for entry in cifTags['_atom_site_type_symbol']:
if len(entry) > 1 and entry[1].islower():
atomSiteTypeSymbol.append(entry[0:2])
else:
atomSiteTypeSymbol.append(entry[0])
except:
# sometimes _atom_site_type_symbol isn't set, usually when there are no fractional occupancies to consider -> extract atom species from _atom_site_label
atomSiteTypeSymbol = []
for entry in cifTags['_atom_site_label']:
if len(entry) > 1 and entry[1].islower():
atomSiteTypeSymbol.append(entry[0:2])
else:
atomSiteTypeSymbol.append(entry[0])
duplicateArray = []
identPos = []
for x in range(0, len(atomSiteTypeSymbol)):
XYZInfo = [
cifTags['_atom_site_fract_x'][x],
cifTags['_atom_site_fract_y'][x],
cifTags['_atom_site_fract_z'][x]
]
# check whether coordinates of current atom are already contained in identPos
for y in range(0, len(identPos)):
if numpy.allclose(XYZInfo, identPos[y], atol = sym):
duplicateArray.append([x, y])
break
identPos.append(XYZInfo)
discrepancy = len(atomSiteTypeSymbol) - equivalenceClassNumber - len(duplicateArray)
if discrepancy > 0:
# same crystallographic orbit has probably been reached with different coordinates (e.g. GITWIQ)
# ==> construct all symmetrically equivalent positions & compare with priors. Requires significantly more computing power, therefore only executed in second step...
duplicateArray = []
symEquivPos = []
for x in range(0, len(atomSiteTypeSymbol)):
duplicate = False
XYZInfo = [
cifTags['_atom_site_fract_x'][x],
cifTags['_atom_site_fract_y'][x],
cifTags['_atom_site_fract_z'][x]
]
# check whether coordinates of current atom are already contained in symEquivPos
for y in range(0, len(symEquivPos)):
for pos in symEquivPos[y]:
if numpy.allclose(XYZInfo, pos, atol = sym):
duplicateArray.append([x, y])
duplicate = True
break
if duplicate:
break
if not duplicate:
# generate all symmetrically equivalent positions
offset = len(duplicateArray) # if duplicates were identified, x has to be reduced
wyckoffLetter = iCrystallographicOrbits[x-offset, 1]
arity = iCrystallographicOrbits[x-offset, 2]
# using partially parametrized positions ==> find out which wyckoff instance is present and isolate actual (x,y,z)
if arity > 0:
lineNo = -1
for line in str(g[wyckoffLetter]).split('\n'):
if lineNo == -1:
lineNo += 1
continue
elements = line.split(',')
matches = 0
for y in range(0, 3):
if(
'x' not in elements[y]
and 'y' not in elements[y]
and 'z' not in elements[y]
and XYZInfo[y] == eval(elements[y])
):
matches += 1
if matches == (3 - arity):
correctedXYZInfo = [0, 0, 0]
for z in range (0, 3):
if 'x' in elements[z]:
correctedXYZInfo[0] = correctCoordinates(elements[z], 'x', XYZInfo[z])
elif 'y' in elements[z]:
correctedXYZInfo[1] = correctCoordinates(elements[z], 'y', XYZInfo[z])
elif 'z' in elements[z]:
correctedXYZInfo[2] = correctCoordinates(elements[z], 'z', XYZInfo[z])
XYZInfo = correctedXYZInfo
break
lineNo += 1
symEquivPos.append(
pyxtal.operations.filtered_coords(
pyxtal.operations.apply_ops(XYZInfo, g[wyckoffLetter])
)
)
else:
symEquivPos.append([])
discrepancy = len(atomSiteTypeSymbol) - equivalenceClassNumber - len(duplicateArray)
if discrepancy == 0:
# compose own occupancyDict, as too many errors may occur while correcting the one given by ASE (structure.info['occupancy'])
try:
siteOccupancy = cifTags['_atom_site_occupancy']
except:
siteOccupancy = []
for i in range(0, len(atomSiteTypeSymbol)):
siteOccupancy.append(1)
occupancyDict = {}
offset = 0
for i in range(0, equivalenceClassNumber):
# ignore duplicates
for entry in duplicateArray:
if entry[0] == (i+offset):
offset += 1
# add value
occupancyDict[i] = {}
occupancyDict[i][atomSiteTypeSymbol[i + offset]] = siteOccupancy[i + offset]
# add all duplicates
for entry in duplicateArray:
if entry[1] == (i + offset):
try:
occupancyDict[i][atomSiteTypeSymbol[entry[0]]] += siteOccupancy[entry[0]]
except:
occupancyDict[i][atomSiteTypeSymbol[entry[0]]] = siteOccupancy[entry[0]]
# double check for too high occupancy value at current crystallographic orbit
occupancySum = 0
for element in occupancyDict[i]:
occupancySum += occupancyDict[i][element]
if occupancySum > 1:
if verbose:
print(f'Warning: Occupancy sum {occupancySum} at Wyckoff {iCrystallographicOrbits[i, 0]}{iCrystallographicOrbits[i, 1]}, crystallographic orbit #{i}: {occupancyDict[i]}.')
else:
warningCache += f'Warning: Occupancy sum {occupancySum} at Wyckoff {iCrystallographicOrbits[i, 0]}{iCrystallographicOrbits[i, 1]}, crystallographic orbit #{i}: {occupancyDict[i]}. '
elif verbose:
print(f'Error: discrepancy of {discrepancy} positions between crystallographic orbits calculated by spglib and given CIF-entries. Wrong space group detected? Try to adjust symmetry tolerance!')
return
else:
warningCache += f'Error: discrepancy of {discrepancy} positions between crystallographic orbits calculated by spglib and given CIF-entries. Wrong space group detected? Try to adjust symmetry tolerance! '
return [warningCache, pathToCif]
# allow corrections if occupancy options are enabled
if occupancy:
if '[' in pathToCif or verbose == False:
print('\n\n'+pathToCif)
occupancyDict = correctOccupancy(occupancyDict, iCrystallographicOrbits)
# determine number of atoms in primitive unit cell and thereby compose sum formula
# w/ occupancy (find gcd of crystal orbit muliplicities, consider occupancy)
wyckoffSum = 0.0
chemicalFormulaDict = {}
numbers = []
for i in range(0, equivalenceClassNumber):
numbers.append(iCrystallographicOrbits[i, 0])
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
counter = 0
for x in occupancyDict:
multiplicity = iCrystallographicOrbits[counter, 0]
for element in occupancyDict[x]:
try:
chemicalFormulaDict[element] += occupancyDict[x][element] * multiplicity / divisor
except:
chemicalFormulaDict[element] = occupancyDict[x][element] * multiplicity / divisor
wyckoffSum += occupancyDict[x][element] * multiplicity
counter += 1
# sometimes gcd of multiplicities does not yield empirical formula (e.g. Cu2P6O18Li2 / MnN10C18H28)
# better safe than sorry: try to reduce formula a second time
# (multiplicity approach still implemented bc fractional occupancies often complicate computation of gcd)
numbers = []
for element in chemicalFormulaDict:
# suppose: a) lacking precision
if abs(chemicalFormulaDict[element] - round(chemicalFormulaDict[element])) < 0.1:
numbers.append(round(chemicalFormulaDict[element]))
# or b) more severe defects
else:
numbers.append(math.ceil(chemicalFormulaDict[element]))
if not numbers:
divisor = 1
else:
divisor = gcd(numbers)
if divisor < 0:
divisor = 1
# compose assumed chemical formula
chemical_formula = ''
for element in sorted(chemicalFormulaDict):
stoichiometry = chemicalFormulaDict[element] / divisor
if stoichiometry == 1:
stoichiometry = ''
elif stoichiometry % 1 == 0:
stoichiometry = str(int(stoichiometry))
else:
stoichiometry = str(stoichiometry)
chemical_formula = chemical_formula + element + stoichiometry
atomsPerPrimitiveUnitCell = wyckoffSum
atomsPerUnitCell = wyckoffSum * len(structure) / len(primitiveCrystallographicOrbits)
positionsPerPrimitiveUnitCell = 0 # sum over multiplicities of all crystallographic orbits
for x in range(0, equivalenceClassNumber):
positionsPerPrimitiveUnitCell += iCrystallographicOrbits[x,0]
aritySum = 0 # sum over arities of unique, occupied wyckoff positions (different crystallographic orbits with same wyckoff letter are NOT counted multiple times!)
for x in arityArray:
aritySum += x
# calculate information contents
I_comb = I_coor = I_conf = 0.0
uniqueSpecies = 0
if aritySum > 0:
# the coordinational sum is formed over unique wyckoff positions
for x in arityArray:
probability = x / aritySum
if probability > 0:
I_coor -= probability * math.log(probability, 2)
# the configurational sum over wyckoff positions and crystallographic orbits
probability = x / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
for x in range(0, equivalenceClassNumber):
# the combinatorial sum is formed over each element in a crystallographic orbit individually (in other words: over unique species)
# vacancies count as elements too -> probability according to positionsPerPrimitiveUnitCell
occupancySum = 0
multiplicity = iCrystallographicOrbits[x, 0]
for element in occupancyDict[x]:
occupancyValue = occupancyDict[x][element]
occupancySum += occupancyDict[x][element]
probability = multiplicity * occupancyValue / positionsPerPrimitiveUnitCell
if probability > 0:
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
elif verbose:
print(f'Probability <= 0 was skipped: {element} at pos. {x}')
else:
warningCache += f'Probability <= 0 was skipped: {element} at pos. {x} '
probability = multiplicity * occupancyValue / (aritySum + positionsPerPrimitiveUnitCell)
if probability > 0:
I_conf -= probability * math.log(probability, 2)
if occupancySum < 1:
probability = multiplicity * (1 - occupancySum) / positionsPerPrimitiveUnitCell
I_comb -= probability * math.log(probability, 2)
uniqueSpecies += 1
probability = multiplicity * (1 - occupancySum) / (aritySum + positionsPerPrimitiveUnitCell)
I_conf -= probability * math.log(probability, 2)
I_comb_tot = positionsPerPrimitiveUnitCell * I_comb
I_coor_tot = aritySum * I_coor
I_conf_tot = (aritySum + positionsPerPrimitiveUnitCell) * I_conf
# maximum combinatorial information content based on number of unique species which are defined by a combination of crystallographic orbit and element (vacancies obviously count too).
# otherwise: I_comb > I_comb_max for alloys (in general: cases w/ all occupancies < 1)
I_comb_max = math.log(uniqueSpecies, 2)
if aritySum > 0:
I_coor_max = math.log(aritySum, 2)
else:
I_coor_max = 0
I_conf_max = math.log(uniqueSpecies + aritySum, 2)
if I_comb_max != 0:
I_comb_norm = I_comb / I_comb_max
else:
I_comb_norm = 0
if I_coor_max != 0:
I_coor_norm = I_coor / I_coor_max
else:
I_coor_norm = 0
if I_conf_max != 0:
I_conf_norm = I_conf / I_conf_max
else:
I_conf_norm = 0
# correct cell volume to primitive cell volume
perVolume = atomsPerUnitCell / (atomsPerPrimitiveUnitCell * structure.cell.volume)
I_comb_density = perVolume * I_comb_tot
I_coor_density = perVolume * I_coor_tot
I_conf_density = perVolume * I_conf_tot
if entropy:
gasConstantR = mol * kB / (kJ / 1000)
conversionFactor = math.log(2, math.e)
# error for stirling-approximation of ln(N!) < 1% for N >= 90
if positionsPerPrimitiveUnitCell >= 90:
S_comb_max_molar = gasConstantR * positionsPerPrimitiveUnitCell * (math.log(positionsPerPrimitiveUnitCell, math.e) - 1)
else:
S_comb_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell), math.e)
if aritySum >= 90:
S_coor_max_molar = gasConstantR * aritySum * (math.log(aritySum, math.e) - 1)
else:
S_coor_max_molar = gasConstantR * math.log(math.factorial(aritySum), math.e)
if (positionsPerPrimitiveUnitCell + aritySum) >= 90:
S_conf_max_molar = gasConstantR * (positionsPerPrimitiveUnitCell + aritySum) * (math.log((positionsPerPrimitiveUnitCell + aritySum), math.e) - 1)
else:
S_conf_max_molar = gasConstantR * math.log(math.factorial(positionsPerPrimitiveUnitCell + aritySum), math.e)
Delta_S_comb_molar = gasConstantR * I_comb * conversionFactor
Delta_S_coor_molar = gasConstantR * I_coor * conversionFactor
Delta_S_conf_molar = gasConstantR * I_conf * conversionFactor
if verbose:
print(f'\n\n------------ {pathToCif} ------------')
print(f'assumed formula\t {chemical_formula}')
print(f'assumed SG\t {aSG}')
print(f'SG from CIF\t {SG}')
print(
'lattice [A] \t a: {:.2f}, b: {:.2f}, c: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2]
).replace('.', decimalSeparator)
)
print(
'angles [] \t b,c: {:.2f}, a,c: {:.2f}, a,b: {:.2f}'.format(
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5]
).replace('.', decimalSeparator)
)
print('---')
print('{:.6f} \t atoms / unit cell'.format(atomsPerUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t atoms / reduced unit cell'.format(atomsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t positions / reduced unit cell'.format(positionsPerPrimitiveUnitCell).replace('.', decimalSeparator))
print('{:.6f} \t unique species'.format(uniqueSpecies).replace('.', decimalSeparator))
print('{:.6f} \t coordinational degrees of freedom'.format(aritySum).replace('.', decimalSeparator))
print('--- combinatorial (extended Krivovichev) ---')
print('{:.6f} \t I_comb \t\t [bit / position]'.format(I_comb).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_max \t\t [bit / position]'.format(I_comb_max).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_norm \t\t [-]'.format(I_comb_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_tot \t\t [bit / reduced unit cell]'.format(I_comb_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_comb_dens \t\t [bit / A^3]'.format(I_comb_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_comb_max_molar \t [J / (mol * K)]'.format(S_comb_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_comb_molar \t [J / (mol * K)]'.format(Delta_S_comb_molar).replace('.', decimalSeparator))
print('--- coordinational (Hornfeck) ---')
print('{:.6f} \t I_coor \t\t [bit / freedom]'.format(I_coor).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_max \t\t [bit / freedom]'.format(I_coor_max).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_norm \t\t [-]'.format(I_coor_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_tot \t\t [bit / reduced unit cell]'.format(I_coor_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_coor_dens \t\t [bit / A^3]'.format(I_coor_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_coor_max_molar \t [J / (mol * K)]'.format(S_coor_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_coor_molar \t [J / (mol * K)]'.format(Delta_S_coor_molar).replace('.', decimalSeparator))
print('--- configurational (extended Hornfeck) ---')
print('{:.6f} \t I_conf \t\t [bit / (position + freedom)]'.format(I_conf).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_max \t\t [bit / (position + freedom)]'.format(I_conf_max).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_norm \t\t [-]'.format(I_conf_norm).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_tot \t\t [bit / reduced unit cell]'.format(I_conf_tot).replace('.', decimalSeparator))
print('{:.6f} \t I_conf_dens \t\t [bit / A^3]'.format(I_conf_density).replace('.', decimalSeparator))
if entropy:
print('{:.6f} \t S_conf_max_molar \t [J / (mol * K)]'.format(S_conf_max_molar).replace('.', decimalSeparator))
print('{:.6f} \t Delta_S_conf_molar \t [J / (mol * K)]'.format(Delta_S_conf_molar).replace('.', decimalSeparator))
return
elif entropy:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density, S_comb_max_molar, Delta_S_comb_molar,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density, S_coor_max_molar, Delta_S_coor_molar,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density, S_conf_max_molar, Delta_S_conf_molar
]
else:
returnArray = [
warningCache,
pathToCif,
doi, journal, year,
chemical_formula,
aSG,
SG,
structure.get_cell_lengths_and_angles()[0],
structure.get_cell_lengths_and_angles()[1],
structure.get_cell_lengths_and_angles()[2],
structure.get_cell_lengths_and_angles()[3],
structure.get_cell_lengths_and_angles()[4],
structure.get_cell_lengths_and_angles()[5],
atomsPerUnitCell,
atomsPerPrimitiveUnitCell,
positionsPerPrimitiveUnitCell,
uniqueSpecies,
aritySum,
I_comb, I_comb_max, I_comb_norm, I_comb_tot, I_comb_density,
I_coor, I_coor_max, I_coor_norm, I_coor_tot, I_coor_density,
I_conf, I_conf_max, I_conf_norm, I_conf_tot, I_conf_density
]
return returnArray
def correctCoordinates(coordinateDescription, parameter, coordinate):
"""
extracts x/y/z parameter of a wyckoff position's individual coordinates. e.g. the z-coordinate of a wyckoff position 4c in SG 24 might be defined as (-z+1/2) = 0.3 --> returns (z) = 0.2
Parameters
arg1 (string) parametrized description of the coordinate e.g. '-z+1/2'
arg2 (string) 'x', 'y' or 'z' as parameter to isolate from arg1 (coordinateDescription) e.g. 'z'
arg3 (float) fractional coordinate on x/y/z axis e.g. 0.3
Returns
float fractional coordinate, corresponding to the isolated parameter (x, y or z) e.g. 0.2
"""
if coordinateDescription.split(parameter)[0] == '-':
factor = -1
else:
factor = +1
if coordinateDescription.split(parameter)[1] != '':
summand = eval(coordinateDescription.split(parameter)[1])
else:
summand = 0
return (factor * (coordinate - summand)) % 1
def getArity(pyxtalWyckoff):
"""
calculates the arity of a given wyckoff position
Parameters
arg1 (Wyckoff_position) pyxtal Wyckoff_position class object
Returns
int arity
"""
firstSymmOp = str(pyxtalWyckoff).splitlines()[1] # line 0 contains general description: 'wyckoff pos nA in SG xx with site symmetry xx'
arity = 0
if 'x' in firstSymmOp:
arity += 1
if 'y' in firstSymmOp:
arity += 1
if 'z' in firstSymmOp:
arity += 1
return arity
def correctOccupancy(occupancyDict, iCrystallographicOrbits):
"""
a menu that allows for on-the-fly editing of occupancy values
Parameters
arg1 (dictionary) dictionary, containing {Element1 : occupancy1, Element2 : occupancy2} for every crystallographic orbit
arg2 (array) array, containing the multiplicities [x, 0], wyckoff letters [x, 1] and arities [x, 2] of every crystallographic orbit
Returns
dictionary updated occupancyDict
"""
corrOccupancyDict = copy.deepcopy(occupancyDict)
while True:
print('\n\nEnter a number on the left to correct the species\' occupancy. \'c\' to continue with current values. \'d\' to discard changes.')
print('#\t Element \t Wyckoff \t arity \t original \t current')
positions = []
for x in corrOccupancyDict:
for element in corrOccupancyDict[x]:
positions.append([x,element])
print(f'{len(positions) - 1} \t {element} \t\t {iCrystallographicOrbits[x, 0]}{iCrystallographicOrbits[x, 1]} \t\t {iCrystallographicOrbits[x, 2]} \t {occupancyDict[x][element]} \t\t {corrOccupancyDict[x][element]}')
print('')
userInput = input()
if userInput == 'c':
return corrOccupancyDict
elif userInput == 'd':
return occupancyDict
elif RepresentsInt(userInput) and 0 <= int(userInput) < len(positions):
x = positions[int(userInput)][0]
element = positions[int(userInput)][1]
print(f'\n\nInput the new stoichiometry for {element} at Wyckoff {iCrystallographicOrbits[x, 0]}{iCrystallographicOrbits[x, 1]} with \'.\' as decimal separator. Currently: {corrOccupancyDict[x][element]}')
userInput2 = input()
if RepresentsFloat(userInput2) and 0 < float(userInput2) <= 1:
corrOccupancyDict[x][element] = float(userInput2)
else:
print(f'\n\nPlease only insert occupancy values 0 < x <= 1')
continue
else:
print(f'\n\nPlease only enter integer numbers in the range of 0 to {len(positions) - 1}')
continue
def gcd(numbers):
"""
calculates the greatest common divisor of a given array of integers
"""
divisor = numbers[0]
while True:
try:
for number in numbers:
rest = number % divisor
if not rest:
pass
else:
raise
break
except:
pass
divisor -= 1
return divisor
def customWarnings(message, category, filename, lineno, file, random):
"""
redirects warnings into the global variable warningCache (batch mode)
"""
global warningCache
warningCache += str(message) + ', in: ' + str(filename) + ', line: ' + str(lineno) + ' '
def processFile(pathToCif, verbose, entropy, symprec):
"""
open CIF from given path, perform corrections that enhance ASE-compatibility and facilitate calculations in getComplexity()
let ASE parse the file and forward the data blocks in form of Atoms objects to getComplexity()
Parameters:
arg1 (string) path to valid CIF
arg2 (Boolean) verbosity: (True) --> output to console <=> (False) --> output to .csv-file in respective folder
arg3 (Boolean) entropy options
arg4 (float) symmetry tolerance in cartesian coordinates
Returns:
returns return valuess of getComplexity() as an array
"""
# redirect warnings for batch mode
if not verbose:
resultArray = []
global warningCache
warnings.showwarning = customWarnings
# get contents from CIF-file and thereby correct spacegroups that are written with brackets (ASE will throw errors)
# crystal water is often denominated as "Wat", ASE hates that, replace "Wat" with "O" as hydrogen atoms are missing anyway
# ignore dummy atoms completely as they will cause problems and should not contribute to any information content
# filter fractional coordinates with modulo operator (should be between 0 and 1!), thereby discard of uncertainty values
input = open(pathToCif)
output = ''
xPos = yPos = zPos = counter = -1
for line in input:
low = line.lower()
if line[0] == '#':
continue
elif '_' in line:
if (
'_symmetry_space_group_name_h-m' in low
or '_space_group_name_h-m_alt' in low
):
output += line.replace('(', '').replace(')', '')
elif 'loop_' in low:
output += line
xPos = yPos = zPos = counter = -1
elif '_atom_site_fract_x' in low:
output += line
xPos = counter
elif '_atom_site_fract_y' in low:
output += line
yPos = counter
elif '_atom_site_fract_z' in low:
output += line
zPos = counter
else:
output += line
counter += 1
elif xPos >= 0 and yPos >=0 and zPos >= 0:
if 'dum' in low:
continue
segments = line.split()
if len(segments) > max([xPos, yPos, zPos]):
if '(' in segments[xPos]:
segments[xPos] = segments[xPos][0:segments[xPos].find('(')]
if '(' in segments[yPos]:
segments[yPos] = segments[yPos][0:segments[yPos].find('(')]
if '(' in segments[zPos]:
segments[zPos] = segments[zPos][0:segments[zPos].find('(')]
if RepresentsFloat(segments[xPos]):
segments[xPos] = str(float(segments[xPos]) % 1)
if RepresentsFloat(segments[yPos]):
segments[yPos] = str(float(segments[yPos]) % 1)
if RepresentsFloat(segments[zPos]):
segments[zPos] = str(float(segments[zPos]) % 1)
for segment in segments:
output += ' '
output += segment.replace('Wat', 'O')
output += '\n'
else:
output += line.replace('Wat', 'O')
else:
output += line
cifFile = io.StringIO(output)
#let ase read adjusted CIF-file
try:
structureList = ase.io.read(cifFile, format = 'cif', index = ':', store_tags = True, reader = 'ase') #, fractional_occupancies = True
except Exception as e:
errorMessage = 'File is either empty or corrupt. ' + traceback.format_exc().replace('\n', ' ')
if verbose:
print(errorMessage)
return
else:
errorMessage += warningCache
warningCache = ''
resultArray.append([errorMessage, pathToCif])
return resultArray
# iterate through entries in CIF-file
index = 0
for structure in structureList:
outputPath = pathToCif
if len(structureList) > 1:
outputPath = outputPath + ' [' + str(index) + ']'
try:
if verbose:
getComplexity(structure, outputPath, verbose, entropy, symprec)
else:
resultArray.append(getComplexity(structure, outputPath, verbose, entropy, symprec))
except Exception as e:
errorMessage = 'Error: ' + traceback.format_exc().replace('\n', ' ')
if verbose:
print(errorMessage)
else:
warningCache += errorMessage
resultArray.append([warningCache, outputPath])
warningCache = ''
index += 1
if not verbose:
return resultArray
def processDirectory(dir, recursive, entropy, symprec):
"""
iterates through all .cif-files in a given directory with multithreading and compiles results into .csv-file
Parameters:
arg1 (string): path to directory
arg2 (Boolean): iterate through subdirs as well?
arg3 (Boolean): entropy options
arg4 (float): symmetry tolerance in cartesian coordinates
Returns: results as .csv-file into dir
"""
start = time.time()
if not dir[-1] == '/' and not dir[-1] == '\\':
dir += '\\'
if recursive:
extension = '**/*.cif'
else:
extension = '*.cif'
resultArray = []
fileList = glob.glob(dir + extension, recursive = recursive)
numFiles = len(fileList)
if numFiles == 0:
print(f'{dir} does not contain .cif-files')
return
if numFiles > maxThreads:
numProcesses = maxThreads
else:
numProcesses = numFiles
pool = multiprocessing.Pool(processes = numProcesses)
for file in fileList:
resultArray.append(pool.apply_async(processFile, args = (file, False, entropy, symprec)))
output = ''
numEntries = 0
for fileResult in resultArray:
for cifResult in fileResult.get():
counter = 0
numEntries += 1
for string in cifResult:
if counter > 7:
if decimalSeparator == ',':
output += '{:.6f}; '.format(string).replace('.', ',')
else:
output += '{:.6f}; '.format(string)
else:
output += string + '; '
counter += 1
output += '\n '
if entropyOptions:
header = 'Errors; Path; DOI; Journal; Year; Assumed Formula; assumed SG; SG from CIF; a [A]; b [A]; c [A]; b,c []; a,c []; a,b []; atoms / uc; atoms / reduc; pos / reduc; unique species; coor freedom (aritySum); I_comb; I_comb_max; I_comb_norm; I_comb_tot; I_comb_density; S_comb_max_molar; Delta_S_comb_molar; I_coor; I_coor_max; I_coor_norm; I_coor_tot; I_coor_density; S_coor_max_molar; Delta_S_coor_molar; I_conf; I_conf_max; I_conf_norm; I_conf_tot; I_conf_density; S_conf_max_molar; Delta_S_conf_molar; \n '
else:
header = 'Errors; Path; DOI; Journal; Year; Assumed Formula; assumed SG; SG from CIF; a [A]; b [A]; c [A]; b,c []; a,c []; a,b []; atoms / uc; atoms / reduc; pos / reduc; unique species; coor freedom (aritySum); I_comb; I_comb_max; I_comb_norm; I_comb_tot; I_comb_density; I_coor; I_coor_max; I_coor_norm; I_coor_tot; I_coor_density; I_conf; I_conf_max; I_conf_norm; I_conf_tot; I_conf_density; \n '
finish = time.time()
outputFile = dir + f'batch_{int(finish)}.csv'
f = open(outputFile, 'w', encoding = 'utf-8')
f.write(header + output)
f.close()
timer = '{:.3f}'.format(finish - start)
print(f'\n\nProcessed {numFiles} files ({numEntries} entries) in {timer} s. Results written into {outputFile}')
if __name__ == '__main__':
userMenu() | 47.296218 | 525 | 0.58042 |
67ef29d1d4ce47e0f4c946159c2b8e5e9239317e | 2,166 | py | Python | bin-opcodes-vec/top50opcodes.py | laurencejbelliott/Ensemble_DL_Ransomware_Detector | 0cae02c2425e787a810513537a47897f3a42e5b5 | [
"MIT"
] | 18 | 2019-04-10T21:16:45.000Z | 2021-11-03T00:22:14.000Z | bin-opcodes-vec/top50opcodes.py | laurencejbelliott/Ensemble_DL_Ransomware_Detector | 0cae02c2425e787a810513537a47897f3a42e5b5 | [
"MIT"
] | null | null | null | bin-opcodes-vec/top50opcodes.py | laurencejbelliott/Ensemble_DL_Ransomware_Detector | 0cae02c2425e787a810513537a47897f3a42e5b5 | [
"MIT"
] | 9 | 2019-06-29T18:09:24.000Z | 2021-11-10T22:15:13.000Z | __author__ = "Laurence Elliott - 16600748"
from capstone import *
import pefile, os
# samplePaths = ["testSamples/" + sample for sample in os.listdir("testSamples")]
samplePaths = ["../bin-utf8-vec/benignSamples/" + sample for sample in os.listdir("../bin-utf8-vec/benignSamples")] + \
["../bin-utf8-vec/malwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/malwareSamples")] + \
["../bin-utf8-vec/ransomwareSamples/" + sample for sample in os.listdir("../bin-utf8-vec/ransomwareSamples")]
opcodeSet = set()
opCodeDicts = []
opCodeFreqs = {}
nSamples = len(samplePaths)
count = 1
for sample in samplePaths:
try:
pe = pefile.PE(sample, fast_load=True)
entryPoint = pe.OPTIONAL_HEADER.AddressOfEntryPoint
data = pe.get_memory_mapped_image()[entryPoint:]
cs = Cs(CS_ARCH_X86, CS_MODE_32)
opcodes = []
for i in cs.disasm(data, 0x1000):
opcodes.append(i.mnemonic)
opcodeDict = {}
total = len(opcodes)
opcodeSet = set(list(opcodeSet) + opcodes)
for opcode in opcodeSet:
freq = 1
for op in opcodes:
if opcode == op:
freq += 1
try:
opCodeFreqs[opcode] += freq
except:
opCodeFreqs[opcode] = freq
opcodeDict[opcode] = round((freq / total) * 100, 2)
opCodeDicts.append(opcodeDict)
os.system("clear")
print(str((count / nSamples) * 100) + "%")
count += 1
except Exception as e:
print(e)
# for opcode in opcodeSet:
# print(opcode, str(opcodeDict[opcode]) + "%")
# for opcodeDict in opCodeDicts:
# freqSorted = sorted(opcodeDict, key=opcodeDict.get)[-1:0:-1]
# print(opcodeDict[freqSorted[0]], opcodeDict[freqSorted[1]], opcodeDict[freqSorted[2]], freqSorted)
opCodeFreqsSorted = sorted(opCodeFreqs, key=opCodeFreqs.get)[-1:0:-1]
with open("top50opcodes.csv", "w") as f:
f.write("opcode, frequency\n")
for opcode in opCodeFreqsSorted[:50]:
f.write(str(opcode) + ", " + str(opCodeFreqs[opcode]) + "\n")
print(opcode, opCodeFreqs[opcode])
| 31.391304 | 119 | 0.612188 |
67f01ead8301ab0d013d90c2874dceeac2e0f7b9 | 233 | py | Python | chat/messaging/apps.py | VsevolodOkhrimenko/enchad | eca2790b374d336dfc5e109657d25ab0616196ee | [
"MIT"
] | null | null | null | chat/messaging/apps.py | VsevolodOkhrimenko/enchad | eca2790b374d336dfc5e109657d25ab0616196ee | [
"MIT"
] | null | null | null | chat/messaging/apps.py | VsevolodOkhrimenko/enchad | eca2790b374d336dfc5e109657d25ab0616196ee | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 19.416667 | 54 | 0.622318 |
67f15b64983f5eafc8f2961a8adfe37568e44cb9 | 2,051 | py | Python | tests/test_keepalived2.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 15 | 2015-03-18T21:45:24.000Z | 2021-02-22T09:41:30.000Z | tests/test_keepalived2.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 12 | 2016-01-15T19:32:36.000Z | 2016-10-27T14:21:14.000Z | tests/test_keepalived2.py | khosrow/lvsm | 516ee1422f736d016ccc198e54f5f019102504a6 | [
"MIT"
] | 8 | 2015-03-20T00:24:56.000Z | 2021-11-19T06:21:19.000Z | import unittest
import os
import sys
import StringIO
path = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '../lvsm')))
from lvsm.modules import keepalived
if __name__ == "__main__":
unittest.main()
| 42.729167 | 113 | 0.376889 |
67f2b9d79410dba976d86159718de46c71935384 | 1,416 | py | Python | faeAuditor/auditGroupResults/urlsCSV.py | opena11y/fae-auditor | ea9099b37b77ddc30092b0cdd962647c92b143a7 | [
"Apache-2.0"
] | 2 | 2018-02-28T19:03:28.000Z | 2021-09-30T13:40:23.000Z | faeAuditor/auditGroupResults/urlsCSV.py | opena11y/fae-auditor | ea9099b37b77ddc30092b0cdd962647c92b143a7 | [
"Apache-2.0"
] | 6 | 2020-02-11T21:53:58.000Z | 2022-02-10T07:57:58.000Z | faeAuditor/auditGroupResults/urlsCSV.py | opena11y/fae-auditor | ea9099b37b77ddc30092b0cdd962647c92b143a7 | [
"Apache-2.0"
] | 1 | 2019-12-05T06:05:20.000Z | 2019-12-05T06:05:20.000Z | """
Copyright 2014-2018 University of Illinois
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
file: auditResults/urlsCSV.py
Author: Jon Gunderson
"""
# reports/urls.py
from __future__ import absolute_import
from django.conf.urls import url
from .viewsCSV import GroupResultsViewCSV
from .viewsCSV import GroupResultsAuditGroupViewCSV
from .viewsCSV import GroupRuleGroupResultsViewCSV
urlpatterns = [
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/$',
GroupResultsViewCSV,
name='group_results_csv'),
url(r'^all/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/g/(?P<audit_group_slug>[\w-]+)/$',
GroupResultsAuditGroupViewCSV,
name='group_results_audit_group_csv'),
# Rule grouping result views
url(r'^some/(?P<result_slug>[\w-]+)/(?P<rule_grouping>[\w-]+)/rg/(?P<rule_group_slug>[\w-]+)/$',
GroupRuleGroupResultsViewCSV,
name='group_rule_group_results_csv')
]
| 29.5 | 100 | 0.735169 |
67f2d1af7b93140433f3b44d8d6f9fbf50549676 | 912 | py | Python | microcosm_caching/base.py | globality-corp/microcosm-caching | 9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8 | [
"Apache-2.0"
] | 1 | 2019-08-29T16:47:18.000Z | 2019-08-29T16:47:18.000Z | microcosm_caching/base.py | globality-corp/microcosm-caching | 9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8 | [
"Apache-2.0"
] | 2 | 2019-10-29T19:25:16.000Z | 2019-11-12T00:00:04.000Z | microcosm_caching/base.py | globality-corp/microcosm-caching | 9e4ddb60d95e1344bf97f69248d1f7ac36a92cc8 | [
"Apache-2.0"
] | null | null | null | """
Cache abstractions for use with API resources.
"""
from abc import ABC, abstractmethod
| 19.404255 | 63 | 0.574561 |
67f2fda918bbde7a4b1b415f81dab3ffab386200 | 876 | py | Python | randomizer.py | shane1027/PollDaddySlurp | 6cc17156f38427379d095277681dbe1a68baa49d | [
"MIT"
] | null | null | null | randomizer.py | shane1027/PollDaddySlurp | 6cc17156f38427379d095277681dbe1a68baa49d | [
"MIT"
] | null | null | null | randomizer.py | shane1027/PollDaddySlurp | 6cc17156f38427379d095277681dbe1a68baa49d | [
"MIT"
] | 1 | 2019-10-10T15:19:33.000Z | 2019-10-10T15:19:33.000Z | #!/usr/bin/env python2.7
import time
from http_request_randomizer.requests.proxy.requestProxy import RequestProxy
if __name__ == '__main__':
start = time.time()
req_proxy = RequestProxy()
print "Initialization took: {0} sec".format((time.time() - start))
print "Size : ", len(req_proxy.get_proxy_list())
print " ALL = ", req_proxy.get_proxy_list()
test_url = 'http://ipv4.icanhazip.com'
while True:
start = time.time()
request = req_proxy.generate_proxied_request(test_url)
print "Proxied Request Took: {0} sec => Status: {1}".format((time.time() - start), request.__str__())
if request is not None:
print "\t Response: ip={0}".format(u''.join(request.text).encode('utf-8'))
print "Proxy List Size: ", len(req_proxy.get_proxy_list())
print"-> Going to sleep.."
time.sleep(1)
| 35.04 | 109 | 0.643836 |
67f38cc9e41435b2a8a8c22aa5a456b1d76fb88e | 555 | py | Python | examples/nni_data_augmentation/basenet/data.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 33 | 2020-08-30T16:22:35.000Z | 2022-02-26T13:48:32.000Z | examples/nni_data_augmentation/basenet/data.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2021-01-18T19:46:43.000Z | 2021-03-24T09:59:14.000Z | examples/nni_data_augmentation/basenet/data.py | petuum/tuun | 8eec472dbf0e5e695449b0fa2d98985469fd5b30 | [
"Apache-2.0"
] | 2 | 2020-08-25T17:02:15.000Z | 2021-04-21T16:40:44.000Z | #!/usr/bin/env python
"""
data.py
"""
import itertools
| 18.5 | 60 | 0.578378 |
67f3afbe3c2036ebfbec72e16288761010482211 | 1,180 | py | Python | tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | null | null | null | tools_box/_selling/report/sales_representative_scorecard/sales_representative_scorecard.py | maisonarmani/Tools_Box | 4f8cc3a0deac1be50a3ac80758a10608faf58454 | [
"MIT"
] | 1 | 2022-01-30T12:15:41.000Z | 2022-01-30T12:15:41.000Z | # Copyright (c) 2013, masonarmani38@gmail.com and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
| 39.333333 | 183 | 0.691525 |
67f3bbd2cd29eb37f8dc56a77c4074bc640a2a29 | 484 | py | Python | Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | null | null | null | Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | null | null | null | Google-IT-Automation-with-Python-Professional-Certificate/3-Introduction-to-Git-and-Github/Week-1/disk_usage.py | fengjings/Coursera | 54098a9732faa4b37afe69d196e27805b1ac73aa | [
"MIT"
] | 1 | 2021-06-09T08:59:48.000Z | 2021-06-09T08:59:48.000Z | import shutil
import sys
def check_disk_usage(disk, min_absolute, min_percent):
'''return true if there is enough free disk space, else false'''
du = shutil.disk_usage(disk)
percent_free= 100*du.free/du.total
gigabytes_free = du.free/2**30
if percent_free<min_percent or gigabytes_free < min_absolute:
return False
return True
if not check_disk_usage('/',2*2**30, 10):
print('error not enough space')
return 1
print('everything ok')
return 0 | 26.888889 | 68 | 0.708678 |
67f441ca489816b005f268005b6753cf7c38a180 | 1,796 | py | Python | src/utils/tests/test_www.py | nuuuwan/utils | d5085d9bddd1ffc79544241b43aaa8269c5806f0 | [
"MIT"
] | null | null | null | src/utils/tests/test_www.py | nuuuwan/utils | d5085d9bddd1ffc79544241b43aaa8269c5806f0 | [
"MIT"
] | 1 | 2021-07-06T11:16:58.000Z | 2021-07-06T11:16:58.000Z | src/utils/tests/test_www.py | nuuuwan/utils | d5085d9bddd1ffc79544241b43aaa8269c5806f0 | [
"MIT"
] | null | null | null | """Test."""
import os
import unittest
import pytest
from utils import www
TEST_JSON_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/misc-sl-data/master',
'sl_power_station_info.json',
)
TEST_TSV_URL = os.path.join(
'https://raw.githubusercontent.com',
'nuuuwan/gig-data/master',
'province.tsv',
)
TEST_INVALID_URL = 'http://www.29df.c'
TEST_IMAGE_LINK = 'https://www.python.org/static/img/python-logo@2x.png'
| 24.60274 | 72 | 0.604677 |
67f6677df6c93e2d632b899ab9dc98b595479ae0 | 19,511 | py | Python | src/qrl/core/State.py | scottdonaldau/QRL | fb78c1cdf227330ace46f590a36cc6a52c7af3fe | [
"MIT"
] | 1 | 2020-07-12T23:40:48.000Z | 2020-07-12T23:40:48.000Z | src/qrl/core/State.py | scottdonaldau/QRL | fb78c1cdf227330ace46f590a36cc6a52c7af3fe | [
"MIT"
] | null | null | null | src/qrl/core/State.py | scottdonaldau/QRL | fb78c1cdf227330ace46f590a36cc6a52c7af3fe | [
"MIT"
] | null | null | null | # coding=utf-8
# Distributed under the MIT software license, see the accompanying
# file LICENSE or http://www.opensource.org/licenses/mit-license.php.
from typing import Optional
from statistics import median
import functools
from google.protobuf.json_format import MessageToJson, Parse
from pyqrllib.pyqrllib import bin2hstr
from pyqryptonight.pyqryptonight import UInt256ToString
from qrl.core import config
from qrl.core.BlockMetadata import BlockMetadata
from qrl.core.GenesisBlock import GenesisBlock
from qrl.core.Block import Block
from qrl.core.misc import logger, db
from qrl.core.txs.Transaction import Transaction
from qrl.core.txs.TransferTokenTransaction import TransferTokenTransaction
from qrl.core.txs.TokenTransaction import TokenTransaction
from qrl.core.txs.CoinBase import CoinBase
from qrl.core.TokenMetadata import TokenMetadata
from qrl.core.AddressState import AddressState
from qrl.core.LastTransactions import LastTransactions
from qrl.core.TransactionMetadata import TransactionMetadata
from qrl.generated import qrl_pb2, qrlstateinfo_pb2
def put_addresses_state(self, addresses_state: dict, batch=None):
"""
:param addresses_state:
:param batch:
:return:
"""
for address in addresses_state:
address_state = addresses_state[address]
data = address_state.pbdata.SerializeToString()
self._db.put_raw(address_state.address, data, batch)
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
#########################################
| 38.559289 | 125 | 0.602788 |
67f6729eb5c33b2e9485a361bcba852adc1d1e4b | 2,670 | py | Python | data/make_stterror_data/main.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 32 | 2020-01-03T09:53:03.000Z | 2021-09-07T07:23:26.000Z | data/make_stterror_data/main.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | null | null | null | data/make_stterror_data/main.py | gcunhase/StackedDeBERT | 82777114fd99cafc6e2a3d760e774f007c563245 | [
"MIT"
] | 6 | 2020-01-21T06:50:21.000Z | 2021-01-22T08:04:00.000Z | import os.path
from timeit import default_timer as timer
import data.make_stterror_data.utils as utils
from data.make_stterror_data.handler import HandlerIntent
from data.make_stterror_data.parser import snips_parser
__author__ = "Gwena Cunha"
""" Main module for Snips
text -> TTS -> STT -> wrong text
"""
if __name__ == '__main__':
time = timer()
main()
print("Program ran for %.2f minutes" % ((timer()-time)/60)) | 45.254237 | 131 | 0.695506 |
67f6d526ab4ecec5625261ee10602db862d65a55 | 5,591 | py | Python | src/tk_live_model_test.py | KarlWithK/gesture | d60204684c1e3868177e76b62d74d899d39d287d | [
"MIT"
] | null | null | null | src/tk_live_model_test.py | KarlWithK/gesture | d60204684c1e3868177e76b62d74d899d39d287d | [
"MIT"
] | null | null | null | src/tk_live_model_test.py | KarlWithK/gesture | d60204684c1e3868177e76b62d74d899d39d287d | [
"MIT"
] | 2 | 2021-09-01T01:06:23.000Z | 2021-09-06T00:18:54.000Z | import tkinter as tk
from PIL import Image, ImageTk
from cv2 import cv2
import numpy as np
import mediapipe as mp
from keyboard import press_and_release as press
from json import load
from data_preprocessor import DataGenerator
from gestures import GESTURES
import tensorflow as tf
TARGET_FRAMERATE: int = 20
GESTURE_LENGTH: int = 20
TFLITE_MODEL_PATH: str = "saved_models/MODEL-2021-06-02-16-12-10.tflite"
VIDEO_WIDTH = 1920
VIDEO_HEIGHT = 1080
keys = load(open("keybinds.json", "r"))
for key in keys:
if key in GESTURES:
GESTURES[key]['keybind'] = keys[key]
if __name__ == "__main__":
app = LiveModelTester()
app.mainloop()
| 34.512346 | 104 | 0.597746 |
67f86eeb953024e2463d4d73c584b0e83d0b4555 | 12,761 | py | Python | wykop/api/client.py | selfisekai/wykop-sdk-reborn | 7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d | [
"MIT"
] | null | null | null | wykop/api/client.py | selfisekai/wykop-sdk-reborn | 7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d | [
"MIT"
] | null | null | null | wykop/api/client.py | selfisekai/wykop-sdk-reborn | 7f17c5b2a3d282b5aaf72475a0f58ba66d5c5c5d | [
"MIT"
] | null | null | null | import logging
from typing import Dict, List
from wykop.api.api_const import PAGE_NAMED_ARG, BODY_NAMED_ARG, FILE_POST_NAME
from wykop.core.credentials import Credentials
from wykop.core.requestor import Requestor
log = logging.getLogger(__name__)
| 37.754438 | 114 | 0.587728 |
67f9a1f6ffa0fc0bfe7226b1e9ede9e0f2fe3d7a | 1,461 | py | Python | brainbox/tests/test_singlecell.py | SebastianBruijns/ibllib | 49f2091b7a53430c00c339b862dfc1a53aab008b | [
"MIT"
] | null | null | null | brainbox/tests/test_singlecell.py | SebastianBruijns/ibllib | 49f2091b7a53430c00c339b862dfc1a53aab008b | [
"MIT"
] | null | null | null | brainbox/tests/test_singlecell.py | SebastianBruijns/ibllib | 49f2091b7a53430c00c339b862dfc1a53aab008b | [
"MIT"
] | null | null | null | from brainbox.singlecell import acorr, calculate_peths
import unittest
import numpy as np
def test_firing_rate():
pass
if __name__ == "__main__":
np.random.seed(0)
unittest.main(exit=False)
| 31.085106 | 88 | 0.644764 |
67f9b6a00e2c9b6075dbb4dc4f6b1acedc0ffc2d | 11,958 | py | Python | test/test_base_metric.py | Spraitazz/metric-learn | 137880d9c6ce9a2b81a8af24c07d80e528f657cd | [
"MIT"
] | 547 | 2019-08-01T23:21:30.000Z | 2022-03-31T10:23:04.000Z | test/test_base_metric.py | Spraitazz/metric-learn | 137880d9c6ce9a2b81a8af24c07d80e528f657cd | [
"MIT"
] | 104 | 2019-08-02T10:15:53.000Z | 2022-03-29T20:33:55.000Z | test/test_base_metric.py | Spraitazz/metric-learn | 137880d9c6ce9a2b81a8af24c07d80e528f657cd | [
"MIT"
] | 69 | 2019-08-12T16:22:57.000Z | 2022-03-10T15:10:02.000Z | import pytest
import re
import unittest
import metric_learn
import numpy as np
from sklearn import clone
from test.test_utils import ids_metric_learners, metric_learners, remove_y
from metric_learn.sklearn_shims import set_random_state, SKLEARN_AT_LEAST_0_22
def sk_repr_kwargs(def_kwargs, nndef_kwargs):
"""Given the non-default arguments, and the default
keywords arguments, build the string that will appear
in the __repr__ of the estimator, depending on the
version of scikit-learn.
"""
if SKLEARN_AT_LEAST_0_22:
def_kwargs = {}
def_kwargs.update(nndef_kwargs)
args_str = ",".join(f"{key}={repr(value)}"
for key, value in def_kwargs.items())
return args_str
if __name__ == '__main__':
unittest.main()
| 42.860215 | 79 | 0.647516 |
67fa9c3bff783bccc4fb93e62dd21fe1343fce47 | 881 | py | Python | examples/geomopt/20-callback.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 501 | 2018-12-06T23:48:17.000Z | 2022-03-31T11:53:18.000Z | examples/geomopt/20-callback.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 710 | 2018-11-26T22:04:52.000Z | 2022-03-30T03:53:12.000Z | examples/geomopt/20-callback.py | QuESt-Calculator/pyscf | 0ed03633b699505c7278f1eb501342667d0aa910 | [
"Apache-2.0"
] | 273 | 2018-11-26T10:10:24.000Z | 2022-03-30T12:25:28.000Z | #!/usr/bin/env python
'''
Optimize molecular geometry within the environment of QM/MM charges.
'''
from pyscf import gto, scf
from pyscf.geomopt import berny_solver
from pyscf.geomopt import geometric_solver
mol = gto.M(atom='''
C 0.000000 0.000000 -0.542500
O 0.000000 0.000000 0.677500
H 0.000000 0.9353074360871938 -1.082500
H 0.000000 -0.9353074360871938 -1.082500
''',
basis='3-21g')
mf = scf.RHF(mol)
# Run analyze function in callback
#
# Method 1: Pass callback to optimize function
#
geometric_solver.optimize(mf, callback=cb)
berny_solver.optimize(mf, callback=cb)
#
# Method 2: Add callback to geometry optimizer
#
opt = mf.nuc_grad_method().as_scanner().optimizer()
opt.callback = cb
opt.kernel()
| 22.589744 | 68 | 0.659478 |
67fa9dc096cb1ead50c5acc747b6ed866a1988a5 | 8,251 | py | Python | Q1_final_project_v2.py | wolhandlerdeb/clustering | d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5 | [
"MIT"
] | null | null | null | Q1_final_project_v2.py | wolhandlerdeb/clustering | d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5 | [
"MIT"
] | null | null | null | Q1_final_project_v2.py | wolhandlerdeb/clustering | d84b0ff91d20b8dbf45e235fc8204f8cedf1ecc5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import scipy as sc
from scipy.stats import randint, norm, multivariate_normal, ortho_group
from scipy import linalg
from scipy.linalg import subspace_angles, orth
from scipy.optimize import fmin
import math
from statistics import mean
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import itertools as it
import seaborn as sns
import matplotlib.pyplot as plt
from cluster.selfrepresentation import ElasticNetSubspaceClustering
import time
# functions for simulate data
# data simulation
df = final_data_simulation(4)
X = df['X'][31]
z = df['z'][31]
z
dim = 4
p = 16
k = 4
kmeans = KMeans(n_clusters=k)
kmeans
temp_df = pd.DataFrame(X)
temp_df['cluster'] = kmeans.fit_predict(X)
# for i in range(k) :
i = 1
df_new = temp_df[temp_df['cluster'] == i].drop(['cluster'], axis=1)
cluster_kmean = KMeans(n_clusters=k).fit_predict(X)
data = {'cluster1': z, 'cluster2': cluster_kmean}
clusters = pd.DataFrame(data, index=range(len(z)))
all_per = list(it.permutations(range(k)))
accuracy_rate_all_per = np.zeros(len(all_per))
c = [i for i in range(k)]
for l, p in enumerate(all_per):
dic = dict(zip(c, p))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
m = clusters.groupby(['cluster1', 'premut_cluster']).size().unstack(fill_value=0)
accuracy_rate_all_per[l] = np.trace(m)
accuracy_rate_all_per.max(), len(cluster_kmean)
per = all_per[2]
dic = dict(zip(c, per))
clusters['premut_cluster'] = clusters['cluster2'].transform(lambda x: dic[x] if x in dic else None)
clusters.groupby(['cluster2', 'premut_cluster']).size()
# find kmeans clusters and subspaces
# Recovery Performance
measure1_kmean = pd.DataFrame()
measure2_kmean = pd.DataFrame()
k = 4
for iter in range(2):
df = all_process(k)
measure1_kmean.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_kmean']), axis=1), True)
measure2_kmean.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_kmean']), axis=1),
True)
# measure1_ensc.insert(iter, "", df.apply(lambda x: performance_measure1(k, x['B'], x['B_ensc']), axis=1), True)
# measure2_ensc.insert(iter, "", df.apply(lambda x: performance_measure2(k, x['z'], x['cluster_ensc']), axis=1), True)
df['measure1_kmean'] = measure1_kmean.apply(lambda x: mean(x), axis=1)
df['measure2_kmean'] = measure2_kmean.apply(lambda x: mean(x), axis=1)
# df['measure1_ensc'] = measure1_ensc.apply(lambda x: mean(x), axis=1)
# df['measure2_ensc'] = measure2_ensc.apply(lambda x: mean(x), axis=1)
df['theta_degree'] = df.apply(lambda x: math.degrees(x['theta']), axis=1)
# ploting
plotting_performance_measure(df, "measure1_kmean")
plotting_performance_measure(df, "measure2_kmean")
plotting_performance_measure(df, "measure1_ensc")
plotting_performance_measure(df, "measure2_ensc")
| 37.848624 | 141 | 0.630105 |
67facec68d3d68647d57845cc972fe7ead4b3012 | 793 | py | Python | lnbits/extensions/usermanager/models.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | 76 | 2021-11-02T22:19:59.000Z | 2022-03-30T18:01:33.000Z | lnbits/extensions/usermanager/models.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | 100 | 2021-11-04T16:33:28.000Z | 2022-03-30T15:03:52.000Z | lnbits/extensions/usermanager/models.py | blackcoffeexbt/lnbits-legend | a9f2877af77ea56d1900e2b5bc1c21b9b7ac2f64 | [
"MIT"
] | 57 | 2021-11-08T06:43:59.000Z | 2022-03-31T08:53:16.000Z | from sqlite3 import Row
from fastapi.param_functions import Query
from pydantic import BaseModel
from typing import Optional
| 19.341463 | 45 | 0.630517 |
67fbc8dcaaaab886066c2cc01da3a3bc0ee4a485 | 3,215 | py | Python | Operator.py | zijieli-Jlee/FGN | f707ed31687ea355ab62a1eaf43b5756a6ed883e | [
"MIT"
] | 2 | 2022-02-28T07:36:47.000Z | 2022-03-10T04:45:57.000Z | Operator.py | BaratiLab/FGN | 04729eaebfa8395a7d2ebb275761f98dc0342933 | [
"MIT"
] | null | null | null | Operator.py | BaratiLab/FGN | 04729eaebfa8395a7d2ebb275761f98dc0342933 | [
"MIT"
] | null | null | null | import numba as nb
import numpy as np
import torch
from torch.autograd import Function
from Constants import MPS_KERNEL as w
from Constants import BASE_RADIUS, ND_RAIUS, GRAD_RADIUS, LAP_RADIUS
Divergence = DivOp.apply
Laplacian = LapOp.apply
| 31.213592 | 123 | 0.601244 |
67fc163e324d1273cf478cbfac97cd26f437a946 | 5,274 | py | Python | pythia/LinearRegression.py | MaudBoucherit/Pythia | 0076d8008350c3a323e28c400b26628be34302e6 | [
"MIT"
] | null | null | null | pythia/LinearRegression.py | MaudBoucherit/Pythia | 0076d8008350c3a323e28c400b26628be34302e6 | [
"MIT"
] | 4 | 2018-02-09T01:16:14.000Z | 2018-03-04T07:48:49.000Z | pythia/LinearRegression.py | MaudBoucherit/Pythia | 0076d8008350c3a323e28c400b26628be34302e6 | [
"MIT"
] | 3 | 2018-02-08T22:52:27.000Z | 2018-02-08T22:53:05.000Z | # LinearRegression.py
# March 2018
#
# This script builds a Linear regression class to analyse data.
# It supports a continuous response and several continuous features.
# The class has a constructor building and fitting the model, and
# a plotting method for residuals.
#
# Dependencies:
#
# Usage:
# from pythia.LinearRegression import LinearRegression
# lm = LinearRegression(X,y)
# print(lm.weights)
# plot_pythia(lm)
## Imports
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import sys
import os
sys.path.insert(0, os.path.abspath("."))
sys.path.insert(0, os.path.abspath("../"))
import pandas as pd
import numpy as np
import numpy.random as random
## The LinearRegression class
| 32.555556 | 154 | 0.603527 |
67fc89d1bcce49307c043c31ae573dd5205a3395 | 289 | py | Python | src/renault_api/exceptions.py | slater0013/renault-api | 13c784b6af09331368341c93888f1eb32c46cb19 | [
"MIT"
] | 44 | 2020-11-01T15:52:33.000Z | 2022-03-31T04:40:03.000Z | src/renault_api/exceptions.py | slater0013/renault-api | 13c784b6af09331368341c93888f1eb32c46cb19 | [
"MIT"
] | 334 | 2020-11-01T13:00:01.000Z | 2022-03-31T17:17:40.000Z | src/renault_api/exceptions.py | slater0013/renault-api | 13c784b6af09331368341c93888f1eb32c46cb19 | [
"MIT"
] | 22 | 2020-11-20T08:26:26.000Z | 2022-03-11T18:58:31.000Z | """Exceptions for Renault API."""
| 20.642857 | 67 | 0.702422 |
67fd6116ebb01570250dd4cf9fbbcabbf9f0ae67 | 5,945 | py | Python | analysis/playing_with_pykalman.py | rafaelvalero/covid_forecast | 4e009ade5481f4e3bd48fd8048ca7d293d5d19b4 | [
"MIT"
] | 3 | 2020-03-20T14:23:51.000Z | 2020-03-29T18:55:12.000Z | analysis/playing_with_pykalman.py | rafaelvalero/covid_forecast | 4e009ade5481f4e3bd48fd8048ca7d293d5d19b4 | [
"MIT"
] | 2 | 2020-03-21T14:07:17.000Z | 2020-03-22T07:38:11.000Z | analysis/playing_with_pykalman.py | rafaelvalero/covid_forecast | 4e009ade5481f4e3bd48fd8048ca7d293d5d19b4 | [
"MIT"
] | 1 | 2020-05-12T14:37:28.000Z | 2020-05-12T14:37:28.000Z | '''
=============================
EM for Linear-Gaussian Models
=============================
This example shows how one may use the EM algorithm to estimate model
parameters with a Kalman Filter.
The EM algorithm is a meta-algorithm for learning parameters in probabilistic
models. The algorithm works by first fixing the parameters and finding a closed
form distribution over the unobserved variables, then finds new parameters that
maximize the expected likelihood of the observed variables (where the
expectation is taken over the unobserved ones). Due to convexity arguments, we
are guaranteed that each iteration of the algorithm will increase the
likelihood of the observed data and that it will eventually reach a local
optimum.
The EM algorithm is applied to the Linear-Gaussian system (that is, the model
assumed by the Kalman Filter) by first using the Kalman Smoother to calculate
the distribution over all unobserved variables (in this case, the hidden target
states), then closed-form update equations are used to update the model
parameters.
The first figure plotted contains 4 sets of lines. The first, labeled `true`,
represents the true, unobserved state of the system. The second, labeled
`blind`, represents the predicted state of the system if no measurements are
incorporated. The third, labeled `filtered`, are the state estimates given
measurements up to and including the current time step. Finally, the fourth,
labeled `smoothed`, are the state estimates using all observations for all time
steps. The latter three estimates use parameters learned via 10 iterations of
the EM algorithm.
The second figure contains a single line representing the likelihood of the
observed data as a function of the EM Algorithm iteration.
'''
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import time
measurements = np.asarray([(399,293),(403,299),(409,308),(416,315),(418,318),(420,323),(429,326),(423,328),(429,334),(431,337),(433,342),(434,352),(434,349),(433,350),(431,350),(430,349),(428,347),(427,345),(425,341),(429,338),(431,328),(410,313),(406,306),(402,299),(397,291),(391,294),(376,270),(372,272),(351,248),(336,244),(327,236),(307,220)])
initial_state_mean = [measurements[0, 0],
0,
measurements[0, 1],
0]
transition_matrix = [[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 1]]
observation_matrix = [[1, 0, 0, 0],
[0, 0, 1, 0]]
kf1 = KalmanFilter(transition_matrices = transition_matrix,
observation_matrices = observation_matrix,
initial_state_mean = initial_state_mean)
kf1 = kf1.em(measurements, n_iter=5)
(smoothed_state_means, smoothed_state_covariances) = kf1.smooth(measurements)
'''
=============================
EM for Linear-Gaussian Models
=============================
This example shows how one may use the EM algorithm to estimate model
parameters with a Kalman Filter.
The EM algorithm is a meta-algorithm for learning parameters in probabilistic
models. The algorithm works by first fixing the parameters and finding a closed
form distribution over the unobserved variables, then finds new parameters that
maximize the expected likelihood of the observed variables (where the
expectation is taken over the unobserved ones). Due to convexity arguments, we
are guaranteed that each iteration of the algorithm will increase the
likelihood of the observed data and that it will eventually reach a local
optimum.
The EM algorithm is applied to the Linear-Gaussian system (that is, the model
assumed by the Kalman Filter) by first using the Kalman Smoother to calculate
the distribution over all unobserved variables (in this case, the hidden target
states), then closed-form update equations are used to update the model
parameters.
The first figure plotted contains 4 sets of lines. The first, labeled `true`,
represents the true, unobserved state of the system. The second, labeled
`blind`, represents the predicted state of the system if no measurements are
incorporated. The third, labeled `filtered`, are the state estimates given
measurements up to and including the current time step. Finally, the fourth,
labeled `smoothed`, are the state estimates using all observations for all time
steps. The latter three estimates use parameters learned via 10 iterations of
the EM algorithm.
The second figure contains a single line representing the likelihood of the
observed data as a function of the EM Algorithm iteration.
'''
from pykalman import KalmanFilter
import numpy as np
import matplotlib.pyplot as plt
import time
measurements = np.asarray([(399,293),(403,299),(409,308),(416,315),(418,318),(420,323),(429,326),(423,328),(429,334),(431,337),(433,342),(434,352),(434,349),(433,350),(431,350),(430,349),(428,347),(427,345),(425,341),(429,338),(431,328),(410,313),(406,306),(402,299),(397,291),(391,294),(376,270),(372,272),(351,248),(336,244),(327,236),(307,220)])
initial_state_mean = [measurements[0, 0],
0,
measurements[0, 1],
0]
transition_matrix = [[1, 1, 0, 0],
[0, 1, 0, 0],
[0, 0, 1, 1],
[0, 0, 0, 1]]
observation_matrix = [[1, 0, 0, 0],
[0, 0, 1, 0]]
kf1 = KalmanFilter(transition_matrices = transition_matrix,
observation_matrices = observation_matrix,
initial_state_mean = initial_state_mean)
kf1 = kf1.em(measurements, n_iter=5)
(smoothed_state_means, smoothed_state_covariances) = kf1.smooth(measurements)
plt.figure(1)
times = range(measurements.shape[0])
plt.plot(times, measurements[:, 0], 'bo',
times, measurements[:, 1], 'ro',
times, smoothed_state_means[:, 0], 'b--',
times, smoothed_state_means[:, 2], 'r--',)
plt.show() | 49.541667 | 348 | 0.697056 |
67fd71b159a22e60b64a07348a0a3e35c2a3b7e5 | 382 | py | Python | phyutil/__init__.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | 1 | 2018-03-22T15:18:54.000Z | 2018-03-22T15:18:54.000Z | phyutil/__init__.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | null | null | null | phyutil/__init__.py | frib-high-level-controls/phyhlc | 6486607e3aa0212054a12e9f2ad1a3ef15542f48 | [
"BSD-3-Clause"
] | null | null | null | # encoding: UTF-8
"""Physics Applications Utility"""
__copyright__ = "Copyright (c) 2015, Facility for Rare Isotope Beams"
__author__ = "Dylan Maxwell"
__version__ = "0.0.1"
import logging
import phylib
import machine
from machine import *
from phylib.libCore import *
# configure the root logger
logging.basicConfig(format="%(levelname)s: %(asctime)s: %(name)s: %(message)s")
| 21.222222 | 79 | 0.740838 |
67fdbf96ac87d3b403bf853041d7bc6c394c1dfd | 1,902 | py | Python | pydyn/explicit_blocks.py | chhokrad/PYPOWER-Dynamics | e6e42fc6975828a51cd01c42a81d7a45844f323f | [
"BSD-3-Clause"
] | null | null | null | pydyn/explicit_blocks.py | chhokrad/PYPOWER-Dynamics | e6e42fc6975828a51cd01c42a81d7a45844f323f | [
"BSD-3-Clause"
] | null | null | null | pydyn/explicit_blocks.py | chhokrad/PYPOWER-Dynamics | e6e42fc6975828a51cd01c42a81d7a45844f323f | [
"BSD-3-Clause"
] | 1 | 2021-09-13T14:34:41.000Z | 2021-09-13T14:34:41.000Z | #!python3
#
# Copyright (C) 2014-2015 Julius Susanto. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""
PYPOWER-Dynamics
Functions for standard blocks (solves a step)
"""
import numpy as np
# Gain block
# yo = p * yi
# p is a scalar gain coefficient
# Divide block
# yo = yi / p
# p is a scalar gain coefficient
# Integrator block
# K / sT
# p = [K, T]
# Lag block
# K / (1 + sT)
# p = [K, T]
# Lead-Lag block
# (1 + sTa) / (1 + sTb)
# p = [Ta, Tb]
# Limiter block
# yo = min_lim, if yi < min_lim
# yo = max_lim, if yi > max_lim
# yo = yi, min_lim <= yi <= max_lim
# p = [min_lim, max_lim]
# Multiplication block
# yo = yi1 * yi2 * ... * yin
# yi = [yi1, yi2, ... yin]
# Summation block
# yo = yi1 + yi2 + ... + yin
# yi = [yi1, yi2, ... yin]
# Washout block
# (s / (1 + sT)
# p is the time constant T
| 15.463415 | 69 | 0.532072 |
67ff40cfd4c8a6b2e69d26c388ef6020f73b4c94 | 2,151 | py | Python | river/migrations/0012_auto_20191113_1550.py | xuziheng1002/django-river | 7c7f23aa4790e451019c3e2b4d29f35852de17e6 | [
"BSD-3-Clause"
] | null | null | null | river/migrations/0012_auto_20191113_1550.py | xuziheng1002/django-river | 7c7f23aa4790e451019c3e2b4d29f35852de17e6 | [
"BSD-3-Clause"
] | null | null | null | river/migrations/0012_auto_20191113_1550.py | xuziheng1002/django-river | 7c7f23aa4790e451019c3e2b4d29f35852de17e6 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-11-13 21:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 45.765957 | 191 | 0.664807 |
db00271e05f78081485f6f0bf77fff9b5da0dd36 | 929 | py | Python | nesta/packages/examples/tests/test_example_package.py | anniyanvr/nesta | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 13 | 2019-06-18T16:53:53.000Z | 2021-03-04T10:58:52.000Z | nesta/packages/examples/tests/test_example_package.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 208 | 2018-08-10T13:15:40.000Z | 2021-07-21T10:16:07.000Z | nesta/packages/examples/tests/test_example_package.py | nestauk/old_nesta_daps | 4b3ae79922cebde0ad33e08ac4c40b9a10e8e7c3 | [
"MIT"
] | 8 | 2018-09-20T15:19:23.000Z | 2020-12-15T17:41:34.000Z | from collections import namedtuple
import pytest
from nesta.packages.examples.example_package import some_func
| 33.178571 | 85 | 0.697524 |
db0097f13bc0f850f8b50c6cc9087132aa46c5fd | 6,408 | py | Python | test/test_misc.py | mhthies/smarthomeconnect | d93d1038145285af66769ebf10589c1088b323ed | [
"Apache-2.0"
] | 5 | 2021-07-02T21:48:45.000Z | 2021-12-12T21:55:42.000Z | test/test_misc.py | mhthies/smarthomeconnect | d93d1038145285af66769ebf10589c1088b323ed | [
"Apache-2.0"
] | 49 | 2020-09-18T20:05:55.000Z | 2022-03-05T19:51:33.000Z | test/test_misc.py | mhthies/smarthomeconnect | d93d1038145285af66769ebf10589c1088b323ed | [
"Apache-2.0"
] | 1 | 2021-12-10T14:50:43.000Z | 2021-12-10T14:50:43.000Z | import asyncio
import unittest
import unittest.mock
import shc.misc
from test._helper import ExampleSubscribable, ExampleWritable, async_test, ExampleReadable
| 37.473684 | 106 | 0.666042 |
db00bdc9b4970c171632e8c7e85bbb5706127395 | 27,709 | py | Python | pysatSpaceWeather/instruments/sw_f107.py | JonathonMSmith/pysatSpaceWeather | b403a14bd9a37dd010e97be6e5da15c54a87b888 | [
"BSD-3-Clause"
] | 3 | 2021-02-02T05:33:46.000Z | 2022-01-20T16:54:35.000Z | pysatSpaceWeather/instruments/sw_f107.py | JonathonMSmith/pysatSpaceWeather | b403a14bd9a37dd010e97be6e5da15c54a87b888 | [
"BSD-3-Clause"
] | 48 | 2020-08-13T22:05:06.000Z | 2022-01-21T22:48:14.000Z | pysatSpaceWeather/instruments/sw_f107.py | JonathonMSmith/pysatSpaceWeather | b403a14bd9a37dd010e97be6e5da15c54a87b888 | [
"BSD-3-Clause"
] | 3 | 2021-02-02T05:33:54.000Z | 2021-08-19T17:14:24.000Z | # -*- coding: utf-8 -*-
"""Supports F10.7 index values. Downloads data from LASP and the SWPC.
Properties
----------
platform
'sw'
name
'f107'
tag
- 'historic' LASP F10.7 data (downloads by month, loads by day)
- 'prelim' Preliminary SWPC daily solar indices
- 'daily' Daily SWPC solar indices (contains last 30 days)
- 'forecast' Grab forecast data from SWPC (next 3 days)
- '45day' 45-Day Forecast data from the Air Force
Example
-------
Download and load all of the historic F10.7 data. Note that it will not
stop on the current date, but a point in the past when post-processing has
been successfully completed.
::
f107 = pysat.Instrument('sw', 'f107', tag='historic')
f107.download(start=f107.lasp_stime, stop=f107.today(), freq='MS')
f107.load(date=f107.lasp_stime, end_date=f107.today())
Note
----
The forecast data is stored by generation date, where each file contains the
forecast for the next three days. Forecast data downloads are only supported
for the current day. When loading forecast data, the date specified with the
load command is the date the forecast was generated. The data loaded will span
three days. To always ensure you are loading the most recent data, load
the data with tomorrow's date.
::
f107 = pysat.Instrument('sw', 'f107', tag='forecast')
f107.download()
f107.load(date=f107.tomorrow())
Warnings
--------
The 'forecast' F10.7 data loads three days at a time. Loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pyast.Instrument object is not appropriate for 'forecast'
data.
Like 'forecast', the '45day' forecast loads a specific period of time (45 days)
and subsequent files contain overlapping data. Thus, loading multiple files,
loading multiple days, the data padding feature, and multi_file_day feature
available from the pyast.Instrument object is not appropriate for '45day' data.
"""
import datetime as dt
import ftplib
import json
import numpy as np
import os
import requests
import sys
import warnings
import pandas as pds
import pysat
from pysatSpaceWeather.instruments.methods import f107 as mm_f107
from pysatSpaceWeather.instruments.methods.ace import load_csv_data
from pysatSpaceWeather.instruments.methods import general
logger = pysat.logger
# ----------------------------------------------------------------------------
# Instrument attributes
platform = 'sw'
name = 'f107'
tags = {'historic': 'Daily LASP value of F10.7',
'prelim': 'Preliminary SWPC daily solar indices',
'daily': 'Daily SWPC solar indices (contains last 30 days)',
'forecast': 'SWPC Forecast F107 data next (3 days)',
'45day': 'Air Force 45-day Forecast'}
# Dict keyed by inst_id that lists supported tags for each inst_id
inst_ids = {'': [tag for tag in tags.keys()]}
# Dict keyed by inst_id that lists supported tags and a good day of test data
# generate todays date to support loading forecast data
now = dt.datetime.utcnow()
today = dt.datetime(now.year, now.month, now.day)
tomorrow = today + pds.DateOffset(days=1)
# The LASP archive start day is also important
lasp_stime = dt.datetime(1947, 2, 14)
# ----------------------------------------------------------------------------
# Instrument test attributes
_test_dates = {'': {'historic': dt.datetime(2009, 1, 1),
'prelim': dt.datetime(2009, 1, 1),
'daily': tomorrow,
'forecast': tomorrow,
'45day': tomorrow}}
# Other tags assumed to be True
_test_download_travis = {'': {'prelim': False}}
# ----------------------------------------------------------------------------
# Instrument methods
preprocess = general.preprocess
def init(self):
"""Initializes the Instrument object with instrument specific values.
Runs once upon instantiation.
"""
self.acknowledgements = mm_f107.acknowledgements(self.name, self.tag)
self.references = mm_f107.references(self.name, self.tag)
logger.info(self.acknowledgements)
# Define the historic F10.7 starting time
if self.tag == 'historic':
self.lasp_stime = lasp_stime
return
def clean(self):
""" Cleaning function for Space Weather indices
Note
----
F10.7 doesn't require cleaning
"""
return
# ----------------------------------------------------------------------------
# Instrument functions
def load(fnames, tag=None, inst_id=None):
"""Load F10.7 index files
Parameters
----------
fnames : pandas.Series
Series of filenames
tag : str or NoneType
tag or None (default=None)
inst_id : str or NoneType
satellite id or None (default=None)
Returns
-------
data : pandas.DataFrame
Object containing satellite data
meta : pysat.Meta
Object containing metadata such as column names and units
Note
----
Called by pysat. Not intended for direct use by user.
"""
# Get the desired file dates and file names from the daily indexed list
file_dates = list()
if tag in ['historic', 'prelim']:
unique_files = list()
for fname in fnames:
file_dates.append(dt.datetime.strptime(fname[-10:], '%Y-%m-%d'))
if fname[0:-11] not in unique_files:
unique_files.append(fname[0:-11])
fnames = unique_files
# Load the CSV data files
data = load_csv_data(fnames, read_csv_kwargs={"index_col": 0,
"parse_dates": True})
# If there is a date range, downselect here
if len(file_dates) > 0:
idx, = np.where((data.index >= min(file_dates))
& (data.index < max(file_dates) + dt.timedelta(days=1)))
data = data.iloc[idx, :]
# Initialize the metadata
meta = pysat.Meta()
meta['f107'] = {meta.labels.units: 'SFU',
meta.labels.name: 'F10.7 cm solar index',
meta.labels.notes: '',
meta.labels.desc:
'F10.7 cm radio flux in Solar Flux Units (SFU)',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
if tag == '45day':
meta['ap'] = {meta.labels.units: '',
meta.labels.name: 'Daily Ap index',
meta.labels.notes: '',
meta.labels.desc: 'Daily average of 3-h ap indices',
meta.labels.fill_val: np.nan,
meta.labels.min_val: 0,
meta.labels.max_val: 400}
elif tag == 'daily' or tag == 'prelim':
meta['ssn'] = {meta.labels.units: '',
meta.labels.name: 'Sunspot Number',
meta.labels.notes: '',
meta.labels.desc: 'SESC Sunspot Number',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['ss_area'] = {meta.labels.units: '10$^-6$ Solar Hemisphere',
meta.labels.name: 'Sunspot Area',
meta.labels.notes: '',
meta.labels.desc:
''.join(['Sunspot Area in Millionths of the ',
'Visible Hemisphere']),
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: 1.0e6}
meta['new_reg'] = {meta.labels.units: '',
meta.labels.name: 'New Regions',
meta.labels.notes: '',
meta.labels.desc: 'New active solar regions',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['smf'] = {meta.labels.units: 'G',
meta.labels.name: 'Solar Mean Field',
meta.labels.notes: '',
meta.labels.desc: 'Standford Solar Mean Field',
meta.labels.fill_val: -999,
meta.labels.min_val: 0,
meta.labels.max_val: np.inf}
meta['goes_bgd_flux'] = {meta.labels.units: 'W/m^2',
meta.labels.name: 'X-ray Background Flux',
meta.labels.notes: '',
meta.labels.desc:
'GOES15 X-ray Background Flux',
meta.labels.fill_val: '*',
meta.labels.min_val: -np.inf,
meta.labels.max_val: np.inf}
meta['c_flare'] = {meta.labels.units: '',
meta.labels.name: 'C X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'C-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['m_flare'] = {meta.labels.units: '',
meta.labels.name: 'M X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'M-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['x_flare'] = {meta.labels.units: '',
meta.labels.name: 'X X-Ray Flares',
meta.labels.notes: '',
meta.labels.desc: 'X-class X-Ray Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o1_flare'] = {meta.labels.units: '',
meta.labels.name: '1 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '1-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o2_flare'] = {meta.labels.units: '',
meta.labels.name: '2 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '2-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
meta['o3_flare'] = {meta.labels.units: '',
meta.labels.name: '3 Optical Flares',
meta.labels.notes: '',
meta.labels.desc: '3-class Optical Flares',
meta.labels.fill_val: -1,
meta.labels.min_val: 0,
meta.labels.max_val: 9}
return data, meta
def list_files(tag=None, inst_id=None, data_path=None, format_str=None):
"""Return a Pandas Series of every file for F10.7 data
Parameters
----------
tag : string or NoneType
Denotes type of file to load.
(default=None)
inst_id : string or NoneType
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : string or NoneType
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
format_str : string or NoneType
User specified file format. If None is specified, the default
formats associated with the supplied tags are used. (default=None)
Returns
-------
out_files : pysat._files.Files
A class containing the verified available files
Note
----
Called by pysat. Not intended for direct use by user.
"""
if data_path is not None:
if tag == 'historic':
# Files are by month, going to add date to monthly filename for
# each day of the month. The load routine will load a month of
# data and use the appended date to select out appropriate data.
if format_str is None:
format_str = 'f107_monthly_{year:04d}-{month:02d}.txt'
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
out_files.loc[out_files.index[-1] + pds.DateOffset(months=1)
- pds.DateOffset(days=1)] = out_files.iloc[-1]
out_files = out_files.asfreq('D', 'pad')
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag == 'prelim':
# Files are by year (and quarter)
if format_str is None:
format_str = ''.join(['f107_prelim_{year:04d}_{month:02d}',
'_v{version:01d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
if not out_files.empty:
# Set each file's valid length at a 1-day resolution
orig_files = out_files.sort_index().copy()
new_files = list()
for orig in orig_files.iteritems():
# Version determines each file's valid length
version = int(orig[1].split("_v")[1][0])
doff = pds.DateOffset(years=1) if version == 2 \
else pds.DateOffset(months=3)
istart = orig[0]
iend = istart + doff - pds.DateOffset(days=1)
# Ensure the end time does not extend past the number of
# possible days included based on the file's download time
fname = os.path.join(data_path, orig[1])
dend = dt.datetime.utcfromtimestamp(os.path.getctime(fname))
dend = dend - pds.DateOffset(days=1)
if dend < iend:
iend = dend
# Pad the original file index
out_files.loc[iend] = orig[1]
out_files = out_files.sort_index()
# Save the files at a daily cadence over the desired period
new_files.append(out_files.loc[istart:
iend].asfreq('D', 'pad'))
# Add the newly indexed files to the file output
out_files = pds.concat(new_files, sort=True)
out_files = out_files.dropna()
out_files = out_files.sort_index()
out_files = out_files + '_' + out_files.index.strftime(
'%Y-%m-%d')
elif tag in ['daily', 'forecast', '45day']:
format_str = ''.join(['f107_', tag,
'_{year:04d}-{month:02d}-{day:02d}.txt'])
out_files = pysat.Files.from_os(data_path=data_path,
format_str=format_str)
# Pad list of files data to include most recent file under tomorrow
if not out_files.empty:
pds_off = pds.DateOffset(days=1)
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
out_files.loc[out_files.index[-1]
+ pds_off] = out_files.values[-1]
else:
raise ValueError(' '.join(('Unrecognized tag name for Space',
'Weather Index F107:', tag)))
else:
raise ValueError(' '.join(('A data_path must be passed to the loading',
'routine for F107')))
return out_files
def download(date_array, tag, inst_id, data_path, update_files=False):
"""Routine to download F107 index data
Parameters
-----------
date_array : list-like
Sequence of dates to download date for.
tag : string or NoneType
Denotes type of file to load.
inst_id : string or NoneType
Specifies the satellite ID for a constellation.
data_path : string or NoneType
Path to data directory.
update_files : bool
Re-download data for files that already exist if True (default=False)
Note
----
Called by pysat. Not intended for direct use by user.
Warnings
--------
Only able to download current forecast data, not archived forecasts.
"""
# download standard F107 data
if tag == 'historic':
# Test the date array, updating it if necessary
if date_array.freq != 'MS':
warnings.warn(''.join(['Historic F10.7 downloads should be invoked',
" with the `freq='MS'` option."]))
date_array = pysat.utils.time.create_date_range(
dt.datetime(date_array[0].year, date_array[0].month, 1),
date_array[-1], freq='MS')
# Download from LASP, by month
for dl_date in date_array:
# Create the name to which the local file will be saved
str_date = dl_date.strftime('%Y-%m')
data_file = os.path.join(data_path,
'f107_monthly_{:s}.txt'.format(str_date))
if update_files or not os.path.isfile(data_file):
# Set the download webpage
dstr = ''.join(['http://lasp.colorado.edu/lisird/latis/dap/',
'noaa_radio_flux.json?time%3E=',
dl_date.strftime('%Y-%m-%d'),
'T00:00:00.000Z&time%3C=',
(dl_date + pds.DateOffset(months=1)
- pds.DateOffset(days=1)).strftime('%Y-%m-%d'),
'T00:00:00.000Z'])
# The data is returned as a JSON file
req = requests.get(dstr)
# Process the JSON file
raw_dict = json.loads(req.text)['noaa_radio_flux']
data = pds.DataFrame.from_dict(raw_dict['samples'])
if data.empty:
warnings.warn("no data for {:}".format(dl_date),
UserWarning)
else:
# The file format changed over time
try:
# This is the new data format
times = [dt.datetime.strptime(time, '%Y%m%d')
for time in data.pop('time')]
except ValueError:
# Accepts old file formats
times = [dt.datetime.strptime(time, '%Y %m %d')
for time in data.pop('time')]
data.index = times
# Replace fill value with NaNs
idx, = np.where(data['f107'] == -99999.0)
data.iloc[idx, :] = np.nan
# Create a local CSV file
data.to_csv(data_file, header=True)
elif tag == 'prelim':
ftp = ftplib.FTP('ftp.swpc.noaa.gov') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/indices/old_indices')
bad_fname = list()
# Get the local files, to ensure that the version 1 files are
# downloaded again if more data has been added
local_files = list_files(tag, inst_id, data_path)
# To avoid downloading multiple files, cycle dates based on file length
dl_date = date_array[0]
while dl_date <= date_array[-1]:
# The file name changes, depending on how recent the requested
# data is
qnum = (dl_date.month - 1) // 3 + 1 # Integer floor division
qmonth = (qnum - 1) * 3 + 1
quar = 'Q{:d}_'.format(qnum)
fnames = ['{:04d}{:s}DSD.txt'.format(dl_date.year, ss)
for ss in ['_', quar]]
versions = ["01_v2", "{:02d}_v1".format(qmonth)]
vend = [dt.datetime(dl_date.year, 12, 31),
dt.datetime(dl_date.year, qmonth, 1)
+ pds.DateOffset(months=3) - pds.DateOffset(days=1)]
downloaded = False
rewritten = False
# Attempt the download(s)
for iname, fname in enumerate(fnames):
# Test to see if we already tried this filename
if fname in bad_fname:
continue
local_fname = fname
saved_fname = os.path.join(data_path, local_fname)
ofile = '_'.join(['f107', 'prelim',
'{:04d}'.format(dl_date.year),
'{:s}.txt'.format(versions[iname])])
outfile = os.path.join(data_path, ofile)
if os.path.isfile(outfile):
downloaded = True
# Check the date to see if this should be rewritten
checkfile = os.path.split(outfile)[-1]
has_file = local_files == checkfile
if np.any(has_file):
if has_file[has_file].index[-1] < vend[iname]:
# This file will be updated again, but only attempt
# to do so if enough time has passed from the
# last time it was downloaded
yesterday = today - pds.DateOffset(days=1)
if has_file[has_file].index[-1] < yesterday:
rewritten = True
else:
# The file does not exist, if it can be downloaded, it
# should be 'rewritten'
rewritten = True
# Attempt to download if the file does not exist or if the
# file has been updated
if rewritten or not downloaded:
try:
sys.stdout.flush()
ftp.retrbinary('RETR ' + fname,
open(saved_fname, 'wb').write)
downloaded = True
logger.info(' '.join(('Downloaded file for ',
dl_date.strftime('%x'))))
except ftplib.error_perm as exception:
# Could not fetch, so cannot rewrite
rewritten = False
# Test for an error
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise RuntimeError(exception)
else:
# file isn't actually there, try the next name
os.remove(saved_fname)
# Save this so we don't try again
# Because there are two possible filenames for
# each time, it's ok if one isn't there. We just
# don't want to keep looking for it.
bad_fname.append(fname)
# If the first file worked, don't try again
if downloaded:
break
if not downloaded:
logger.info(' '.join(('File not available for',
dl_date.strftime('%x'))))
elif rewritten:
with open(saved_fname, 'r') as fprelim:
lines = fprelim.read()
mm_f107.rewrite_daily_file(dl_date.year, outfile, lines)
os.remove(saved_fname)
# Cycle to the next date
dl_date = vend[iname] + pds.DateOffset(days=1)
# Close connection after downloading all dates
ftp.close()
elif tag == 'daily':
logger.info('This routine can only download the latest 30 day file')
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/daily-solar-indices.txt'
req = requests.get(furl)
# Save the output
data_file = 'f107_daily_{:s}.txt'.format(today.strftime('%Y-%m-%d'))
outfile = os.path.join(data_path, data_file)
mm_f107.rewrite_daily_file(today.year, outfile, req.text)
elif tag == 'forecast':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = ''.join(('https://services.swpc.noaa.gov/text/',
'3-day-solar-geomag-predictions.txt'))
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get starting date of the forecasts
raw_data = req.text.split(':Prediction_dates:')[-1]
forecast_date = dt.datetime.strptime(raw_data[3:14], '%Y %b %d')
# Set the times for output data
times = pds.date_range(forecast_date, periods=3, freq='1D')
# String data is the forecast value for the next three days
raw_data = req.text.split('10cm_flux:')[-1]
raw_data = raw_data.split('\n')[1]
val1 = int(raw_data[24:27])
val2 = int(raw_data[38:41])
val3 = int(raw_data[52:])
# Put data into nicer DataFrame
data = pds.DataFrame([val1, val2, val3], index=times, columns=['f107'])
# Write out as a file
data_file = 'f107_forecast_{:s}.txt'.format(
dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
elif tag == '45day':
logger.info(' '.join(('This routine can only download the current',
'forecast, not archived forecasts')))
# Set the download webpage
furl = 'https://services.swpc.noaa.gov/text/45-day-ap-forecast.txt'
req = requests.get(furl)
# Parse text to get the date the prediction was generated
date_str = req.text.split(':Issued: ')[-1].split(' UTC')[0]
dl_date = dt.datetime.strptime(date_str, '%Y %b %d %H%M')
# Get to the forecast data
raw_data = req.text.split('45-DAY AP FORECAST')[-1]
# Grab AP part
raw_ap = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[0]
raw_ap = raw_ap.split('\n')[1:-1]
# Get the F107
raw_f107 = raw_data.split('45-DAY F10.7 CM FLUX FORECAST')[-1]
raw_f107 = raw_f107.split('\n')[1:-4]
# Parse the AP data
ap_times, ap = mm_f107.parse_45day_block(raw_ap)
# Parse the F10.7 data
f107_times, f107 = mm_f107.parse_45day_block(raw_f107)
# Collect into DataFrame
data = pds.DataFrame(f107, index=f107_times, columns=['f107'])
data['ap'] = ap
# Write out as a file
data_file = 'f107_45day_{:s}.txt'.format(dl_date.strftime('%Y-%m-%d'))
data.to_csv(os.path.join(data_path, data_file), header=True)
return
| 40.688693 | 80 | 0.521383 |
db031f4543bacf2c603d4a3ccb452d553dc3e0d6 | 486 | py | Python | user/migrations/0004_auto_20200813_1948.py | VladimirZubavlenko/ikaf42-app | 240e012675e4347370289554f34d9c60c8b6f35d | [
"MIT"
] | null | null | null | user/migrations/0004_auto_20200813_1948.py | VladimirZubavlenko/ikaf42-app | 240e012675e4347370289554f34d9c60c8b6f35d | [
"MIT"
] | null | null | null | user/migrations/0004_auto_20200813_1948.py | VladimirZubavlenko/ikaf42-app | 240e012675e4347370289554f34d9c60c8b6f35d | [
"MIT"
] | null | null | null | # Generated by Django 3.0.5 on 2020-08-13 19:48
from django.db import migrations, models
| 25.578947 | 144 | 0.652263 |
db03fc21b23af129e340ee65486e184e179cf632 | 1,394 | py | Python | vfoot/graphics/__init__.py | filipecn/vfoot | 3059f5bb471b6bdf92a18a7cdb6b33a2c8852046 | [
"MIT"
] | null | null | null | vfoot/graphics/__init__.py | filipecn/vfoot | 3059f5bb471b6bdf92a18a7cdb6b33a2c8852046 | [
"MIT"
] | null | null | null | vfoot/graphics/__init__.py | filipecn/vfoot | 3059f5bb471b6bdf92a18a7cdb6b33a2c8852046 | [
"MIT"
] | null | null | null | import glfw
import OpenGL.GL as gl
import imgui
from imgui.integrations.glfw import GlfwRenderer
| 27.333333 | 67 | 0.677188 |
db04b4c5b6cb46accefdb0e93dbb064e76e6bb44 | 1,472 | py | Python | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 4 | 2018-09-07T15:35:24.000Z | 2019-03-27T09:48:12.000Z | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 371 | 2020-03-04T21:51:56.000Z | 2022-03-31T20:59:11.000Z | master/rabbitvcs-master/rabbitvcs-master/rabbitvcs/util/_locale.py | AlexRogalskiy/DevArtifacts | 931aabb8cbf27656151c54856eb2ea7d1153203a | [
"MIT"
] | 3 | 2019-06-18T19:57:17.000Z | 2020-11-06T03:55:08.000Z | from __future__ import absolute_import
import locale
import os
from rabbitvcs.util.log import Log
import rabbitvcs.util.settings
import rabbitvcs.util.helper
log = Log("rabbitvcs.util.locale")
| 36.8 | 87 | 0.63587 |
db05538cc85061ce7b28bead1b966a843722b5be | 7,378 | py | Python | vectorize_enriched_api.py | mfejzer/tracking_buggy_files | 161095f315a94709ef74ab4bb6696889537aaa6a | [
"MIT"
] | 3 | 2019-08-06T05:29:53.000Z | 2021-05-23T08:23:10.000Z | vectorize_enriched_api.py | mfejzer/tracking_buggy_files | 161095f315a94709ef74ab4bb6696889537aaa6a | [
"MIT"
] | 5 | 2020-04-23T18:29:06.000Z | 2021-12-09T21:21:57.000Z | vectorize_enriched_api.py | mfejzer/tracking_buggy_files | 161095f315a94709ef74ab4bb6696889537aaa6a | [
"MIT"
] | 1 | 2021-05-23T08:23:12.000Z | 2021-05-23T08:23:12.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Usage: %(scriptName) <bug_report_file> <data_prefix>
"""
import json
from timeit import default_timer
import datetime
import numpy as np
import pickle
import sys
from multiprocessing import Pool
from operator import itemgetter
from scipy import sparse
from sklearn.feature_extraction.text import TfidfTransformer
from tqdm import tqdm
from unqlite import UnQLite
from date_utils import convert_commit_date
def load_bug_reports(bug_report_file_path):
"""load bug report file (the one generated from xml)"""
with open(bug_report_file_path) as bug_report_file:
bug_reports = json.load(bug_report_file)
return bug_reports
if __name__ == '__main__':
main()
| 33.234234 | 126 | 0.719301 |
db063dcff6ca568e771df05b7ae7f650c6cd2aea | 4,270 | py | Python | interpreter.py | bendmorris/beaver | 4db3e1690145dee89d30144f3632396313218214 | [
"MIT"
] | 2 | 2018-10-06T08:35:41.000Z | 2019-04-03T21:15:02.000Z | interpreter.py | bendmorris/beaver | 4db3e1690145dee89d30144f3632396313218214 | [
"MIT"
] | null | null | null | interpreter.py | bendmorris/beaver | 4db3e1690145dee89d30144f3632396313218214 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
from lib.graph import Graph
from lib.types import BeaverException, Uri
from lib.command import OutCommand
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from __init__ import __version__
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--version', help='print version and exit', action='version', version=__version__)
arg_parser.add_argument('-t', '--test', help='run unit tests and exit', action='store_true')
arg_parser.add_argument('file', nargs='*', help='file to be interpreted')
arg_parser.add_argument('-i', '--interactive', help='enter interactive mode after interpreting file', action='store_true')
arg_parser.add_argument('-e', '--eval', help='string to be evaluated')
arg_parser.add_argument('-v', '--verbose', help='print each triple statement as evaluated', action='store_true')
arg_parser.add_argument('-d', '--draw', help='output an image of the resulting graph to the given image file; image type is inferred from file extension')
arg_parser.add_argument('-o', '--out', help='serialize the resulting graph to the given output file (using Turtle)', nargs='?', const=True, default=None)
args = arg_parser.parse_args()
#print args.__dict__
if args.test:
import tests
tests.run_tests(verbose=args.verbose)
sys.exit()
if not sys.stdin.isatty():
# read and evaluate piped input
if args.eval is None: args.eval = ''
args.eval = sys.stdin.read() + args.eval
interactive = (not args.file and not args.eval) or (args.interactive and sys.stdin.isatty())
if __name__ == '__main__': run()
| 32.846154 | 154 | 0.544028 |
db0693e026c74e759573c7252d4aff5ef90ae5ad | 242 | py | Python | euler/28.py | DevStarSJ/algorithmExercise | 66b42c54cdd594ff3f229613fd83446f8c1f9153 | [
"MIT"
] | null | null | null | euler/28.py | DevStarSJ/algorithmExercise | 66b42c54cdd594ff3f229613fd83446f8c1f9153 | [
"MIT"
] | null | null | null | euler/28.py | DevStarSJ/algorithmExercise | 66b42c54cdd594ff3f229613fd83446f8c1f9153 | [
"MIT"
] | null | null | null |
print(get_cross_sum(501)) | 18.615385 | 37 | 0.516529 |
db06e9490bbc299985803b6daf8dbca9d83d6fc3 | 1,509 | py | Python | titan/react_view_pkg/router/resources.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | titan/react_view_pkg/router/resources.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | titan/react_view_pkg/router/resources.py | mnieber/gen | 65f8aa4fb671c4f90d5cbcb1a0e10290647a31d9 | [
"MIT"
] | null | null | null | import typing as T
from dataclasses import dataclass, field
from moonleap import Resource
from titan.react_pkg.component import Component
def reduce_router_configs(router_configs, base_route):
result = []
for router_config in router_configs:
child_components = getattr(router_config.component.typ, "child_components", [])
for child_component in child_components:
# The last router config always corresponds to the child component itself.
# Any preceeding router configs supply dependencies
# (e.g. state providers, load effects, etc)
supporting_router_configs = child_component.typ.create_router_configs(
named_component=child_component
)[:-1]
if not supporting_router_configs:
continue
preceeding_router_configs = reduce_router_configs(supporting_router_configs)
result = concat_router_configs(preceeding_router_configs, result)
result.extend(router_configs)
return result
def concat_router_configs(first, second):
first_components = [x.component for x in first]
second_filtered = [x for x in second if x.component not in first_components]
return first + second_filtered
| 32.106383 | 88 | 0.713718 |
db077393470e53a796d0d72580ad3f3064dd2bda | 2,119 | py | Python | lab-taxi/agent.py | JunShern/deep-reinforcement-learning | 4c99d8e3b5c6df0ec7985a33611a16a791eb0041 | [
"MIT"
] | null | null | null | lab-taxi/agent.py | JunShern/deep-reinforcement-learning | 4c99d8e3b5c6df0ec7985a33611a16a791eb0041 | [
"MIT"
] | null | null | null | lab-taxi/agent.py | JunShern/deep-reinforcement-learning | 4c99d8e3b5c6df0ec7985a33611a16a791eb0041 | [
"MIT"
] | null | null | null | import numpy as np
from collections import defaultdict | 33.634921 | 153 | 0.591789 |
db07a7ea8e4f0634af5cfc5dde1a21fb51caf3b5 | 11,271 | py | Python | visicom_reverse_geocoding.py | zimirrr/visicom_reverse_geocoding | 3da913f80e934f8352bcc8abe9d24ba54bbc482a | [
"MIT"
] | null | null | null | visicom_reverse_geocoding.py | zimirrr/visicom_reverse_geocoding | 3da913f80e934f8352bcc8abe9d24ba54bbc482a | [
"MIT"
] | null | null | null | visicom_reverse_geocoding.py | zimirrr/visicom_reverse_geocoding | 3da913f80e934f8352bcc8abe9d24ba54bbc482a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
/***************************************************************************
VisicomReverseGeocoder
A QGIS plugin
plugin for reverse geocoding from visicom api
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2018-09-21
git sha : $Format:%H$
copyright : (C) 2018 by zimirrr
email : zimirrr@mail.ru
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from qgis.gui import *
from qgis.core import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .settings_dialog import Config
from .utils import pointToWGS84
from .visicom_api_parser import *
import os.path
import requests
| 35.332288 | 126 | 0.58167 |
db08017fe044db65092dd00ed22dea1c4564f406 | 699 | py | Python | test/module_dir/mymodule/__init__.py | honzajavorek/mkdocs_macros_plugin | c97c2e08e3c1cb9023b28a605784e0a7ac45b885 | [
"MIT"
] | null | null | null | test/module_dir/mymodule/__init__.py | honzajavorek/mkdocs_macros_plugin | c97c2e08e3c1cb9023b28a605784e0a7ac45b885 | [
"MIT"
] | null | null | null | test/module_dir/mymodule/__init__.py | honzajavorek/mkdocs_macros_plugin | c97c2e08e3c1cb9023b28a605784e0a7ac45b885 | [
"MIT"
] | null | null | null | import os
def define_env(env):
"""
This is the hook for the functions (new form)
"""
env.variables.cwd = os.getcwd()
# use dot notation for adding
env.variables.baz = env.variables.fix_url('foo')
# Optional: a special function for making relative urls point to root
fix_url = env.variables.fix_url
env.variables.special_docs_dir = env.variables.config['docs_dir'] | 20.558824 | 73 | 0.602289 |
db086691881d363f79126af6b8d208d584242b29 | 114,519 | py | Python | cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | cisco-ios-xe/ydk/models/cisco_ios_xe/MPLS_LDP_STD_MIB.py | Maikor/ydk-py | b86c4a7c570ae3b2c5557d098420446df5de4929 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | """ MPLS_LDP_STD_MIB
Copyright (C) The Internet Society (2004). The
initial version of this MIB module was published
in RFC 3815. For full legal notices see the RFC
itself or see\:
http\://www.ietf.org/copyrights/ianamib.html
This MIB contains managed object definitions for the
'Multiprotocol Label Switching, Label Distribution
Protocol, LDP' document.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
| 55.323188 | 1,407 | 0.642164 |
db0ab3da5d70c76acedaa4a8af65bab398892ba2 | 9,104 | py | Python | app/models/user.py | tonyngophd/dronest | f0976c31cbbf6fb032851bd42ac566bb381608f0 | [
"MIT"
] | 13 | 2021-02-03T13:26:59.000Z | 2021-03-24T19:34:19.000Z | app/models/user.py | suasllc/dronest | f0976c31cbbf6fb032851bd42ac566bb381608f0 | [
"MIT"
] | null | null | null | app/models/user.py | suasllc/dronest | f0976c31cbbf6fb032851bd42ac566bb381608f0 | [
"MIT"
] | 1 | 2021-06-07T17:56:58.000Z | 2021-06-07T17:56:58.000Z | from .db import db
from .userfollower import UserFollower
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from sqlalchemy import Table, Column, Integer, ForeignKey, or_
from .directmessage import DirectMessage
from .userequipment import UserEquipment
from .equipment import Equipment
from .message import Message
from .messagereceiver import MessageReceiver
from sqlalchemy.orm import validates
def to_dict_as_generic_profile(self):
'''
compared to "for_self" this does not include:
- messages
and more later
'''
self.get_followers()
self.get_following()
return {
"id": self.id,
"username": self.username,
"name": self.name,
"email": self.email,
"bio": self.bio,
"websiteUrl": self.websiteUrl,
"profilePicUrl": self.profilePicUrl,
"ownPosts": [post.to_dict() for post in self.ownPosts],
"likedPosts": [post.to_dict() for post in self.likedPosts],
"savedPosts": [post.to_dict() for post in self.savedPosts],
"taggedInPosts": [post.to_dict() for post in self.taggedInPosts],
"followers": [user.to_dict() for user in self.followers],
"following": [user.to_dict() for user in self.following],
"likedComments": [comment.to_dict() for comment in self.likedComments],
"taggedInComments": [comment.to_dict() for comment in self.taggedInComments],
"equipmentList": [equipment.to_dict() for equipment in self.equipmentList],
}
'''
mapper(
User, t_users,
properties={
'followers': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.followee_id==t_users.c.id),
secondaryjoin=(t_follows.c.follower_id==t_users.c.id),
),
'followees': relation(
User,
secondary=t_follows,
primaryjoin=(t_follows.c.follower_id==t_users.c.id),
secondaryjoin=(t_follows.c.followee_id==t_users.c.id),
),
},
)
'''
| 35.286822 | 174 | 0.672781 |
db0bae1eb24630016d687ec03ec4ffa465df2055 | 397 | py | Python | pyflu/update/signals.py | flupke/pyflu | 8856759ced5367fc8439a418b3ce6570b82707ce | [
"BSD-3-Clause"
] | 1 | 2017-07-17T06:50:24.000Z | 2017-07-17T06:50:24.000Z | pyflu/update/signals.py | flupke/pyflu | 8856759ced5367fc8439a418b3ce6570b82707ce | [
"BSD-3-Clause"
] | null | null | null | pyflu/update/signals.py | flupke/pyflu | 8856759ced5367fc8439a418b3ce6570b82707ce | [
"BSD-3-Clause"
] | null | null | null | from louie import Signal
| 22.055556 | 79 | 0.697733 |
db0cc1dc2ea1b2e1fa0e57ca089770ba09f4f7f8 | 9,443 | py | Python | sentence_transformers/losses/BatchHardTripletLoss.py | zhangxieyang2/sentence-transformers | 87847b86954f92d200fbb4351b0576f4778d9381 | [
"Apache-2.0"
] | 5 | 2021-08-10T02:31:51.000Z | 2022-02-08T01:12:25.000Z | sentence_transformers/losses/BatchHardTripletLoss.py | zhangxieyang2/sentence-transformers | 87847b86954f92d200fbb4351b0576f4778d9381 | [
"Apache-2.0"
] | 5 | 2021-07-02T04:37:04.000Z | 2021-07-21T00:02:58.000Z | sentence_transformers/losses/BatchHardTripletLoss.py | zhangxieyang2/sentence-transformers | 87847b86954f92d200fbb4351b0576f4778d9381 | [
"Apache-2.0"
] | 5 | 2021-07-04T06:02:02.000Z | 2021-07-21T08:32:10.000Z | import torch
from torch import nn, Tensor
from typing import Union, Tuple, List, Iterable, Dict
from sentence_transformers import util
from sentence_transformers.SentenceTransformer import SentenceTransformer
| 46.517241 | 162 | 0.684528 |
db0cd377f76bee16bf9abd7de52027837704b690 | 2,505 | py | Python | wedding/card/route.py | ackneal/wedday | b57b524e3aa237a2568bda4fadb2d5709773c507 | [
"MIT"
] | null | null | null | wedding/card/route.py | ackneal/wedday | b57b524e3aa237a2568bda4fadb2d5709773c507 | [
"MIT"
] | null | null | null | wedding/card/route.py | ackneal/wedday | b57b524e3aa237a2568bda4fadb2d5709773c507 | [
"MIT"
] | null | null | null | from flask import Flask, Blueprint, request, make_response, jsonify
from sqlalchemy.sql.expression import func
from google.cloud import storage
from .card import Cards
from ..functions import valid_param, upload_file
from .. import db
bp = Blueprint('route', __name__, url_prefix = '/api')
# , limit
| 29.127907 | 91 | 0.602395 |
db0de61b39c2d473b879ae1a407b8e263bd53ec2 | 6,804 | py | Python | mudi/utils.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | 1 | 2021-11-04T00:08:00.000Z | 2021-11-04T00:08:00.000Z | mudi/utils.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | null | null | null | mudi/utils.py | getzlab/mudi | eda170119708e59920c23a03834af915ecca24ce | [
"MIT"
] | null | null | null | import numpy as np
import h5py
import scipy
import gc
import pandas as pd
import os
import time
import pkg_resources
import scanpy as sc
import scanpy.external as sce
import sys
import scrublet as scr
# ---------------------------------
# Scanpy Helpers
# ---------------------------------
def scanpy_adata_loader(path, genome='GRCh38', verbose=True):
"""
Loader function.
------------------
Can handle lists of file/dir paths, a file (.h5) or a directory format.
Use this to load/aggregate immediately into a scanpy object.
"""
if isinstance(path, list):
if verbose:
print("Combining {} inputs.".format(len(path)))
tmp = [scanpy_adata_loader(f, genome=genome) for f in path]
return tmp[0].concatenate(tmp[1:])
if os.path.isfile(path):
ad = sc.read_10x_h5(path, genome=genome)
ad.var_names_make_unique()
return ad
elif os.path.isdir(path):
ad = sc.read_10x_mtx(path)
ad.var_names_make_unique()
return ad
else:
raise FileError("Provide proper path.")
def get_percent_expr(adata, groupby):
"""
Get percent expressed & mean expression.
------------------------------
Requires:
* adata.layers['counts'] -> counting percent of cells with gene expressed
* adata.raw.X -> for computing mean expression (log1p)
"""
from tqdm import tqdm
groups = list(adata.obs[groupby].cat.categories)
res_in = pd.DataFrame(columns=adata.var_names, index=groups)
res_out = pd.DataFrame(columns=adata.var_names, index=groups)
res_mean_in = pd.DataFrame(columns=adata.var_names, index=groups)
res_mean_out = pd.DataFrame(columns=adata.var_names, index=groups)
for group in tqdm(groups, desc="Computing metrics per group"):
res_in.loc[group] = (adata[adata.obs[groupby].isin([group]),:].layers['counts'] > 0).mean(0)
res_out.loc[group] = (adata[~adata.obs[groupby].isin([group]),:].layers['counts'] > 0).mean(0)
res_mean_in.loc[group] = adata[adata.obs[groupby].isin([group]),:].raw.X.mean(0)
res_mean_out.loc[group] = adata[~adata.obs[groupby].isin([group]),:].raw.X.mean(0)
res_in = res_in.T
res_out = res_out.T
res_mean_in = res_mean_in.T
res_mean_out = res_mean_out.T
res_in = res_in.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'percent_in'})
res_out = res_out.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'percent_out'})
res_mean_in = res_mean_in.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'mean_expr_in'})
res_mean_out = res_mean_out.reset_index().melt(id_vars=['index']).rename(columns={'index':'names','variable':"group", 'value':'mean_expr_out'})
return pd.merge(pd.merge(res_in, res_out), pd.merge(res_mean_in, res_mean_out))
def aggr_markers(adata, uns='rank_genes_groups', expr_metrics=True):
"""
Aggregate markers.
------------------
Returns an easy to view marker list dataframe.
Assumes 'rank_genes_groups' has already been called to find group markers
in AnnData Object.
* expr_metrics -> compute percent of cells expressed & mean expression for in/out groups.
"""
assert adata.uns[uns], 'Compute differentially expressed genes first.'
aggr_df = sc.get.rank_genes_groups_df(adata, None)
if expr_metrics:
aggr_percent_expr = get_percent_expr(adata, adata.uns[uns]['params']['groupby'])
return pd.merge(aggr_df, aggr_percent_expr)
else:
return aggr_df
def get_de_genes_to_plot(markers_df, lfc_thresh=1, padj_thresh=0.1, n_to_plot=5):
"""
Top DiffExp Genes.
Return as dict for easy plotting with sc.pl.dotplot.
"""
markers_df = markers_df[
(markers_df['logfoldchanges']>=lfc_thresh) &
(markers_df['pvals_adj']<=padj_thresh)
].groupby("group").head(n_to_plot)
return markers_df.groupby("group").agg(list)['names'].to_dict()
def get_uns(adata, tag):
"""
Retrieve unstructured data stored in AnnData.
------------------------
Inputs:
- adata: AnnData Object
- tag: name of key in adata.uns
Outputs:
- pd.DataFrame: formatted information in adata.uns
"""
assert tag in adata.uns, "{} not found in adata.uns".format(tag)
try:
return pd.DataFrame(adata.uns[tag]['values'], index=adata.uns[tag]['rows'], columns=adata.uns[tag]['cols'])
except:
raise ValueError("Unable to return structured dataframe from data.uns[{}]".format(tag))
def get_a_by_b(adata, a, b, norm=False):
"""
Get A x B.
----------------
Number of each .obs b per .obs a
returns pd.Dataframe
"""
hm = adata.obs.groupby([a,b]).size().reset_index().set_index(a).pivot(columns=b)
if norm:
hm = hm.div(hm.sum(1), 0)
hm.columns = hm.columns.droplevel()
hm.columns.name = None
return hm
# ---------------------------------
# Utilities
# ---------------------------------
def score_cc_genes(adata, cc_genes_file=pkg_resources.resource_filename('mudi', './ref/cell_cycle_genes/Macosko_cell_cycle_genes.txt')):
"""
Score Cell-Cycle Genes
------------------------------------
How to run:
score_cc_genes(adata)
Loads cell cycle genes list (ex. Macosko et al 2015) and runs cycle-scoring
on input anndata. Does everything in place. Stores the following in .obs:
- S_score
- G2M_score
- phase
"""
cc_genes = pd.read_table(cc_genes_file, delimiter='\t')
s_genes = cc_genes['S'].dropna()
g2m_genes = cc_genes['G2.M'].dropna()
s_genes_i = adata.var_names[np.in1d(adata.var_names, s_genes)]
g2m_genes_i = adata.var_names[np.in1d(adata.var_names, g2m_genes)]
sc.tl.score_genes_cell_cycle(adata, s_genes_i, g2m_genes_i)
def score_doublets(adata, key='batch', n_prin_comps=20, verbose=False):
"""
Scrubber: wrapper for Scrublet.
------------------------------------
How to run:
score_doublets(adata)
Adds the following to anndata object:
- adata.obs['scrublet_score'] --> float (0 - 1.0)
- adata.obs['doublet'] --> bool
"""
doublet_scores, predicted_doublets = list(),list()
for batch in adata.obs[key].drop_duplicates().values:
scrub = scr.Scrublet(adata[adata.obs[key]==batch].X)
_doublet_scores, _predicted_doublets = scrub.scrub_doublets(n_prin_comps=n_prin_comps, verbose=verbose)
doublet_scores.append(_doublet_scores)
predicted_doublets.append(_predicted_doublets)
adata.obs['scrublet_score'] = np.concatenate(doublet_scores)
adata.obs['doublet'] = np.concatenate(predicted_doublets)
| 36.191489 | 147 | 0.640212 |
db0e44fa6d9ec7326e7caba29ef74b40e65149d4 | 1,518 | py | Python | src/quacks/mypy.py | ariebovenberg/quacks | 839d307b24f3f37d9a5318c16acb631b9a1153f0 | [
"MIT"
] | 11 | 2021-12-12T20:51:15.000Z | 2022-02-02T12:08:32.000Z | src/quacks/mypy.py | ariebovenberg/quacks | 839d307b24f3f37d9a5318c16acb631b9a1153f0 | [
"MIT"
] | 8 | 2021-12-14T12:53:51.000Z | 2022-03-15T04:29:44.000Z | src/quacks/mypy.py | ariebovenberg/quacks | 839d307b24f3f37d9a5318c16acb631b9a1153f0 | [
"MIT"
] | 1 | 2021-12-15T16:50:34.000Z | 2021-12-15T16:50:34.000Z | from typing import Callable, Optional, Type
from mypy.nodes import AssignmentStmt, NameExpr, Statement, TempNode, Var
from mypy.plugin import ClassDefContext, Plugin
READONLY_DECORATOR_NAME = "quacks.readonly"
# this logic is mostly derived from the dataclasses plugin
def plugin(version: str) -> Type[Plugin]:
"""Plugin's public API and entrypoint."""
return _QuacksPlugin
| 26.631579 | 77 | 0.667984 |
db0eabb87d8f110b34f799008d45115ae3494a8a | 470 | py | Python | tests/test_toolbar.py | WilliamMayor/django-mail-panel | 2c41f808a645d5d7bad90510f44e53d29981cf22 | [
"Apache-2.0"
] | null | null | null | tests/test_toolbar.py | WilliamMayor/django-mail-panel | 2c41f808a645d5d7bad90510f44e53d29981cf22 | [
"Apache-2.0"
] | null | null | null | tests/test_toolbar.py | WilliamMayor/django-mail-panel | 2c41f808a645d5d7bad90510f44e53d29981cf22 | [
"Apache-2.0"
] | null | null | null | from .context import *
import unittest
from mail_panel.panels import MailToolbarPanel
if __name__ == "__main__":
unittest.TextTestRunner().run(suite())
| 21.363636 | 51 | 0.668085 |
db0fa33383a316fc52554465b3c7c6c0aa5f9ac3 | 8,130 | py | Python | tests/project/operations/operational_types/test_common_functions.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 44 | 2020-10-27T19:05:44.000Z | 2022-03-22T17:17:37.000Z | tests/project/operations/operational_types/test_common_functions.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 67 | 2020-10-08T22:36:53.000Z | 2022-03-22T22:58:33.000Z | tests/project/operations/operational_types/test_common_functions.py | anamileva/gridpath | e55eacb88ca5e6c034a90b18819e17cbd6f43854 | [
"Apache-2.0"
] | 21 | 2020-10-08T23:23:48.000Z | 2022-03-28T01:21:21.000Z | # Copyright 2016-2020 Blue Marble Analytics LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from builtins import str
from importlib import import_module
import numpy as np
import os.path
import pandas as pd
import sys
import unittest
from tests.common_functions import add_components_and_load_data
from gridpath.project.operations.operational_types.common_functions import \
determine_relevant_timepoints
TEST_DATA_DIRECTORY = \
os.path.join(os.path.dirname(__file__), "..", "..", "..", "test_data")
# Import prerequisite modules
PREREQUISITE_MODULE_NAMES = [
"temporal.operations.timepoints", "temporal.operations.horizons",
"geography.load_zones",
"project.__init__"
]
IMPORTED_PREREQ_MODULES = list()
for mdl in PREREQUISITE_MODULE_NAMES:
try:
imported_module = import_module("." + str(mdl), package='gridpath')
IMPORTED_PREREQ_MODULES.append(imported_module)
except ImportError:
print("ERROR! Module " + str(mdl) + " not found.")
sys.exit(1)
if __name__ == "__main__":
unittest.main()
| 44.42623 | 79 | 0.605904 |
db0fa4a708c3b8da99f0eb3651ee65d3e1405fa0 | 338 | py | Python | top_links.py | judge2020/crossover-viz | 61fef8750f2b64a2e71b9737a3c992f99c47c300 | [
"0BSD"
] | null | null | null | top_links.py | judge2020/crossover-viz | 61fef8750f2b64a2e71b9737a3c992f99c47c300 | [
"0BSD"
] | null | null | null | top_links.py | judge2020/crossover-viz | 61fef8750f2b64a2e71b9737a3c992f99c47c300 | [
"0BSD"
] | null | null | null | from main import extract_data
if __name__ == '__main__':
top = {}
out = extract_data('CrossoverWiki.xml')
for name in out:
for link in name['links']:
w = link['with']
top[w] = top[w] + 1 if w in top else 1
top = dict(reversed(sorted(top.items(), key=lambda item: item[1])))
print(top)
| 28.166667 | 71 | 0.573964 |
db0fc2a14bd242c50cea5efa838e162798fc3772 | 316 | py | Python | instance/settings.py | isaacjohnwesley/digfont | 0f0a088151e52e972eec04dbc0b8c7fd6a30a52d | [
"MIT"
] | 2 | 2017-01-27T03:22:21.000Z | 2018-10-30T15:26:33.000Z | instance/settings.py | isaacjohnwesley/digfont | 0f0a088151e52e972eec04dbc0b8c7fd6a30a52d | [
"MIT"
] | null | null | null | instance/settings.py | isaacjohnwesley/digfont | 0f0a088151e52e972eec04dbc0b8c7fd6a30a52d | [
"MIT"
] | null | null | null | """
Flask application settings.
"""
import os
DEBUG = True
# Output un-merged files in debug mode.
#ASSETS_DEBUG = DEBUG
SECRET_KEY = os.environ.get('SECRET_KEY', None)
MY_VAR = os.environ.get('MY_VAR', None)
#: Mongodb settings
MONGODB_SETTINGS = {'DB' : 'digfont'}
#: CSRF key
SECRET_KEY = "dig.font.s3cr3t"
| 15.8 | 47 | 0.702532 |
db116d889b8b1d94133fabaa9ee920a870375f4b | 839 | py | Python | pangram.py | ZorbaTheStrange/pangram | f9fda95f119d328224f21f19690122e36be34482 | [
"MIT"
] | null | null | null | pangram.py | ZorbaTheStrange/pangram | f9fda95f119d328224f21f19690122e36be34482 | [
"MIT"
] | null | null | null | pangram.py | ZorbaTheStrange/pangram | f9fda95f119d328224f21f19690122e36be34482 | [
"MIT"
] | null | null | null | #! /usr/bin/python3
'''
panogram.py - this program recongizes pangrams.
by zorba
'''
import sys
def pangram_check(sentence_or_word):
'''
checks the user input to see if it is a pangram.
'''
letters = set('abcdefghijklmnopqrstuvwxyz')
if sentence_or_word.lower() == 'done':
z
for letter in sentence_or_word.lower():
if letter in letters:
letters.remove(letter)
if len(letters) == 0:
print('\nThe sentence or word is a panogram!')
else:
print('\nThis sentence or word is not a panogram.')
def main():
'''
main
'''
sentence_or_word = input('\nPlease enter a sentence or a word to check to see if it is a pangram: \nIf Done, Please type Done')
pangram_check(sentence_or_word)
if __name__ == '__main__':
sys.exit(main())
| 19.511628 | 131 | 0.623361 |
db13d0f32b95cfef64253a43f004918a6c18619d | 232 | py | Python | Chapter-4 Sequence/Dictionary.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | Chapter-4 Sequence/Dictionary.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | Chapter-4 Sequence/Dictionary.py | jaiswalIT02/pythonprograms | bc94e52121202b04c3e9112d9786f93ed6707f7a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Oct 10 15:31:57 2020
@author: Tarun Jaiswal
"""
dictone = {
"bookname": "Recursion Sutras",
"subject": "Recursion",
"author": "Champak Roy"
}
dicttwo = dict(dictone)
print(dicttwo) | 15.466667 | 35 | 0.633621 |
db15276b717208ef752639b4aaf944577ef66238 | 1,032 | py | Python | mportal/wsgi_start.py | auyeongwy/mportal | e406baea802093569c90c7206649c5afd9431dab | [
"Apache-2.0"
] | null | null | null | mportal/wsgi_start.py | auyeongwy/mportal | e406baea802093569c90c7206649c5afd9431dab | [
"Apache-2.0"
] | null | null | null | mportal/wsgi_start.py | auyeongwy/mportal | e406baea802093569c90c7206649c5afd9431dab | [
"Apache-2.0"
] | null | null | null | # Copyright 2014 Au Yeong Wing Yau
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" All start-up processes to be called when the WSGI process starts.
"""
from mportal_tools import mportal_log
from mportal_tools import mportal_db
import mportal_urls, template_mgr
mportal_log.init_log() # Initializes logging file handler.
mportal_db.init_db() # Initializes database connections.
mportal_urls.init_urls() # Initializes URL list.
template_mgr.init_templates() # Initializes HTML templates.
| 34.4 | 74 | 0.774225 |
db16a58a234af950b25d6e13e770b9afd148413c | 1,252 | py | Python | lecture_04/312_plan_motion_ros_artist.py | farzanehesk/COMPAS-II-FS2022 | 857eb40000f0532d0c04689331eadefd38dce6b7 | [
"MIT"
] | 11 | 2022-01-24T15:07:15.000Z | 2022-03-29T12:58:05.000Z | lecture_04/312_plan_motion_ros_artist.py | farzanehesk/COMPAS-II-FS2022 | 857eb40000f0532d0c04689331eadefd38dce6b7 | [
"MIT"
] | 4 | 2022-03-16T06:06:45.000Z | 2022-03-29T22:59:11.000Z | lecture_04/312_plan_motion_ros_artist.py | farzanehesk/COMPAS-II-FS2022 | 857eb40000f0532d0c04689331eadefd38dce6b7 | [
"MIT"
] | 20 | 2022-03-02T10:36:41.000Z | 2022-03-09T00:12:33.000Z | import math
import time
from compas_fab.backends import RosClient
from compas.artists import Artist
from compas.geometry import Frame
with RosClient("localhost") as client:
robot = client.load_robot(load_geometry=True)
group = robot.main_group_name
frame = Frame((0.4, 0.3, 0.05), (-1, 0, 0), (0, 1, 0))
tolerance_position = 0.001
tolerance_axes = [math.radians(1)] * 3
start_configuration = robot.zero_configuration()
start_configuration.joint_values = (-0.106, 5.351, 2.231, -2.869, 4.712, 1.465)
# create goal constraints from frame
goal_constraints = robot.constraints_from_frame(frame, tolerance_position, tolerance_axes, group)
trajectory = robot.plan_motion(goal_constraints, start_configuration, group, options=dict(planner_id="RRT"))
print("Computed kinematic path with %d configurations." % len(trajectory.points))
print("Executing this path at full speed would take approx. %.3f seconds." % trajectory.time_from_start)
artist = Artist(robot.model)
for tp in trajectory.points:
config = robot.zero_configuration()
config.joint_values = tp.joint_values
artist.update(config)
artist.draw_visual()
artist.redraw()
time.sleep(0.02)
| 33.837838 | 112 | 0.713259 |
db16e37393c0ecb2b013bb3800feb96ec755b22d | 1,306 | py | Python | awxkit/test/cli/test_client.py | vrevelas/awx | 858f43fd2aeccacd3172b1efa44fb37c7a48e92e | [
"Apache-2.0"
] | null | null | null | awxkit/test/cli/test_client.py | vrevelas/awx | 858f43fd2aeccacd3172b1efa44fb37c7a48e92e | [
"Apache-2.0"
] | null | null | null | awxkit/test/cli/test_client.py | vrevelas/awx | 858f43fd2aeccacd3172b1efa44fb37c7a48e92e | [
"Apache-2.0"
] | null | null | null | from io import StringIO
import pytest
from requests.exceptions import ConnectionError
from awxkit.cli import run, CLI
def test_connection_error(capfd):
cli = CLI()
cli.parse_args(['awx'])
with pytest.raises(ConnectionError):
cli.connect()
| 21.409836 | 56 | 0.608729 |
db18a54ed6a35015f51619ef8bd59e64ab56a6ea | 10,797 | py | Python | tests/python/pants_test/tasks/test_what_changed.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/tasks/test_what_changed.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | tests/python/pants_test/tasks/test_what_changed.py | areitz/pants | 9bfb3feb0272c05f36e190c9147091b97ee1950d | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from textwrap import dedent
from pants.backend.codegen.targets.java_protobuf_library import JavaProtobufLibrary
from pants.backend.codegen.targets.java_thrift_library import JavaThriftLibrary
from pants.backend.codegen.targets.python_thrift_library import PythonThriftLibrary
from pants.backend.core.from_target import FromTarget
from pants.backend.core.targets.resources import Resources
from pants.backend.core.tasks.what_changed import WhatChanged
from pants.backend.core.wrapped_globs import RGlobs
from pants.backend.jvm.targets.jar_dependency import JarDependency
from pants.backend.jvm.targets.jar_library import JarLibrary
from pants.backend.jvm.targets.java_library import JavaLibrary
from pants.backend.jvm.targets.unpacked_jars import UnpackedJars
from pants.backend.python.targets.python_library import PythonLibrary
from pants.base.build_file_aliases import BuildFileAliases
from pants.base.source_root import SourceRoot
from pants.goal.workspace import Workspace
from pants_test.tasks.task_test_base import ConsoleTaskTestBase
class WhatChangedTestBasic(BaseWhatChangedTest):
class WhatChangedTest(BaseWhatChangedTest):
| 29.662088 | 100 | 0.643605 |
db1c1956b75c3a0483a601da0add4f5327ce2ad0 | 364 | py | Python | utils/image_utils.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 10 | 2019-08-08T03:17:39.000Z | 2021-12-15T08:43:29.000Z | utils/image_utils.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 7 | 2019-11-29T04:00:22.000Z | 2022-03-11T23:38:20.000Z | utils/image_utils.py | novicasarenac/car-racing-rl | 5bb3b2c47fb6ceda3e8f2c149485652da5a079ba | [
"MIT"
] | 4 | 2019-11-28T10:14:48.000Z | 2020-04-08T08:10:37.000Z | import PIL
import numpy as np
| 17.333333 | 55 | 0.653846 |
db1c1d0e4cd2adbba4dafd1f97c64d82fddfdf36 | 102 | py | Python | sharing_groups/apps.py | sthagen/misp-hub | 5b528b40796a74dc7e8367d75cb3c84920b87bfb | [
"BSD-3-Clause"
] | 2 | 2020-10-08T18:35:04.000Z | 2020-10-08T18:35:08.000Z | sharing_groups/apps.py | sthagen/misp-hub | 5b528b40796a74dc7e8367d75cb3c84920b87bfb | [
"BSD-3-Clause"
] | null | null | null | sharing_groups/apps.py | sthagen/misp-hub | 5b528b40796a74dc7e8367d75cb3c84920b87bfb | [
"BSD-3-Clause"
] | 1 | 2020-10-08T18:35:17.000Z | 2020-10-08T18:35:17.000Z | from django.apps import AppConfig
| 17 | 37 | 0.784314 |
db1fd3d38056cafb0f7ff39c5a005804f923571f | 5,310 | py | Python | GoogleCloud/backend.py | ryanjsfx2424/HowToNFTs | f4cff7ad676d272815bd936eb142556f92540a32 | [
"MIT"
] | null | null | null | GoogleCloud/backend.py | ryanjsfx2424/HowToNFTs | f4cff7ad676d272815bd936eb142556f92540a32 | [
"MIT"
] | null | null | null | GoogleCloud/backend.py | ryanjsfx2424/HowToNFTs | f4cff7ad676d272815bd936eb142556f92540a32 | [
"MIT"
] | null | null | null | ## backend.py
"""
The purpose of this script is to continuously monitor the blockchain to
1) determine if a holder aquires or loses an NFT:
2) if they do, generate a new image/movie for the tokens they hold,
3) upload the new image/movie to the hosting service
4) update the metadata file
Repeat :)
(The above ordering matters!)
"""
## use python3!!!
import os
import io
import json
from web3 import Web3
## PARAMETERS
DEPLOYER_ADDRESS = "0x01656d41e041b50fc7c1eb270f7d891021937436"
INFURA_URL = "https://rinkeby.infura.io/v3/37de3193ccf345fe810932c3d0f103d8"
EXT_IMG = ".mp4"
EXT_METADATA = ".json"
ADDRESS = "0xb552E0dDd94EA72DBc089619115c81529cd8CA70" # address for deployed smart contract
## web3 stuff
w3 = Web3(Web3.HTTPProvider(INFURA_URL))
with open("../contract/abi_v020.json", "r") as fid:
rl = "".join(fid.readlines())
abi = json.loads(rl)
# end with open
## goal is to update token URI based on how many are held
## by that owner (but deployer doesn't count!)
contract = w3.eth.contract(address=ADDRESS, abi=abi)
totalSupply = contract.functions.totalSupply().call()
print("total supply: ", totalSupply)
for ii in range(totalSupply):
token = contract.functions.tokenByIndex(ii).call()
owner = contract.functions.ownerOf(token).call()
tokenList = contract.functions.walletOfOwner(owner).call()
## string comparison fails for some mysterious reason
if int(owner,16) == int(DEPLOYER_ADDRESS,16):
tokenList = [ii+1]
# end if
print("token: ", token)
print("owner: ", owner)
print("tokenList: ", tokenList)
newTokenName = str(token)
for jj in range(len(tokenList)):
if tokenList[jj] != token:
newTokenName += "_" + str(tokenList[jj])
# end if
# end for jj
print("newTokenName: ", newTokenName)
## first, check if metadata on hosting service has newTokenName.
## if so, we're good! If not, update it!
old_foos = []
metadata_correct = False
os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + ".txt"
+ " > foo_file0.txt")
os.system("gsutil ls gs://how-to-nfts-metadata/foo" + str(token) + "_*.txt"
+ " > foo_file1.txt")
for jj in range(2):
with open("foo_file" + str(jj) + ".txt", "r") as fid:
for line in fid:
old_foos.append(line)
if "foo" + newTokenName + ".txt" in line:
metadata_correct = True
# end if
# end for
# end with
os.system("rm foo_file" + str(jj) + ".txt")
# end for jj
print("old_foos: ", old_foos)
if metadata_correct:
print("metadata correct (supposedly) so skipping")
continue
# end if
if len(old_foos) > 1:
print("error! only expected one old foo file.")
raise
# end if
old_foo = old_foos[0][:-1] # strip trailing newline character
old_foo = old_foo.split("metadata/")[1]
print("old_foo: ", old_foo)
## evidently metadata is not correct...
## first, we generate a new movie (if needed) and rsync with
## the GCP bucket.
## then, we'll update the metadata file, remove the old foo
## file and touch a new one
## then we'll rsync the metadata folder with the bucket.
target = "../nftmp4s/HowToKarate" + str(token) + ".mp4"
destination = "../nftmp4s/HowToKarate" + newTokenName + ".mp4"
if not os.path.exists(destination):
os.system("cp " + target + " " + destination)
for jj in range(len(tokenList)):
if tokenList[jj] != token:
print("destination: ", destination)
print("tokenList[jj]: ", tokenList[jj])
os.system('ffmpeg -y -i ' + destination + ' -i nftmp4s/HowToKarate' + str(tokenList[jj]) + '.mp4' + \
' -filter_complex "[0:v] [1:v]' + \
' concat=n=2:v=1 [v]"' + \
' -map "[v]" ' + "concat.mp4")
os.system("mv concat.mp4 " + destination)
# end if
# end for jj
## note, can rsync in parallel via rsync -m...
os.system("gsutil rsync ../nftmp4s/ gs://how-to-nfts-data/")
# end if
## next, we'll update the metadata file, remove the old foo
## file and touch a new one
## then we'll rsync the metadata folder with the bucket.
os.system("cp ../metadata/" + str(token) + ".json temp.json")
with open("../metadata/" + str(token) + ".json", "w") as fid_write:
with open("temp.json", "r") as fid_read:
for line in fid_read:
if '"image":' in line:
line = line.split("HowToKarate")[0] + "HowToKarate" + \
str(newTokenName) + '.mp4",\n'
# end i
fid_write.write(line)
# end for line
# end with open write
# end with open read
os.system("rm temp.json")
os.system("touch ../metadata/foo" + str(newTokenName) + ".txt")
os.system("rm ../metadata/" + old_foo)
## last, we need to update the _metadata file and then rsync.
with open("../metadata/_metadata.json", "w") as fid_write:
fid_write.write("{\n")
for jj in range(1,25):
with open("../metadata/" + str(jj) + ".json", "r") as fid_read:
for line in fid_read:
if "}" in line and len(line) == 2 and jj != 24:
line = "},\n"
# end if
fid_write.write(line)
# end for
# end with open
fid_write.write("}")
# end with open
os.system("gsutil rsync -d ../metadata/ gs://how-to-nfts-metadata/")
# end for ii
## end test.py
| 32.378049 | 109 | 0.628625 |
db20e9e55635779f1f3c32e48206263757ae91d0 | 10,875 | py | Python | dependencies/pyffi/formats/tga/__init__.py | korri123/fnv-blender-niftools-addon | ce8733e011c7d74c79be265832e1b06e85faf5ee | [
"BSD-3-Clause"
] | 4 | 2021-09-27T09:58:44.000Z | 2022-02-05T16:12:28.000Z | io_scene_niftools_updater/backup/dependencies/pyffi/formats/tga/__init__.py | korri123/fnv-blender-niftools-addon | ce8733e011c7d74c79be265832e1b06e85faf5ee | [
"BSD-3-Clause"
] | 5 | 2019-11-10T16:20:09.000Z | 2019-12-02T14:23:58.000Z | .venv/Lib/site-packages/pyffi/formats/tga/__init__.py | ndaley7/BodySlide-Group-Generator | 3ed7b78c5f5ccec103b6bf06bc24398cfb6ad014 | [
"BSD-3-Clause"
] | null | null | null | """
:mod:`pyffi.formats.tga` --- Targa (.tga)
=========================================
Implementation
--------------
.. autoclass:: TgaFormat
:show-inheritance:
:members:
Regression tests
----------------
Read a TGA file
^^^^^^^^^^^^^^^
>>> # check and read tga file
>>> import os
>>> from os.path import dirname
>>> dirpath = __file__
>>> for i in range(4): #recurse up to root repo dir
... dirpath = dirname(dirpath)
>>> repo_root = dirpath
>>> format_root = os.path.join(repo_root, 'tests', 'formats', 'tga')
>>> file = os.path.join(format_root, 'test.tga').replace("\\\\", "/")
>>> stream = open(file, 'rb')
>>> data = TgaFormat.Data()
>>> data.inspect(stream)
>>> data.read(stream)
>>> stream.close()
>>> data.header.width
60
>>> data.header.height
20
Parse all TGA files in a directory tree
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> for stream, data in TgaFormat.walkData(format_root):
... try:
... # the replace call makes the doctest also pass on windows
... os_path = stream.name
... split = (os_path.split(os.sep))[-4:]
... rejoin = os.path.join(*split).replace("\\\\", "/")
... print("reading %s" % rejoin)
... except Exception:
... print(
... "Warning: read failed due corrupt file,"
... " corrupt format description, or bug.") # doctest: +REPORT_NDIFF
reading tests/formats/tga/test.tga
reading tests/formats/tga/test_footer.tga
Create a TGA file from scratch and write to file
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
>>> data = TgaFormat.Data()
>>> from tempfile import TemporaryFile
>>> stream = TemporaryFile()
>>> data.write(stream)
>>> stream.close()
"""
# ***** BEGIN LICENSE BLOCK *****
#
# Copyright (c) 2007-2012, Python File Format Interface
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Python File Format Interface
# project nor the names of its contributors may be used to endorse
# or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# ***** END LICENSE BLOCK *****
import struct, os, re
import pyffi.object_models.xml
import pyffi.object_models.common
import pyffi.object_models.xml.basic
import pyffi.object_models.xml.struct_
import pyffi.object_models
import pyffi.utils.graph
from pyffi.utils.graph import EdgeFilter
if __name__ == '__main__':
import doctest
doctest.testmod()
| 34.090909 | 80 | 0.57269 |
db2348f24a291f4c0fb84c5876a92a0022f59eed | 355 | py | Python | python/push.py | swallowstalker/postopush | 6ec7e791aff1e3d868711d62e6c702a231bc1d65 | [
"MIT"
] | 1 | 2020-02-11T03:41:49.000Z | 2020-02-11T03:41:49.000Z | python/push.py | swallowstalker/postopush | 6ec7e791aff1e3d868711d62e6c702a231bc1d65 | [
"MIT"
] | null | null | null | python/push.py | swallowstalker/postopush | 6ec7e791aff1e3d868711d62e6c702a231bc1d65 | [
"MIT"
] | null | null | null | import telegram
import os
if __name__ == "__main__":
main() | 23.666667 | 87 | 0.687324 |
db23585c3e9e1de8759f993492930d5a53b54101 | 4,309 | py | Python | advent-of-code-2018/day 13/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | advent-of-code-2018/day 13/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | advent-of-code-2018/day 13/main.py | gikf/advent-of-code | 923b026ce87121b73093554734746c2ecb17c5e2 | [
"MIT"
] | null | null | null | """Advent of Code 2018 Day 13."""
from copy import deepcopy
CARTS = '<>^v'
INTERSECTION = '+'
CURVES = '\\/'
cart_to_direction = {
'<': 180,
'^': 90,
'>': 0,
'v': 270,
}
direction_to_move = {
0: (0, 1),
90: (-1, 0),
180: (0, -1),
270: (1, 0),
}
direction_to_cart = {
0: '>',
90: '^',
180: '<',
270: 'v',
}
turns = {
0: 90,
1: 0,
2: -90,
}
next_direction = {
0: {
'\\': 270,
'/': 90,
},
90: {
'\\': 180,
'/': 0,
},
180: {
'\\': 90,
'/': 270,
},
270: {
'\\': 0,
'/': 180,
},
}
def follow_tracks(tracks, carts, prevent_collision=False):
"""Follow tracks with carts. Optionally prevent ending with collision."""
while len(carts) > 1:
carts, collisions = move_carts(tracks, carts)
if collisions and not prevent_collision:
return collisions[0]
return carts[0][0]
def find_repeated_position(carts):
"""Find position taken by two carts - colliding."""
repeated = []
seen_positions = set()
for cur_position, *_ in carts:
position = tuple(cur_position)
if position in seen_positions:
repeated.append(cur_position)
seen_positions.add(position)
return repeated
def move_carts(tracks, carts):
"""Move carts by one on tracks."""
collisions = []
for cart in sorted(carts):
position, direction, turn = cart
move = direction_to_move[direction]
next_position = [pos + change for pos, change in zip(position, move)]
next_square = get_square(tracks, next_position)
if next_square == INTERSECTION:
next_direction, next_turn = turn_cart(direction, turn)
cart[1] = next_direction
cart[2] = next_turn
elif is_curve(next_square):
next_direction = curve_cart(direction, next_square)
cart[1] = next_direction
cart[0] = next_position
repeated_position = find_repeated_position(carts)
if repeated_position:
collisions.extend(repeated_position)
carts = remove_collided_carts(carts, repeated_position)
return carts, collisions
def remove_collided_carts(carts, repeated_position):
"""Remove carts colliding on the repeated_position."""
return [cart for cart in carts
if cart[0] not in repeated_position]
def curve_cart(direction, curve):
"""Move cart over the curve."""
return next_direction[direction][curve]
def turn_cart(direction, turn):
"""Turn cart from direction, depending on the turn type."""
return (direction + turns[turn]) % 360, (turn + 1) % len(turns)
def is_curve(square):
"""Check if square is one of the curves."""
return square in CURVES
def get_square(tracks, position):
"""Get square from tracks with position."""
row, col = position
return tracks[row][col]
def remove_carts(lines):
"""Remove carts from lines, replacing them with normal tracks."""
for row_no, row in enumerate(lines):
for col_no, square in enumerate(row):
if square in '<>':
lines[row_no][col_no] = '-'
elif square in 'v^':
lines[row_no][col_no] = '|'
return lines
def find_carts(lines):
"""Find carts in lines. Return list of lists with cart parameters."""
carts = []
for row_no, row in enumerate(lines):
for col_no, square in enumerate(row):
if square not in CARTS:
continue
carts.append([[row_no, col_no], cart_to_direction[square], 0])
return carts
def get_file_contents(file):
"""Read all lines from file."""
with open(file) as f:
return f.readlines()
if __name__ == '__main__':
main()
| 26.115152 | 77 | 0.598747 |
db24a982814e1d245a07e054f71ca678690fe6ad | 13,037 | py | Python | goopylib/applications/custom_ease.py | YuvrajThorat/goopylib | b6bc593b7bcc92498a507f34b2190365a0ac51e7 | [
"MIT"
] | null | null | null | goopylib/applications/custom_ease.py | YuvrajThorat/goopylib | b6bc593b7bcc92498a507f34b2190365a0ac51e7 | [
"MIT"
] | null | null | null | goopylib/applications/custom_ease.py | YuvrajThorat/goopylib | b6bc593b7bcc92498a507f34b2190365a0ac51e7 | [
"MIT"
] | null | null | null | from goopylib.imports import *
from pathlib import Path as pathlib_Path
# I kinda wanted to scrap this, it wasn't that good.
| 49.570342 | 120 | 0.535246 |
db26ca941f83e142751cfd4f2744ef8039848b25 | 537 | py | Python | app/lib/duplication_check/train.py | WHUT-XGP/ASoulCnki | 98f29532e43e73f8e364d55b284558de5803b8b9 | [
"Apache-2.0"
] | null | null | null | app/lib/duplication_check/train.py | WHUT-XGP/ASoulCnki | 98f29532e43e73f8e364d55b284558de5803b8b9 | [
"Apache-2.0"
] | null | null | null | app/lib/duplication_check/train.py | WHUT-XGP/ASoulCnki | 98f29532e43e73f8e364d55b284558de5803b8b9 | [
"Apache-2.0"
] | null | null | null | # -*- encoding: utf-8 -*-
"""
Filename :train.py
Description :
Time :2021/06/22 15:21:08
Author :hwa
Version :1.0
"""
from app.lib.duplication_check.reply_database import ReplyDatabase
import time
if __name__ == "__main__":
train_data()
| 23.347826 | 70 | 0.646182 |
db28a45f5705fff1d415e5578ed431780d73980b | 5,837 | py | Python | buildscripts/task_generation/evg_config_builder.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | buildscripts/task_generation/evg_config_builder.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | buildscripts/task_generation/evg_config_builder.py | benety/mongo | 203430ac9559f82ca01e3cbb3b0e09149fec0835 | [
"Apache-2.0"
] | null | null | null | """Builder for generating evergreen configuration."""
from threading import Lock
from typing import Set, List, Dict
import inject
from shrub.v2 import ShrubProject, BuildVariant, ExistingTask, Task
from buildscripts.patch_builds.task_generation import validate_task_generation_limit
from buildscripts.task_generation.constants import ACTIVATE_ARCHIVE_DIST_TEST_DEBUG_TASK
from buildscripts.task_generation.gen_task_service import GenTaskService, \
GenTaskOptions, ResmokeGenTaskParams, FuzzerGenTaskParams
from buildscripts.task_generation.generated_config import GeneratedFile, GeneratedConfiguration
from buildscripts.task_generation.resmoke_proxy import ResmokeProxyService
from buildscripts.task_generation.suite_split import SuiteSplitService, GeneratedSuite, \
SuiteSplitParameters
from buildscripts.task_generation.task_types.fuzzer_tasks import FuzzerTask
# pylint: disable=too-many-instance-attributes
| 46.325397 | 99 | 0.716807 |
db299a97d65e80dbbfa712b50525b811276c7bff | 4,424 | py | Python | test/unit/vint/ast/plugin/scope_plugin/stub_node.py | mosheavni/vint | 9078dd626415cfe37ddaf03032e714bbaca8b336 | [
"MIT"
] | 538 | 2015-01-03T18:54:53.000Z | 2020-01-11T01:34:51.000Z | test/unit/vint/ast/plugin/scope_plugin/stub_node.py | mosheavni/vint | 9078dd626415cfe37ddaf03032e714bbaca8b336 | [
"MIT"
] | 235 | 2015-01-01T06:20:01.000Z | 2020-01-17T11:32:39.000Z | test/unit/vint/ast/plugin/scope_plugin/stub_node.py | mosheavni/vint | 9078dd626415cfe37ddaf03032e714bbaca8b336 | [
"MIT"
] | 43 | 2015-01-23T16:59:49.000Z | 2019-12-27T10:56:12.000Z | from vint.ast.node_type import NodeType
from vint.ast.plugin.scope_plugin.identifier_attribute import (
IDENTIFIER_ATTRIBUTE,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG,
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT,
)
def create_curlyname(is_declarative=True):
""" Create a node as a `my_{'var'}`
"""
return {
'type': NodeType.CURLYNAME.value,
'value': [
{
'type': NodeType.CURLYNAMEPART.value,
'value': 'my_',
},
{
'type': NodeType.CURLYNAMEEXPR.value,
'value': {
'type': NodeType.CURLYNAMEEXPR.value,
'value': 'var',
},
}
],
IDENTIFIER_ATTRIBUTE: {
IDENTIFIER_ATTRIBUTE_DECLARATION_FLAG: is_declarative,
IDENTIFIER_ATTRIBUTE_DYNAMIC_FLAG: True,
IDENTIFIER_ATTRIBUTE_MEMBER_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_FLAG: False,
IDENTIFIER_ATTRIBUTE_AUTOLOAD_FLAG: False,
IDENTIFIER_ATTRIBUTE_FUNCTION_ARGUMENT_FLAG: False,
IDENTIFIER_ATTRIBUTE_LAMBDA_STRING_CONTEXT: False,
},
}
| 35.96748 | 82 | 0.667043 |
db2cccb8706be958cee0c18ee9e554aac314a720 | 348 | py | Python | grpr2-ch/maci/policies/__init__.py | saarcohen30/GrPR2-CH | ba8c32f5b4caeebfc93ca30fa1fcc8223176183f | [
"MIT"
] | null | null | null | grpr2-ch/maci/policies/__init__.py | saarcohen30/GrPR2-CH | ba8c32f5b4caeebfc93ca30fa1fcc8223176183f | [
"MIT"
] | null | null | null | grpr2-ch/maci/policies/__init__.py | saarcohen30/GrPR2-CH | ba8c32f5b4caeebfc93ca30fa1fcc8223176183f | [
"MIT"
] | null | null | null | from .nn_policy import NNPolicy
# from .gmm import GMMPolicy
# from .latent_space_policy import LatentSpacePolicy
from .uniform_policy import UniformPolicy
# from .gaussian_policy import GaussianPolicy
from .stochastic_policy import StochasticNNPolicy, StochasticNNConditionalPolicy
from .deterministic_policy import DeterministicNNPolicy
| 38.666667 | 81 | 0.850575 |
db2d0faef6bb46b40a8c415250b0a2a6b57926d0 | 3,841 | py | Python | sugarpidisplay/sugarpiconfig/views.py | szpaku80/SugarPiDisplay | 793c288afaad1b1b6921b0d29ee0e6a537e42384 | [
"MIT"
] | 1 | 2022-02-12T20:39:20.000Z | 2022-02-12T20:39:20.000Z | sugarpidisplay/sugarpiconfig/views.py | szpaku80/SugarPiDisplay | 793c288afaad1b1b6921b0d29ee0e6a537e42384 | [
"MIT"
] | null | null | null | sugarpidisplay/sugarpiconfig/views.py | szpaku80/SugarPiDisplay | 793c288afaad1b1b6921b0d29ee0e6a537e42384 | [
"MIT"
] | null | null | null | """
Routes and views for the flask application.
"""
import os
import json
from flask import Flask, redirect, request, render_template, flash
from pathlib import Path
from flask_wtf import FlaskForm
from wtforms import StringField,SelectField,PasswordField,BooleanField
from wtforms.validators import InputRequired,ValidationError
from . import app
source_dexcom = 'dexcom'
source_nightscout = 'nightscout'
LOG_FILENAME="sugarpidisplay.log"
folder_name = '.sugarpidisplay'
config_file = 'config.json'
pi_sugar_path = os.path.join(str(Path.home()), folder_name)
Path(pi_sugar_path).mkdir(exist_ok=True)
def handle_submit(form):
config = { 'data_source': form.data_source.data }
config['use_animation'] = form.use_animation.data
if (form.data_source.data == source_dexcom):
config['dexcom_username'] = form.dexcom_user.data
config['dexcom_password'] = form.dexcom_pass.data
else:
config['nightscout_url'] = form.ns_url.data
config['nightscout_access_token'] = form.ns_token.data
#__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(pi_sugar_path, config_file), "w")
json.dump(config, f, indent = 4)
f.close()
def loadData(form):
config_full_path = os.path.join(pi_sugar_path, config_file)
if (not Path(config_full_path).exists()):
return
try:
f = open(config_full_path, "r")
config = json.load(f)
f.close()
if ('data_source' in config):
form.data_source.data = config['data_source']
if (config['data_source'] == source_dexcom):
if ('dexcom_username' in config):
form.dexcom_user.data = config['dexcom_username']
if ('dexcom_password' in config):
form.dexcom_pass.data = config['dexcom_password']
if (config['data_source'] == source_nightscout):
if ('nightscout_url' in config):
form.ns_url.data = config['nightscout_url']
if ('nightscout_access_token' in config):
form.ns_token.data = config['nightscout_access_token']
form.use_animation.data = config['use_animation']
except:
pass
| 35.564815 | 93 | 0.664931 |
db2d5607d06728d0c91675bdab230c329ed3e400 | 2,001 | py | Python | progressao_aritmeticav3.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
] | null | null | null | progressao_aritmeticav3.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
] | null | null | null | progressao_aritmeticav3.py | eduardobaltazarmarfim/PythonC | 8e44b4f191582c73cca6df98120ab142145c4ba1 | [
"MIT"
] | null | null | null |
verificar() | 17.4 | 91 | 0.410795 |
db2d89c006750b429af0eb85221902cff310ad5b | 3,278 | py | Python | policies/plc_migrate_default.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 1 | 2022-03-17T12:51:45.000Z | 2022-03-17T12:51:45.000Z | policies/plc_migrate_default.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 2 | 2021-11-03T15:34:40.000Z | 2021-12-14T19:50:20.000Z | policies/plc_migrate_default.py | PaloAltoNetworks/pcs-migration-management | 766c8c861befa92e593b23ad6d248e33f62054bb | [
"ISC"
] | 4 | 2021-11-09T17:57:01.000Z | 2022-01-24T17:41:21.000Z | from policies import plc_get, plc_add, plc_update
from sdk.color_print import c_print
from tqdm import tqdm
def migrate_builtin_policies(tenant_sessions: list, logger):
'''
Updates the default/built in policies of all clone tenants so they are the same as the
source tenant. Default policies can not be added or deleted.
'''
tenant_updated_policies = []
tenant_default_policies = []
for tenant_session in tenant_sessions:
tenant_default_policies.append(plc_get.api_get_default(tenant_session, logger))
original_tenant = tenant_default_policies[0]
clone_tenant_default_policies = tenant_default_policies[1:]
for index, tenant in enumerate(clone_tenant_default_policies):
added = 0
for plc in tqdm(tenant, desc='Syncing Default Policies', leave=False):
for old_plc in original_tenant:
if plc['name'] == old_plc['name']:
#Compliance metadata is not apart of every policy so it has to be compared situationally
complianceMetadata = []
if 'complianceMetadata' in plc:
complianceMetadata = plc['complianceMetadata']
old_complianceMetadata = []
if 'complianceMetadata' in old_plc:
old_complianceMetadata = old_plc['complianceMetadata']
compFlag = False
for el in old_complianceMetadata:
name = el['standardName']
if name not in [cmp['standardName'] for cmp in complianceMetadata]:
compFlag = True
break
req_id = el['requirementId']
if req_id not in [cmp['requirementId'] for cmp in complianceMetadata]:
compFlag = True
break
sec_id = el['sectionId']
if sec_id not in [cmp['sectionId'] for cmp in complianceMetadata]:
compFlag = True
break
#Sort Labels
labels = plc['labels']
o_labels = old_plc['labels']
labels.sort()
o_labels.sort()
#If there is a difference between the source tenant policy and the destination tenant policy, then update the policy
# if plc['severity'] != old_plc['severity'] or plc['labels'] != old_plc['labels'] or plc['rule'] != old_plc['rule'] or compFlag:
if plc['severity'] != old_plc['severity'] or labels != o_labels or plc['rule'] != old_plc['rule'] or compFlag:
res = plc_add.update_default_policy(tenant_sessions[index + 1], old_plc, logger)
if res != 'BAD':
added += 1
tenant_updated_policies.append(added)
logger.info('Finished migrating Default Policies')
return tenant_updated_policies
if __name__ == '__main__':
from sdk.load_config import load_config_create_sessions
tenant_sessions = load_config_create_sessions()
migrate_builtin_policies(tenant_sessions)
| 46.169014 | 148 | 0.57352 |
db2e05e89e1db86e733714d3d045b8d52021205c | 8,158 | py | Python | MAIN VERSION 2.py | HorridHanu/Notepad-Python | 5c40ddf0cc01b88387bf3052117581cba6e8ab6f | [
"Apache-2.0"
] | 1 | 2021-07-03T09:16:26.000Z | 2021-07-03T09:16:26.000Z | MAIN VERSION 2.py | HorridHanu/Notepad-Python | 5c40ddf0cc01b88387bf3052117581cba6e8ab6f | [
"Apache-2.0"
] | null | null | null | MAIN VERSION 2.py | HorridHanu/Notepad-Python | 5c40ddf0cc01b88387bf3052117581cba6e8ab6f | [
"Apache-2.0"
] | null | null | null |
########################################################################################
########################################################################################
## # CODE LANGUAGE IS PYHTON! ## ## ##
## # DATE: 1-JULY-2021 ## ## ######## ## ## ## ##
## # CODE BY HANU! ########## ## ######### ## ## ##
## # ONLY FOR EDUCATIONAL PURPOSE! ########## ####### ## ## ## ## ##
## # NOTEPAD COPY MAIN! ## ## ## ## ## ## ## ## ##
## # ITS ONLY DEMO! ## ## ####### ## ## ######## ##
########################################################################################
########################################################################################
#Define Functions For Cammand!
import os.path
import os
from tkinter.filedialog import askopenfilename, asksaveasfilename
import tkinter.messagebox as tmsg
# function for help!
from tkinter import *
root=Tk()
root.geometry("700x390")
root.title("Untitled - Notpad")
root.bell() #used to bell on opening!
# root.iconphoto("1.ICON.png")
# STATUS BAR!
statusbar = StringVar()
statusbar.set(" Be Happy....")
sbar = Label(root, textvariable=statusbar, relief=SUNKEN, anchor="w").pack(fill=X, side=BOTTOM)
# DEFINE FUNCTION FOR STATUS BAR!
from datetime import datetime
now = datetime.now()
Time = now.strftime("%H:%M")
Date = now.strftime("%D")
sb = Scrollbar(root)
sb.pack(fill=Y, side=RIGHT)
# Text area using text widget and connect with scroll bar!
text = Text(root, font="lucida 17", yscrollcommand=sb.set)
# for taking the full geometry
text.pack(fill=BOTH, expand=True)
file = None
sb.config(command=text.yview)
#Main Menu!
mainmenu=Menu(root)
# Submenu File!
m1 = Menu(mainmenu, tearoff=0)
m1.add_separator()
# to new file
m1.add_command(label="New Ctrl+N", command=newfile)
# m1.add_separator()
# to open existing file
m1.add_command(label="Open.. Ctrl+O", command=openfile)
# m1.add_separator()
# to save current file
m1.add_command(label="save Ctrl+s", command=savefile)
m1.add_separator()
# to print
m1.add_command(label="Print Ctrl+P", command=fun)
# to Exit!
m1.add_separator()
m1.add_command(label="Exit", command=exit) #exit has pre-function to exit!
mainmenu.add_cascade(label="File", menu=m1)
# file menu END
#Submenu Edit!
m2 = Menu(mainmenu, tearoff = 0)
m2.add_separator()
# to cut
m2.add_command(label="Cut Ctrl+X", command=cut)
# to copy
m2.add_command(label="Copy Ctrl+C", command=copy)
# to paste
m2.add_command(label="Paste Ctrl+V", command=paste)
m2.add_separator()
# to delete
m2.add_command(label="Delete Del", command=delete)
m2.add_separator()
m2.add_command(label="Select Ctrl+A",command=fun)
# to time
m2.add_command(label="Time/Date F5",command=time_now)
mainmenu.add_cascade(label="Edit", menu=m2)
# edit menu END
#Submenu Format
m3 = Menu(mainmenu, tearoff = 0)
m3.add_separator()
m3.add_command(label="WordWrap", command=fun)
# to font
m3.add_command(label="font..", command=font)
mainmenu.add_cascade(label="Format", menu=m3)
#Submenu Veiw
m4 = Menu(mainmenu, tearoff=0)
m4.add_separator()
# to view statusbar
m4.add_command(label="Status Bar", command=status_bar)
mainmenu.add_cascade(label="View", menu=m4)
#Submenu View Help
m5=Menu(mainmenu, tearoff = 0)
m5.add_separator()
# to view help
m5.add_command(label="View Help", command=help)
m5.add_separator()
# m5.add_separator()
# m5.add_separator()
# to rate
m5.add_command(label="Rate us!", command=rate)
# m5.add_separator()
# to join
m5.add_command(label="Join us!", command=join_us)
m5.add_separator()
m5.add_separator()
# about
m5.add_command(label="About Notepad", command=about)
mainmenu.add_cascade(label="Help", menu=m5)
# View help menu END
root.config(menu=mainmenu) #configure the mainmenu as menu
root.mainloop()
########################################################################################
######################################################################################## | 28.131034 | 95 | 0.539103 |
db308acc7784941bed9244b19f0ab77519bcb972 | 512 | py | Python | unfollow_parfum.py | AntonPukhonin/InstaPy | 0c480474ec39e174fa4256b48bc25bc4ecf7b6aa | [
"MIT"
] | null | null | null | unfollow_parfum.py | AntonPukhonin/InstaPy | 0c480474ec39e174fa4256b48bc25bc4ecf7b6aa | [
"MIT"
] | null | null | null | unfollow_parfum.py | AntonPukhonin/InstaPy | 0c480474ec39e174fa4256b48bc25bc4ecf7b6aa | [
"MIT"
] | null | null | null | from instapy import InstaPy
#insta_username = 'antonpuhonin'
#insta_password = 'Bulbazavr36'
insta_username = 'tonparfums'
insta_password = 'ov9AN6NlnV'
try:
session = InstaPy(username=insta_username,
password=insta_password,
headless_browser=True,
multi_logs=True)
session.login()
session.unfollow_users(amount=200, onlyInstapyFollowed = True, onlyInstapyMethod = 'FIFO', unfollow_after=6*24*60*60 )
finally:
session.end()
| 24.380952 | 122 | 0.667969 |
db30f2130ff4ed72860f0513ddb8d069dd812ef8 | 1,462 | py | Python | portal/grading/serializers.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 2 | 2020-11-09T03:48:36.000Z | 2021-07-02T14:30:09.000Z | portal/grading/serializers.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 132 | 2020-04-25T15:57:56.000Z | 2022-03-10T19:15:51.000Z | portal/grading/serializers.py | LDSSA/portal | 9561da1e262678fe68dcf51c66007c0fb13eb51a | [
"MIT"
] | 1 | 2020-10-24T16:15:57.000Z | 2020-10-24T16:15:57.000Z | from rest_framework import serializers
from portal.academy import models
from portal.applications.models import Submission, Challenge
| 24.366667 | 64 | 0.613543 |
db3364ee622377b95d22e40cf02ce787e7812d16 | 323 | py | Python | Funcoes/ex106-sistemaInterativoAjuda.py | ascaniopy/python | 6d8892b7b9ff803b7422a61e68a383ec6ac7d62d | [
"MIT"
] | null | null | null | Funcoes/ex106-sistemaInterativoAjuda.py | ascaniopy/python | 6d8892b7b9ff803b7422a61e68a383ec6ac7d62d | [
"MIT"
] | null | null | null | Funcoes/ex106-sistemaInterativoAjuda.py | ascaniopy/python | 6d8892b7b9ff803b7422a61e68a383ec6ac7d62d | [
"MIT"
] | null | null | null | from time import sleep
c = ('\033[m', # 0 - Sem cores
'\033[0;30;41m', # 1 - Vermelho
'\033[0;30;42m', # 2 - Verde
'\033[0;30;43m', # 3 - Amarelo
'\033[0;30;44m', # 4 - Azul
'\033[0;30;45m', # 5 - Roxo
'\033[0;30m' # 6 - Branco
)
#Programa principal
| 19 | 40 | 0.439628 |
db3369b101ea183c503c1fa561b47c91b9100d56 | 36 | py | Python | deeptrack/extras/__init__.py | Margon01/DeepTrack-2.0_old | f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf | [
"MIT"
] | 65 | 2020-04-29T01:06:01.000Z | 2022-03-28T12:44:02.000Z | deeptrack/extras/__init__.py | Margon01/DeepTrack-2.0_old | f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf | [
"MIT"
] | 41 | 2020-04-20T16:09:07.000Z | 2022-03-29T15:40:08.000Z | deeptrack/extras/__init__.py | Margon01/DeepTrack-2.0_old | f4f4abc89ab1f63aeb4722f84dcfb93189c57ccf | [
"MIT"
] | 31 | 2020-04-27T18:04:06.000Z | 2022-03-18T17:24:50.000Z | from . import datasets, radialcenter | 36 | 36 | 0.833333 |
db33adbcb92391813fa24af06e3df16ea1f77a19 | 236 | py | Python | pyvisdk/enums/virtual_machine_ht_sharing.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/enums/virtual_machine_ht_sharing.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null | pyvisdk/enums/virtual_machine_ht_sharing.py | Infinidat/pyvisdk | f2f4e5f50da16f659ccc1d84b6a00f397fa997f8 | [
"MIT"
] | null | null | null |
########################################
# Automatically generated, do not edit.
########################################
from pyvisdk.thirdparty import Enum
VirtualMachineHtSharing = Enum(
'any',
'internal',
'none',
)
| 15.733333 | 40 | 0.440678 |
db33d4b02c61194e50c6a9e8e0140a09b33f011f | 1,710 | py | Python | reo/migrations/0118_auto_20210715_2148.py | NREL/REopt_API | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 7 | 2022-01-29T12:10:10.000Z | 2022-03-28T13:45:20.000Z | reo/migrations/0118_auto_20210715_2148.py | NREL/reopt_api | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 12 | 2022-02-01T18:23:18.000Z | 2022-03-31T17:22:17.000Z | reo/migrations/0118_auto_20210715_2148.py | NREL/REopt_API | fbc70f3b0cdeec9ee220266d6b3b0c5d64f257a6 | [
"BSD-3-Clause"
] | 3 | 2022-02-08T19:44:40.000Z | 2022-03-12T11:05:36.000Z | # Generated by Django 3.1.12 on 2021-07-15 21:48
from django.db import migrations, models
| 31.666667 | 59 | 0.588304 |
db34a67ee55a1e9b0a17aba6120305fef0d0c936 | 16,287 | py | Python | bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py | resultant-gamedev/bpy_lambda | c8cf46c10c69e74a0892b621d76c62edaa5b04bc | [
"MIT"
] | null | null | null | bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py | resultant-gamedev/bpy_lambda | c8cf46c10c69e74a0892b621d76c62edaa5b04bc | [
"MIT"
] | null | null | null | bpy_lambda/2.78/scripts/addons_contrib/io_scene_cod/__init__.py | resultant-gamedev/bpy_lambda | c8cf46c10c69e74a0892b621d76c62edaa5b04bc | [
"MIT"
] | 1 | 2019-11-24T18:43:42.000Z | 2019-11-24T18:43:42.000Z | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
"""
Blender-CoD: Blender Add-On for Call of Duty modding
Version: alpha 3
Copyright (c) 2011 CoDEmanX, Flybynyt -- blender-cod@online.de
http://code.google.com/p/blender-cod/
TODO
- UI for xmodel and xanim import (planned for alpha 4/5)
"""
bl_info = {
"name": "Blender-CoD - Add-On for Call of Duty modding (alpha 3)",
"author": "CoDEmanX, Flybynyt",
"version": (0, 3, 5),
"blender": (2, 62, 0),
"location": "File > Import | File > Export",
"description": "Export models to *.XMODEL_EXPORT and animations to *.XANIM_EXPORT",
"warning": "Alpha version, please report any bugs!",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Import-Export/Call_of_Duty_IO",
"tracker_url": "https://developer.blender.org/maniphest/task/edit/form/2/",
"support": "TESTING",
"category": "Import-Export"
}
# To support reload properly, try to access a package var, if it's there, reload everything
if "bpy" in locals():
import imp
if "import_xmodel" in locals():
imp.reload(import_xmodel)
if "export_xmodel" in locals():
imp.reload(export_xmodel)
if "import_xanim" in locals():
imp.reload(import_xanim)
if "export_xanim" in locals():
imp.reload(export_xanim)
import bpy
from bpy.props import BoolProperty, IntProperty, FloatProperty, StringProperty, EnumProperty
import bpy_extras.io_utils
from bpy_extras.io_utils import ExportHelper, ImportHelper
import time
# Planned for alpha 4/5
def menu_func_xmodel_import(self, context):
self.layout.operator(ImportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
"""
def menu_func_xanim_import(self, context):
self.layout.operator(ImportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
"""
def menu_func_xmodel_export(self, context):
self.layout.operator(ExportXmodel.bl_idname, text="CoD Xmodel (.XMODEL_EXPORT)")
def menu_func_xanim_export(self, context):
self.layout.operator(ExportXanim.bl_idname, text="CoD Xanim (.XANIM_EXPORT)")
def register():
bpy.utils.register_module(__name__)
bpy.types.INFO_MT_file_import.append(menu_func_xmodel_import)
#bpy.types.INFO_MT_file_import.append(menu_func_xanim_import)
bpy.types.INFO_MT_file_export.append(menu_func_xmodel_export)
bpy.types.INFO_MT_file_export.append(menu_func_xanim_export)
def unregister():
bpy.utils.unregister_module(__name__)
bpy.types.INFO_MT_file_import.remove(menu_func_xmodel_import)
#bpy.types.INFO_MT_file_import.remove(menu_func_xanim_import)
bpy.types.INFO_MT_file_export.remove(menu_func_xmodel_export)
bpy.types.INFO_MT_file_export.remove(menu_func_xanim_export)
if __name__ == "__main__":
register()
| 34.144654 | 151 | 0.647203 |
db359edbcc421125b398c8492ccfbe1df5e59aa8 | 771 | py | Python | pynpact/tests/steps/test_extract.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | 2 | 2015-09-18T02:01:19.000Z | 2021-09-03T18:40:59.000Z | pynpact/tests/steps/test_extract.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | null | null | null | pynpact/tests/steps/test_extract.py | NProfileAnalysisComputationalTool/npact | d4495f5cba2a936f2be2f2c821edd5429d1a58da | [
"BSD-3-Clause"
] | 1 | 2015-09-25T18:58:21.000Z | 2015-09-25T18:58:21.000Z | import os.path
import pytest
import py
from pynpact.steps import extract
| 23.363636 | 47 | 0.713359 |
db3607c58d0cde5c1aa1bfb4ceddd2fc24ac1f1e | 16,994 | py | Python | dl_training/core.py | Duplums/SMLvsDL | b285717bd8d8e832b4bc9e2b42d18bd96b628def | [
"MIT"
] | null | null | null | dl_training/core.py | Duplums/SMLvsDL | b285717bd8d8e832b4bc9e2b42d18bd96b628def | [
"MIT"
] | null | null | null | dl_training/core.py | Duplums/SMLvsDL | b285717bd8d8e832b4bc9e2b42d18bd96b628def | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
Core classes.
"""
# System import
import os
import pickle
from copy import deepcopy
import subprocess
# Third party import
import torch
import torch.nn.functional as func
from torch.nn import DataParallel
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np
# Package import
from dl_training.utils import checkpoint
from dl_training.history import History
import dl_training.metrics as mmetrics
import logging | 41.550122 | 114 | 0.530658 |
db36254aae8d66e15ff58a16dc04f7e0fdb0d51b | 865 | py | Python | python/two_pointers/1004_max_consecutive_ones_iii.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 6 | 2019-07-15T13:23:57.000Z | 2020-01-22T03:12:01.000Z | python/two_pointers/1004_max_consecutive_ones_iii.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | null | null | null | python/two_pointers/1004_max_consecutive_ones_iii.py | linshaoyong/leetcode | ea052fad68a2fe0cbfa5469398508ec2b776654f | [
"MIT"
] | 1 | 2019-07-24T02:15:31.000Z | 2019-07-24T02:15:31.000Z | from collections import deque
| 27.03125 | 69 | 0.419653 |
db3658941378a7367cc8947a67be394b0c932596 | 3,000 | py | Python | student_files/lap_times_db.py | jstucken/DET-Python-Anki-Overdrive-v1-1 | 74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b | [
"MIT"
] | null | null | null | student_files/lap_times_db.py | jstucken/DET-Python-Anki-Overdrive-v1-1 | 74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b | [
"MIT"
] | null | null | null | student_files/lap_times_db.py | jstucken/DET-Python-Anki-Overdrive-v1-1 | 74cfcd9ea533cc0127fa2b8bd4ed26400da8a21b | [
"MIT"
] | null | null | null | #
# This script allows the user to control an Anki car using Python
# To control multiple cars at once, open a seperate Command Line Window for each car
# and call this script with the approriate car mac address.
# This script attempts to save lap times into local mysql db running on the pi
# Author: jstucken
# Created: 23-2-2021
#
SCRIPT_TITLE="Lap timer saving to Mysql"
# import required modules
import loader.bootstrapper
import time
from overdrive import Overdrive
from php_communicator import PhpCommunicator
from network import Network
# Setup our car
car = Overdrive(12) # init overdrive object
car.enableLocationData()
# get car mac address from our class object
car_mac = car.getMacAddress()
car_id = car.getCarId()
username = car.getUsername()
student_id = car.getStudentId()
# count number of laps completed
lap_count = 0
# start the car off
# usage: car.changeSpeed(speed, accel)
car.changeSpeed(400, 800)
last_lap_time = 0
last_lap_count = -1
# race 3 laps and time each one
while lap_count !=3:
time.sleep(0.1)
# lap count is incremented when cars pass over the finish line
lap_count = car.getLapCount()
# count laps done
if last_lap_count != lap_count:
last_lap_count = lap_count
print()
print("lap_count: "+str(lap_count))
# get lap time
prev_lap_time = car.getLapTime()
if last_lap_time != prev_lap_time:
print()
print("prev_lap_time: "+str(prev_lap_time))
# if car has completed at least 1 lap
if lap_count > 0:
# Save last_lap_time time to database now
# get cars current location and speed
location = car.getLocation()
speed = car.getSpeed()
# data to be sent to API
data = {
'student_id':student_id,
'car_id':car_id,
'lap_time':prev_lap_time,
'lap_count':lap_count,
'speed':speed
}
# get the local IP address of the server machine
local_ip_address = Network.getLocalIPAddress()
# build our PHP script URL where data will be sent to be saved
# eg "http://192.168.0.10/lap_times_save.php"
url = "http://"+local_ip_address+"/python_communicator/lap_times_save.php"
# Send data to PHP to save to database
php = PhpCommunicator()
return_text = php.getResponse(url, data) # get the response from PHP
# extracting response text
print("Response from PHP script: %s"%return_text)
# end if
print()
print("*****")
last_lap_time = prev_lap_time
# stop the car
car.stopCarFast()
print("Stopping as car has done the required number of laps")
car.disconnect()
quit() | 28.037383 | 87 | 0.606333 |
db377a3b2e18c647ed0d195a162511f6c719f4a5 | 9,992 | py | Python | flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py | cromulencellc/hackasat-final-2021 | d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac | [
"MIT"
] | 4 | 2022-02-25T05:45:27.000Z | 2022-03-10T01:05:27.000Z | flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py | cromulencellc/hackasat-final-2021 | d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac | [
"MIT"
] | null | null | null | flatsat/opensatkit/cfs/apps/adcs_io/adcs-drivers/cubewheel-driver/test/code.py | cromulencellc/hackasat-final-2021 | d01a1b5d7947b3e41ae2da3ec63d5f43278a5eac | [
"MIT"
] | 2 | 2022-03-02T02:14:16.000Z | 2022-03-05T07:36:18.000Z | import board
from i2cperipheral import I2CPeripheral
from analogio import AnalogOut
from digitalio import DigitalInOut, Direction, Pull
import struct
import math
import time
regs = [0] * 16
index = 0
i2c_addr = 0x68
frame_id = 0
motor_control_mode = 0
backup_mode = 0
motor_switch_state = 0
hall_switch_state = 0
encoder_switch_state = 0
error_flag = 0
unused = 0
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
reference_speed = 0
wheel_current = 290 # mA
wheel_speed = math.floor(100/2) #rpm
wheel_duty = 5
wheel_speed_backup = wheel_speed
vout = 0.95
dac_value = voltage_to_dac(vout)
print("Set analog output for testing: {0:f} ({1:d}) V".format(vout, dac_value))
analog_out = AnalogOut(board.A0)
analog_out.value = dac_value
enable_pin = DigitalInOut(board.D8)
enable_pin.direction = Direction.INPUT
# enable_pin.pull = Pull.DOWN
print("Waiting for wheel enable")
while enable_pin.value == False:
time.sleep(0.1)
print("Starting I2C response")
with I2CPeripheral(board.SCL, board.SDA, (i2c_addr,)) as device:
while True:
r = device.request()
if not r:
# Maybe do some housekeeping
continue
with r: # Closes the transfer if necessary by sending a NACK or feeding dummy bytes
# print("Process request")
# print("I2C Addr: 0x{0:02X}, Is Read {1:d}, Is Restart {2:d}".format(r.address, r.is_read, r.is_restart))
if r.address == i2c_addr:
if not r.is_read: # Main write which is Selected read
# print("Get Frame Id Byte")
b = r.read(1)
if b:
frame_id = struct.unpack("B", b)[0]
print("Recieved frame ID: " + str(frame_id))
if frame_id < 40:
# print("Telecommand Recieved")
if frame_id == 1:
reset_id = struct.unpack("B", r.read(1))[0]
# print("Reset telecommand recieved: {0:d}".format(reset_id))
elif frame_id == 2:
reference_speed = struct.unpack("h", r.read(2))[0]
reference_speed_rpm = float(reference_speed/2.0)
wheel_speed = reference_speed + 5
# print("Reference speed telecommand recieved. Speed: {0:d}:{1:f}".format(reference_speed, reference_speed_rpm))
elif frame_id == 3:
wheel_duty = struct.unpack("h", r.read(2))[0]
# print("Duty cycle command recieved. Duty Cycle: {0:d}".format(wheel_duty))
elif frame_id == 7:
motor_switch_state = r.read(1)
# print("Recieved motor power state command. State: {}".format(motor_switch_state))
elif frame_id == 8:
encoder_switch_state = r.read(1)
# print("Recieved encoder power state command. State: {}".format(encoder_switch_state))
elif frame_id == 8:
hall_switch_state = r.read(1)
# print("Recieved hall power state command. State: {}".format(encoder_switch_state))
elif frame_id == 10:
motor_control_mode = struct.unpack("B", r.read(1))[0]
# print("Control mode telecommand recieved. Mode: {0:d}".format(motor_control_mode))
elif frame_id == 12:
backup_mode = r.read(1)
# print("Recieved back-up mode state command. State: {}".format(backup_mode))
elif frame_id == 20:
clear_errors = r.read(1)
if clear_errors == 85:
invalidTelemetryFlag = 0
invalidTelecommandFlag = 0
encoderError = 0
uartError = 0
i2cError = 0
canError = 0
configurationError = 0
speedError = 0
elif frame_id == 31:
new_i2c_addr = r.read(1)
# print("Recieved set I2C addr command. I2C: {}".format(new_i2c_addr))
elif frame_id == 33:
new_can_mask = r.read(1)
# print("Recieved set CAN mask command. CAN Mask: {}".format(new_can_mask))
elif frame_id == 33:
b = r.read(3)
# print("Recieved PWM Gain Command: {0:s}".format(str(b)))
elif frame_id == 34:
b = r.read(6)
# print("Recieved Main Speed Controller Gain Command: {0:s}".format(str(b)))
elif frame_id == 35:
b = r.read(6)
# print("Recieved Backup Speed Controller Gain Command: {0:s}".format(str(b)))
else:
invalidTelecommandFlag = 1
else:
# print("No data to read")
continue
elif r.is_restart: # Combined transfer: This is the Main read message
# print("Recieved Telemetry Request")
n = 0
if frame_id == 128:
n = r.write(bytes(send_tlm_identification()))
elif frame_id == 129:
n = r.write(bytes(send_tlm_identification_ext()))
elif frame_id == 130:
n = r.write(bytes(send_tlm_status(motor_control_mode, backup_mode, motor_switch_state, hall_switch_state, encoder_switch_state, error_flag)))
elif frame_id == 133:
n = r.write(bytes(2))
elif frame_id == 134:
n = r.write(bytes(2))
elif frame_id == 135:
n = r.write(bytes(2))
elif frame_id == 137:
n = r.write(bytes(send_tlm_wheel_data_full(wheel_speed, reference_speed, wheel_current)))
elif frame_id == 138:
n = r.write(bytes(send_tlm_wheel_data_additional(wheel_duty, wheel_speed_backup)))
elif frame_id == 139:
n = r.write(bytearray([9,8,7]))
elif frame_id == 140:
n = r.write(bytearray([1,2,3,4,5,6]))
elif frame_id == 141:
n = r.write(bytearray([10, 11, 12, 13, 14, 15]))
elif frame_id == 145:
n = r.write(bytes(send_tlm_wheel_status_flags(invalidTelemetryFlag, invalidTelecommandFlag, encoderError, uartError, i2cError, canError, configurationError, speedError)))
else:
invalidTelemetryFlag = 1
# print("Wrote " + str(n) + " bytes to master")
| 46.910798 | 214 | 0.522218 |
db37c14354deeb12104130ebc747684e2912a561 | 360 | py | Python | constants.py | tooreht/airstripmap | 7a65e67e417870c6853fd1adb848cf91d724f566 | [
"MIT"
] | null | null | null | constants.py | tooreht/airstripmap | 7a65e67e417870c6853fd1adb848cf91d724f566 | [
"MIT"
] | null | null | null | constants.py | tooreht/airstripmap | 7a65e67e417870c6853fd1adb848cf91d724f566 | [
"MIT"
] | null | null | null | GOV_AIRPORTS = {
"Antananarivo/Ivato": "big",
"Antsiranana/Diego": "small",
"Fianarantsoa": "small",
"Tolagnaro/Ft. Dauphin": "small",
"Mahajanga": "medium",
"Mananjary": "small",
"Nosy Be": "medium",
"Morondava": "small",
"Sainte Marie": "small",
"Sambava": "small",
"Toamasina": "small",
"Toliary": "small",
}
| 24 | 37 | 0.561111 |
db399ce2f0303a23e925d9d8085ddcee798d396a | 608 | py | Python | practical_0/fibonacci.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | practical_0/fibonacci.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | practical_0/fibonacci.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | if __name__ == "__main__":
INPUT = sys.argv[1]
print(INPUT)
main(INPUT) | 30.4 | 80 | 0.6875 |
db3a4d55930ad8686d2de82e1838a1ca79a144ec | 24,800 | py | Python | UW_System/UW_System/UW_System/spiders/uw_system.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null | UW_System/UW_System/UW_System/spiders/uw_system.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null | UW_System/UW_System/UW_System/spiders/uw_system.py | Nouldine/MyCrawlerSystem | 7bba8ba3ec76e10f70a35700602812ee6f039b63 | [
"MIT"
] | null | null | null |
from scrapy import Spider
from scrapy.spiders import CrawlSpider, Rule
from scrapy.selector import Selector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.linkextractors import LinkExtractor
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError, TCPTimedOutError
from w3lib.html import remove_tags
from UW_System.items import UwSystemItem
| 90.510949 | 303 | 0.798185 |
db3b169862361f20c4e85e1f3babf59d22b794c5 | 10,622 | py | Python | src/lib/GL/glutbindings/glutbind.py | kokizzu/v8cgi | eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba | [
"BSD-3-Clause"
] | 4 | 2016-01-31T08:49:35.000Z | 2021-07-12T17:31:42.000Z | src/lib/GL/glutbindings/glutbind.py | kokizzu/v8cgi | eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba | [
"BSD-3-Clause"
] | null | null | null | src/lib/GL/glutbindings/glutbind.py | kokizzu/v8cgi | eafd3bd7a5dd1d60e2f1483701a52e7ac0ae0eba | [
"BSD-3-Clause"
] | 1 | 2021-06-03T22:51:17.000Z | 2021-06-03T22:51:17.000Z | import sys
import re
PATH_GLUT = 'glut.h'
FILE_GLUT = 'glutbind.cpp'
TEMPLATES = ['glutInit', 'glutTimerFunc']
def main():
"""
Still some things have to be hand-made, like
changing argv pargc values in the glutInit method definition
Also change the TimerFunc method with some magic.
"""
make_glut()
def multiple_replace(dict, text):
""" Replace in 'text' all occurences of any key in the given
dictionary by its corresponding value. Returns the new tring."""
# Create a regular expression from the dictionary keys
regex = re.compile("(%s)" % "|".join(map(re.escape, dict.keys())))
# For each match, look-up corresponding value in dictionary
return regex.sub(lambda mo: dict[mo.string[mo.start():mo.end()]], text)
main()
| 33.828025 | 188 | 0.553568 |