blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 3
288
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
112
| license_type
stringclasses 2
values | repo_name
stringlengths 5
115
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 684
values | visit_date
timestamp[us]date 2015-08-06 10:31:46
2023-09-06 10:44:38
| revision_date
timestamp[us]date 1970-01-01 02:38:32
2037-05-03 13:00:00
| committer_date
timestamp[us]date 1970-01-01 02:38:32
2023-09-06 01:08:06
| github_id
int64 4.92k
681M
⌀ | star_events_count
int64 0
209k
| fork_events_count
int64 0
110k
| gha_license_id
stringclasses 22
values | gha_event_created_at
timestamp[us]date 2012-06-04 01:52:49
2023-09-14 21:59:50
⌀ | gha_created_at
timestamp[us]date 2008-05-22 07:58:19
2023-08-21 12:35:19
⌀ | gha_language
stringclasses 147
values | src_encoding
stringclasses 25
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 128
12.7k
| extension
stringclasses 142
values | content
stringlengths 128
8.19k
| authors
listlengths 1
1
| author_id
stringlengths 1
132
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2a21be4baa493ba4ebbd659cb4cf4c518e170455
|
11583bb4cbcd1518bec8f1316cb3588b2bc2c1c6
|
/zaif_last_price.py
|
c57d70cc6face15056fbc3e7e9755b12f1a1cb0f
|
[] |
no_license
|
aki-06/zaif_last_price
|
e41ac7bfa55f6a5e959e0b757ec4f6924e69a0aa
|
2f6eae8f1df5443d8da0776f2a617a1b7b938a06
|
refs/heads/master
| 2021-08-14T11:31:48.766514
| 2017-11-15T14:25:38
| 2017-11-15T14:25:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 818
|
py
|
import zaifapi
from zaifapi import *
if __name__ == '__main__':
    # Query the Zaif public API for the last traded price of several pairs,
    # then print one "label =  price" line per pair.
    api = ZaifPublicApi()
    # (printed label, API pair name) — note 'pepe_jpy' labels the pepecash pair.
    pairs = [
        ('btc_jpy', 'btc_jpy'),
        ('xem_jpy', 'xem_jpy'),
        ('eth_jpy', 'eth_jpy'),
        ('mona_jpy', 'mona_jpy'),
        ('bch_jpy', 'bch_jpy'),
        ('zaif_jpy', 'zaif_jpy'),
        ('pepe_jpy', 'pepecash_jpy'),
    ]
    # Fetch everything first (matching the original call order), then print.
    tickers = [api.last_price(pair) for _, pair in pairs]
    for (label, _), ticker in zip(pairs, tickers):
        print(label + ' = ', ticker['last_price'])
|
[
"akihiro.nemoto.06@gmail.com"
] |
akihiro.nemoto.06@gmail.com
|
0ea91e5a0cf17f128231b27e498ed8faef5c8244
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03131/s109288213.py
|
d83fd52fecf6c54b08e12805530c326dab31a380
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
def solve(k, a, b):
    """Return the final count after k operations starting from 1.

    Appears to be the biscuit/yen exchange problem: each operation either
    adds one (+1), or trades `a` units for 1 yen / 1 yen for `b` units,
    so one full exchange cycle costs 2 operations and nets `b - a` units.
    """
    count = 1
    if b - a <= 2:
        # An exchange cycle nets b - a per 2 ops; when that is <= 2,
        # simply adding +1 each operation is never worse.
        return count + k
    # Spend a - 1 ops to reach `a` units, then do as many 2-op exchange
    # cycles as fit; leftover operations add +1 each.
    # Floor division (not int(x / 2)) keeps this exact for huge k.
    cycles = max(0, (k - (a - 1)) // 2)
    return count + cycles * (b - a) + (a - 1) + (k - (a - 1) - cycles * 2)


if __name__ == '__main__':
    K, A, B = map(int, input().split())
    print(solve(K, A, B))
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
83240fb402ffb441f8ea27fc70d086d6e2787731
|
276dd5dd778adefd039e6f6a71dc574386729401
|
/demo2/grpc-services/users/client/sample_client.py
|
861eb08c3d04ea39c85a816e156ae04b0ad6b8a4
|
[
"MIT"
] |
permissive
|
amitsaha/python-grpc-demo
|
4880e64b4b993df4b7eb96f2946b6607fb2dfa82
|
48546bfda83062a3fcb015d352fecb46346e8c92
|
refs/heads/master
| 2023-01-12T10:01:36.396783
| 2022-10-08T05:10:39
| 2022-10-08T05:10:39
| 101,063,881
| 145
| 52
|
MIT
| 2022-12-27T17:26:21
| 2017-08-22T13:07:17
|
Python
|
UTF-8
|
Python
| false
| false
| 777
|
py
|
import users_pb2_grpc as users_service
import users_types_pb2 as users_messages
from client_wrapper import ServiceClient
def run():
    """Create a user via the Users gRPC service, then stream a user listing."""
    client = ServiceClient(users_service, 'UsersStub', 'localhost', 50051)
    # Example per-call metadata forwarded with the request.
    call_metadata = [('ip', '127.0.0.1')]
    create_reply = client.CreateUser(
        users_messages.CreateUserRequest(username='tom'),
        metadata=call_metadata
    )
    if create_reply:
        print("User created:", create_reply.user.username)
    listing_request = users_messages.GetUsersRequest(
        user=[
            users_messages.User(username="alexa", user_id=1),
            users_messages.User(username="christie", user_id=1),
        ]
    )
    # GetUsers streams responses; print each one as it arrives.
    for reply in client.GetUsers(listing_request):
        print(reply)
if __name__ == '__main__':
    run()
|
[
"amitsaha.in@gmail.com"
] |
amitsaha.in@gmail.com
|
787d0ead8e3e61a69f630621e29795a58e9c52c3
|
3531bbff49b36aff6289911bc061865e7e59bd46
|
/src/HttpGenerator/Plugins/PythonNullWebserverContentProcessorPlugin.py
|
ef8a2dc856cbfc76161efbbb5e7a1f253b0867ea
|
[
"LicenseRef-scancode-unknown-license-reference",
"BSL-1.0",
"Python-2.0"
] |
permissive
|
davidbrownell/Common_Web
|
c5e66fc7ca03cdbd640ef1fe7cc12dafa9c04ef6
|
fe3ee9d46cf9f2184434cf950d2c4281b5ec21e7
|
refs/heads/master
| 2022-09-05T18:33:34.014472
| 2022-06-08T16:44:54
| 2022-06-08T16:44:54
| 95,167,233
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,940
|
py
|
# ----------------------------------------------------------------------
# |
# | PythonNullWebserverContentProcessorPlugin.py
# |
# | David Brownell <db@DavidBrownell.com>
# | 2020-08-28 22:15:38
# |
# ----------------------------------------------------------------------
# |
# | Copyright David Brownell 2020-22
# | Distributed under the Boost Software License, Version 1.0. See
# | accompanying file LICENSE_1_0.txt or copy at
# | http://www.boost.org/LICENSE_1_0.txt.
# |
# ----------------------------------------------------------------------
"""Contains the Plugin object"""
import os
import textwrap
import six
import CommonEnvironment
from CommonEnvironment import Interface
from CommonEnvironment import StringHelpers
from CommonEnvironmentEx.Package import InitRelativeImports
# ----------------------------------------------------------------------
_script_fullpath = CommonEnvironment.ThisFullpath()
_script_dir, _script_name = os.path.split(_script_fullpath)
# ----------------------------------------------------------------------
with InitRelativeImports():
from ..Plugin import Plugin as PluginBase
# ----------------------------------------------------------------------
@Interface.staticderived
class Plugin(PluginBase):
    """HttpGenerator plugin that emits a no-op ("null") ContentProcessor
    package for generated webservers, plus an optional Helpers sub-package.
    """
    # ----------------------------------------------------------------------
    # |  Public Properties
    Name = Interface.DerivedProperty("PythonNullWebserverContentProcessor")
    Description = Interface.DerivedProperty("Noop ContentProcessor used by generated Webservers")
    # ----------------------------------------------------------------------
    # |  Public Methods
    @staticmethod
    @Interface.override
    def IsValidEnvironment():
        """This plugin has no environment restrictions."""
        return True
    # ----------------------------------------------------------------------
    @staticmethod
    @Interface.override
    def GenerateCustomSettingsAndDefaults():
        """Yield (setting_name, default) pairs; 'no_helpers' suppresses the
        Helpers/ sub-package output."""
        yield "no_helpers", False
    # ----------------------------------------------------------------------
    @classmethod
    @Interface.override
    def GenerateOutputFilenames(cls, context):
        """Return the relative filenames this plugin writes.

        Also caches the list as shared class state (cls._filenames) that
        Generate() reads later in the same run.
        """
        filenames = ["__init__.py", "NullContentProcessor.py"]
        if not context["plugin_settings"]["no_helpers"]:
            filenames += [
                os.path.join("Helpers", "__init__.py"),
                os.path.join("Helpers", "ContentProcessors.py"),
            ]
        cls._filenames = filenames
        return filenames
    # ----------------------------------------------------------------------
    @classmethod
    @Interface.override
    def Generate(
        cls,
        http_code_generator,
        invoke_reason,
        output_dir,
        roots,
        status_stream,
        verbose_stream,
        verbose,
        no_helpers,
    ):
        """Write each file recorded by GenerateOutputFilenames() under
        output_dir.

        `roots` values carry `.endpoints`; each endpoint tree is flattened
        and its unique_name has '.' stripped so the generated method names
        are valid Python identifiers.
        """
        file_header = cls._GenerateFileHeader(
            prefix="# ",
        )
        filenames = [os.path.join(output_dir, filename) for filename in cls._filenames]
        # Update the endpoints for processing
        endpoints = []
        for endpoint_info in six.itervalues(roots):
            endpoints += endpoint_info.endpoints
        # ----------------------------------------------------------------------
        def Impl(endpoint):
            # Strip '.' so unique_name can appear inside generated identifiers.
            endpoint.unique_name = endpoint.unique_name.replace(".", "")
            for child in endpoint.children:
                Impl(child)
        # ----------------------------------------------------------------------
        for endpoint in endpoints:
            Impl(endpoint)
        # __init__.py (file header only)
        assert filenames
        status_stream.write("Writing '{}'...".format(filenames[0]))
        with status_stream.DoneManager():
            with open(filenames[0], "w") as f:
                f.write(file_header)
        filenames.pop(0)
        # NullContentProcessor.py
        assert filenames
        status_stream.write("Writing '{}'...".format(filenames[0]))
        with status_stream.DoneManager():
            with open(filenames[0], "w") as f:
                f.write(file_header)
                WriteNullContentProcessor(f, endpoints)
        filenames.pop(0)
        if not no_helpers:
            # Helpers/__init__.py (file header only)
            assert filenames
            status_stream.write("Writing '{}'...".format(filenames[0]))
            with status_stream.DoneManager():
                with open(filenames[0], "w") as f:
                    f.write(file_header)
            filenames.pop(0)
            # Helpers/ContentProcessors.py (previous comment said 'Authenticator' — stale)
            assert filenames
            status_stream.write("Writing '{}'...".format(filenames[0]))
            with status_stream.DoneManager():
                with open(filenames[0], "w") as f:
                    f.write(file_header)
                    WriteContentProcessor(f)
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
# ----------------------------------------------------------------------
def WriteNullContentProcessor(f, endpoints):
    """Emit NullContentProcessor.py to `f`: a ContentProcessorInterface
    implementation whose Request/Response hooks no-op for every endpoint
    method in the given endpoint trees."""
    method_blocks = []
    # ----------------------------------------------------------------------
    def Visit(endpoint):
        # Generate one Request/Response handler pair per verb, then recurse.
        for method in endpoint.methods:
            method_blocks.append(
                textwrap.dedent(
                    """\
                    # ----------------------------------------------------------------------
                    @staticmethod
                    @Interface.override
                    def {unique_name}_{method_name}_Request(debug, uri_args, headers, form_data, query_data_body):
                        return None
                    # ----------------------------------------------------------------------
                    @staticmethod
                    @Interface.override
                    def {unique_name}_{method_name}_Response(get_ids_func, debug, uri, result):
                        return 200, [], "{unique_name}_{method_name}: {{}}".format(result)
                    """,
                ).format(
                    unique_name=endpoint.unique_name,
                    method_name=method.verb,
                ),
            )
        for child in endpoint.children:
            Visit(child)
    # ----------------------------------------------------------------------
    for endpoint in endpoints:
        Visit(endpoint)
    class_body = StringHelpers.LeftJustify("".join(method_blocks).rstrip(), 4)
    f.write(
        textwrap.dedent(
            """\
            import sys
            import six
            from CommonEnvironment import Interface
            # Get the ContentProcessorInterface
            for name, module in six.iteritems(sys.modules):
                if name.split(".")[-1] == "Interfaces" and hasattr(module, "ContentProcessorInterface"):
                    ContentProcessorInterface = module.ContentProcessorInterface
                    break
            # ----------------------------------------------------------------------
            @Interface.staticderived
            class NullContentProcessor(ContentProcessorInterface):
                {}
            """,
        ).format(class_body),
    )
# ----------------------------------------------------------------------
def WriteContentProcessor(f):
    """Emit the helper module mapping content types to NullContentProcessor."""
    # The generated source is fixed; dedent keeps the template readable here.
    helper_source = textwrap.dedent(
        """\
        from CommonEnvironmentEx.Package import InitRelativeImports
        with InitRelativeImports():
            from ..NullContentProcessor import NullContentProcessor
        content_processors = {
            None : NullContentProcessor,
        }
        """,
    )
    f.write(helper_source)
|
[
"db@DavidBrownell.com"
] |
db@DavidBrownell.com
|
3e859dd4d8c7da1dd8f6989bb05ffe7879d48075
|
505343f6ace00d22f8753c1a943a5794a619e698
|
/katas/Python/8 kyu/Transportation on vacation 568d0dd208ee69389d000016.py
|
b4a45d8a8d95e7a907075fea11eac61bcc059cae
|
[] |
no_license
|
bullet1337/codewars
|
7652e50bf768bc47976a9124dd98b93602d4d458
|
ba7f13ddd766158b41e036dae5d6b15f7f08761a
|
refs/heads/master
| 2020-03-27T05:04:03.751302
| 2019-04-30T17:45:39
| 2019-04-30T17:45:39
| 145,991,995
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 140
|
py
|
# https://www.codewars.com/kata/568d0dd208ee69389d000016
def rental_car_cost(d):
    """Cost of renting a car for `d` days at $40/day, with a $50 discount
    for 7+ days or $20 for 3+ days."""
    total = 40 * d
    if d >= 7:
        total -= 50
    elif d >= 3:
        total -= 20
    return total
|
[
"alichek95@mail.ru"
] |
alichek95@mail.ru
|
a17c64a742a45832da07a086ce11578fdb82b72e
|
d532b85841b459c61d88d380e88dd08d29836d43
|
/solutions/1418_display_table_of_food_orders_in_a_restaurant.py
|
70d472684cea39e66b5cec60e2c88f8f5d159460
|
[
"MIT"
] |
permissive
|
YiqunPeng/leetcode_pro
|
ad942468df5506de9dc48a4019933f658e2a3121
|
4a508a982b125a3a90ea893ae70863df7c99cc70
|
refs/heads/master
| 2022-05-15T09:32:02.699180
| 2022-05-14T16:32:17
| 2022-05-14T16:32:17
| 182,453,966
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 744
|
py
|
class Solution:
    def displayTable(self, orders: List[List[str]]) -> List[List[str]]:
        """Build the restaurant display table.

        Header row is 'Table' followed by all ordered food names sorted
        alphabetically; then one row per table number (ascending) holding
        the count of each food that table ordered, as strings.
        """
        food_names = set()
        per_table = {}
        for order in orders:
            table_no, food = int(order[1]), order[2]
            counts = per_table.setdefault(table_no, {})
            counts[food] = counts.get(food, 0) + 1
            food_names.add(food)
        ordered_foods = sorted(food_names)
        rows = [['Table'] + ordered_foods]
        for table_no in sorted(per_table):
            counts = per_table[table_no]
            rows.append(
                [str(table_no)] + [str(counts.get(name, 0)) for name in ordered_foods]
            )
        return rows
|
[
"ypeng1@andrew.cmu.edu"
] |
ypeng1@andrew.cmu.edu
|
8050dc76614b274c3c99011b53fa8d657839b7a3
|
c3c7398ec14865ea34c7f03aa5e012ddb19f0d5b
|
/DjangoUeditor/adminx.py
|
e98927add372e607762b88e5328ade5003c6c29b
|
[] |
no_license
|
mzm5466/blog
|
0e022f0ce85a0079cb72ffd9f472c7684f94d9fb
|
13625fe7028a0df11a30d7de32751e34d681de00
|
refs/heads/master
| 2021-01-23T16:51:58.296591
| 2018-11-17T06:05:50
| 2018-11-17T06:05:50
| 102,748,039
| 0
| 0
| null | 2018-11-12T23:28:57
| 2017-09-07T14:36:32
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 1,388
|
py
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
#__author__ = 'sai'
#DjangoUeditor Xadmin plugin
import xadmin
from django.db.models import TextField
from xadmin.views import BaseAdminPlugin, ModelFormAdminView, DetailAdminView
from DjangoUeditor.models import UEditorField
from DjangoUeditor.widgets import UEditorWidget
from django.conf import settings
class XadminUEditorWidget(UEditorWidget):
    # Adapter so the UEditor rich-text widget can be used inside xadmin forms.
    def __init__(self,**kwargs):
        # Keep the raw UEditor options for later use.
        self.ueditor_settings=kwargs
        # Media.js is cleared — presumably because block_extrahead() below
        # injects the UEditor scripts itself and double-loading must be
        # avoided; confirm against UEditorWidget's Media definition.
        self.Media.js = None
        # NOTE(review): kwargs is passed positionally (a single dict), not as
        # **kwargs — confirm UEditorWidget.__init__ accepts a mapping argument.
        super(XadminUEditorWidget, self).__init__(kwargs)
class UeditorPlugin(BaseAdminPlugin):
    """xadmin plugin that renders 'ueditor'-styled fields with UEditor."""
    def get_field_style(self, attrs, db_field, style, **kwargs):
        # Only fields explicitly styled as 'ueditor' are affected.
        if style != 'ueditor':
            return attrs
        if isinstance(db_field, UEditorField):
            # Reuse the options the model field configured on its own widget.
            configured = db_field.formfield().widget.attrs
            return {'widget': XadminUEditorWidget(**configured)}
        if isinstance(db_field, TextField):
            return {'widget': XadminUEditorWidget}
        return attrs
    def block_extrahead(self, context, nodes):
        # Inject the UEditor script tags into the page head.
        tag = '<script type="text/javascript" src="%s"></script>'
        scripts = tag % (settings.STATIC_URL + "ueditor/ueditor.config.js")
        scripts += tag % (settings.STATIC_URL + "ueditor/ueditor.all.min.js")
        nodes.append(scripts)
# Activate the plugin on both detail pages and add/edit form pages.
xadmin.site.register_plugin(UeditorPlugin, DetailAdminView)
xadmin.site.register_plugin(UeditorPlugin, ModelFormAdminView)
|
[
"you@example.com"
] |
you@example.com
|
c102131c6d4a516bfdb9a46b2857ea494a2a4e61
|
2dfedaf52ea247e7545bcf1b62e78e306dbb369f
|
/tele_theta_gang_bot.py
|
a352295798f52ff7912945a3006f56b417923c49
|
[
"MIT"
] |
permissive
|
nasgoncalves/trading-utils
|
5602d6e52d6b62f138d38fe80877af0d0bec58e3
|
aa9d3cdb6e9052ff1022a23d5074ec1c540f9161
|
refs/heads/main
| 2023-04-19T19:47:16.050050
| 2021-05-17T18:53:13
| 2021-05-17T18:53:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,794
|
py
|
import logging
import os
from datetime import datetime
from telegram import Update
from telegram.ext import (
Updater,
CommandHandler,
MessageHandler,
Filters,
CallbackContext,
)
from common.bot_wrapper import start, help_command
from common.environment import TELEGRAM_THETA_GANG_BOT
from common.external_charts import build_chart_link
from common.logger import init_logging
from common.options import combined_options_df
from common.reporting import build_links_in_markdown
def select_strikes_for(
    options_df,
    selected_expiry,
    option_type,
    additional_filters,
    sort_criteria,
    fetch_limit,
):
    """Filter options_df to one expiry and option type plus extra criteria,
    sort, and return at most fetch_limit rows.

    additional_filters is a pandas query fragment; sort_criteria is a dict of
    keyword args for DataFrame.sort_values.
    """
    query = (
        f"(expiration_date == '{selected_expiry}') "
        f"and (option_type == '{option_type}') "
        f"and {additional_filters}"
    )
    matching = options_df.query(query)
    ranked = matching.sort_values(**sort_criteria)
    return ranked.head(n=fetch_limit)
def filter_strikes(options_df, selected_expiry, delta=0.3):
    """Pick one call and one put strike near the target delta for an expiry.

    Returns a (call_row, put_row) pair of plain dicts: the call with the
    highest greeks_delta still below `delta`, and the put with the lowest
    greeks_delta still above -`delta`.
    """
    call_pick = select_strikes_for(
        options_df,
        selected_expiry,
        option_type="call",
        additional_filters=f"(greeks_delta < {delta})",
        sort_criteria=dict(by="greeks_delta", ascending=False),
        fetch_limit=1,
    )
    put_pick = select_strikes_for(
        options_df,
        selected_expiry,
        option_type="put",
        additional_filters=f"(greeks_delta > -{delta})",
        sort_criteria=dict(by="greeks_delta", ascending=True),
        fetch_limit=1,
    )
    return (call_pick.iloc[0].to_dict(), put_pick.iloc[0].to_dict())
def collect_strikes(options_df):
    """Return (call, put) strike picks for the first three expiries."""
    expiries = options_df.expiration_date.unique()
    return [filter_strikes(options_df, expiries[i]) for i in range(3)]
def build_options_trade_info(ticker, options_df):
    """Render a Markdown list of candidate short-put / short-strangle trades.

    One pair of lines per expiry returned by collect_strikes(), each linking
    to an optionstrat.com builder URL. Returns a single os.linesep-joined
    Markdown string.
    """
    selected_strikes = collect_strikes(options_df)
    referrer = "@thetagangbot"
    m = ["_Possible trades_"]
    for idx, (call_strike_record, put_strike_record) in enumerate(selected_strikes):
        selected_expiry = call_strike_record.get("expiration_date")
        call_strike = call_strike_record.get("strike")
        call_credit = call_strike_record.get("bid")
        put_strike = put_strike_record.get("strike")
        put_delta = put_strike_record.get("greeks_delta")
        put_credit = put_strike_record.get("bid")
        put_break_even = put_strike - put_credit
        # optionstrat URLs want a YYMMDD expiry stamp.
        short_hand_date = datetime.strptime(selected_expiry, "%Y-%m-%d").strftime(
            "%y%m%d"
        )
        short_put_link = f"https://optionstrat.com/build/short-put/{ticker}/-{short_hand_date}P{put_strike}?referral={referrer}"
        short_strangle_credit = call_credit + put_credit
        strangle_break_even = "(${} <-> ${})".format(
            put_strike - short_strangle_credit, call_strike + short_strangle_credit
        )
        short_strangle_link = f"https://optionstrat.com/build/short-strangle/{ticker}/-{short_hand_date}P{put_strike},-{short_hand_date}C{call_strike}?referral={referrer}"
        # One clock emoji per expiry index marks how far out the expiry is.
        time_emoji_msg = (idx + 1) * "🕐"
        # NOTE(review): credit is multiplied by 100 — presumably per-contract
        # dollars from a per-share quote; confirm against the data source.
        m.append(
            f"{time_emoji_msg} *Expiry* {selected_expiry} [Short Put]({short_put_link}) *Strike* ${put_strike}, *Delta* {put_delta}, *Credit* ${'%0.2f' % (put_credit * 100)} *Breakeven* ${put_break_even}"
        )
        m.append(
            f"{time_emoji_msg} *Expiry* {selected_expiry} [Short Strangle]({short_strangle_link}) *Strikes* (${put_strike} <-> ${call_strike}), *Credit* ${'%0.2f' % (short_strangle_credit * 100)}, *Breakeven* {strangle_break_even}"
        )
        m.append(os.linesep)
    return os.linesep.join(m)
def populate_additional_info(ticker):
    """Build the possible-trades summary for `ticker` over its next 3 expiries."""
    chain = combined_options_df(ticker, expiries=3)
    return build_options_trade_info(ticker, chain)
def build_response_message(ticker):
    """Return (chart_link, markdown_body) for the given ticker."""
    logging.info("Processing ticker: {}".format(ticker))
    daily_chart_link = build_chart_link(ticker)
    links = build_links_in_markdown(ticker)
    trade_info = populate_additional_info(ticker)
    disclaimer = "_ Disclaimer: Not financial advice _"
    body = links + os.linesep + trade_info + os.linesep + disclaimer
    return (daily_chart_link, body)
def generate_report(ticker, update: Update, context: CallbackContext):
    """Reply with the ticker's chart photo and Markdown trade summary."""
    bot = context.bot
    chat_id = update.effective_chat.id
    update.message.reply_text(f"Looking up #{ticker}", quote=True)
    try:
        # Build and send inside one try: lookup failures for unknown symbols
        # surface as NameError/AttributeError and are reported to the chat.
        chart_file, full_message = build_response_message(ticker)
        bot.send_photo(chat_id, chart_file)
        bot.send_message(
            chat_id, full_message, disable_web_page_preview=True, parse_mode="Markdown"
        )
    except (NameError, AttributeError) as error:
        bot.send_message(chat_id, str(error))
def handle_cmd(update: Update, context: CallbackContext) -> None:
    """Treat messages like '$TSLA' as report requests; ignore everything else."""
    message_text: str = update.message.text
    if not message_text.startswith("$"):
        return
    generate_report(message_text[1:], update, context)
def main():
    """Wire up the Telegram handlers and poll until interrupted."""
    logging.info("Starting tele-theta-gang bot")
    updater = Updater(TELEGRAM_THETA_GANG_BOT, use_context=True)
    dispatcher = updater.dispatcher
    for command, callback in (("start", start), ("help", help_command)):
        dispatcher.add_handler(CommandHandler(command, callback))
    # Any non-command text goes through the '$TICKER' handler.
    dispatcher.add_handler(MessageHandler(Filters.text & ~Filters.command, handle_cmd))
    updater.start_polling()
    updater.idle()
def run_once(ticker):
    """Manual smoke test: print the trade summary for one ticker and exit."""
    # Testing
    print(populate_additional_info(ticker))
    exit(-1)
if __name__ == "__main__":
    init_logging()
    # run_once("GBTC") # - Enable for testing
    main()
|
[
"575441+namuan@users.noreply.github.com"
] |
575441+namuan@users.noreply.github.com
|
df02039395f5072d5716d3dda7922462f52db60f
|
09e57dd1374713f06b70d7b37a580130d9bbab0d
|
/benchmark/startQiskit_QC35.py
|
47df0d95aeb43e7c3b36125d2db8b174fe628db7
|
[
"BSD-3-Clause"
] |
permissive
|
UCLA-SEAL/QDiff
|
ad53650034897abb5941e74539e3aee8edb600ab
|
d968cbc47fe926b7f88b4adf10490f1edd6f8819
|
refs/heads/main
| 2023-08-05T04:52:24.961998
| 2021-09-19T02:56:16
| 2021-09-19T02:56:16
| 405,159,939
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,509
|
py
|
# qubit number=3
# total number=9
import numpy as np
from qiskit import QuantumCircuit, execute, Aer, QuantumRegister, ClassicalRegister, transpile, BasicAer, IBMQ
import networkx as nx
from qiskit.visualization import plot_histogram
from typing import *
from pprint import pprint
from math import log2
from collections import Counter
from qiskit.test.mock import FakeVigo, FakeYorktown
kernel = 'circuit/bernstein'
def make_circuit(n:int) -> QuantumCircuit:
    """Build the generated QAOA-style benchmark circuit.

    Relies on module-level globals defined by the __main__ block before this
    is called: E (weighted edge list), V (vertex array), gamma and beta
    (angles from the grid search).

    NOTE(review): the register is sized by n, but input_qubit[3] is used
    unconditionally, so n must be >= 4 (the caller passes 4).
    """
    # circuit begin
    input_qubit = QuantumRegister(n,"qc")
    prog = QuantumCircuit(input_qubit)
    prog.h(input_qubit[0]) # number=1
    prog.h(input_qubit[1]) # number=2
    prog.h(input_qubit[2]) # number=3
    prog.h(input_qubit[3]) # number=4
    for edge in E:
        k = edge[0]
        l = edge[1]
        # NOTE(review): cp() indexes qubits as k-1/l-1 (wrapping to the last
        # qubit when k == 0) while p() uses k and l directly — this mixed
        # indexing is kept as generated; confirm it is intentional.
        prog.cp(-2 * gamma, input_qubit[k-1], input_qubit[l-1])
        prog.p(gamma, k)
        prog.p(gamma, l)
    prog.rx(2 * beta, range(len(V)))
    prog.swap(input_qubit[3],input_qubit[0]) # number=5
    prog.swap(input_qubit[3],input_qubit[0]) # number=6
    prog.x(input_qubit[1]) # number=7
    prog.x(input_qubit[1]) # number=8
    # circuit end
    return prog
if __name__ == '__main__':
    # Build a 4-node weighted graph and grid-search (gamma, beta) for the
    # maximum of the analytic objective F1, then run the generated circuit
    # on a real IBMQ backend and dump the counts to a CSV file.
    n = 4
    V = np.arange(0, n, 1)
    E = [(0, 1, 1.0), (0, 2, 1.0), (1, 2, 1.0), (3, 2, 1.0), (3, 1, 1.0)]
    G = nx.Graph()
    G.add_nodes_from(V)
    G.add_weighted_edges_from(E)
    step_size = 0.1
    a_gamma = np.arange(0, np.pi, step_size)
    a_beta = np.arange(0, np.pi, step_size)
    a_gamma, a_beta = np.meshgrid(a_gamma, a_beta)
    F1 = 3 - (np.sin(2 * a_beta) ** 2 * np.sin(2 * a_gamma) ** 2 - 0.5 * np.sin(4 * a_beta) * np.sin(4 * a_gamma)) * (
        1 + np.cos(4 * a_gamma) ** 2)
    # Coordinates of the first maximum of F1 on the grid.
    # NOTE(review): after meshgrid, result[0] indexes the beta axis and
    # result[1] the gamma axis — confirm gamma/beta are not swapped here.
    result = np.where(F1 == np.amax(F1))
    a = list(zip(result[0], result[1]))[0]
    gamma = a[0] * step_size
    beta = a[1] * step_size
    prog = make_circuit(4)
    sample_shot =3962
    # NOTE(review): writefile is not closed if any call below raises.
    writefile = open("../data/startQiskit_QC35.csv", "w")
    # prog.draw('mpl', filename=(kernel + '.png'))
    IBMQ.load_account()
    provider = IBMQ.get_provider(hub='ibm-q')
    provider.backends()
    backend = provider.get_backend("ibmq_5_yorktown")
    # Transpile against the fake Yorktown model but execute on the real backend.
    circuit1 = transpile(prog, FakeYorktown())
    circuit1.measure_all()
    prog = circuit1
    info = execute(prog,backend=backend, shots=sample_shot).result().get_counts()
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
|
[
"wangjiyuan123@yeah.net"
] |
wangjiyuan123@yeah.net
|
5c1262d982e0f6f45641b15959d0970ac3994a78
|
312d40d6023858891dd32bda67579f7284a54c15
|
/29/00/add_history.py
|
2bd0fedf2437bfc094faa83500c7f3938f0ff799
|
[
"CC0-1.0"
] |
permissive
|
pylangstudy/201708
|
b67a49f091f5f949954e7b9a910a07761fe9a7d1
|
126b1af96a1d1f57522d5a1d435b58597bea2e57
|
refs/heads/master
| 2021-01-01T20:49:15.973299
| 2017-08-31T00:18:55
| 2017-08-31T00:18:55
| 98,936,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,005
|
py
|
import readline
import datetime
import os

history_file = 'history_file.txt'

# Ensure the history file exists so read_history_file() (and the later
# append_history_file()) does not raise FileNotFoundError on the first run.
if not os.path.exists(history_file):
    open(history_file, 'w').close()

readline.set_history_length(1000)
readline.read_history_file(history_file)
print(f'readline.get_history_length():{readline.get_history_length()}')
# Add timestamps to the in-memory history only (not saved to the file here).
for i in range(5):
    readline.add_history(f'{datetime.datetime.now():%Y-%m-%d %H:%M:%S.%f}')
readline.parse_and_bind('tab: complete')
readline.parse_and_bind('set editing-mode vi')
while True:
    line = input('Prompt ("stop" to quit): ')
    if line == 'stop':
        break
    print('ENTERED: "%s"' % line)
    # Persist the newest entry, then prefix the in-memory copy with a timestamp.
    readline.append_history_file(1, history_file)
    index = readline.get_current_history_length() - 1
    # NOTE: replacing via get_history_item(index) ended up targeting the entry
    # before the one just typed, so the raw `line` is used directly instead.
    readline.replace_history_item(index, f'{datetime.datetime.now():%Y-%m-%d %H:%M:%S.%f}\t' + line)
|
[
"pylangstudy@yahoo.co.jp"
] |
pylangstudy@yahoo.co.jp
|
a76bf89cd9ee66550c29ffffd5bd08c7c8ea0fbd
|
3cb6bf94471cf493963c62103bffd3522d432a1e
|
/backend/users/migrations/0002_auto_20210110_2318.py
|
5bf0cc03769bacd76deade31efff899f15fa33a4
|
[] |
no_license
|
crowdbotics-apps/triss-tv-app-23760
|
be8603d67beacf744474d8bf193d629188e209a4
|
a2d50e494b7b601657d70b970d6d161ba3c4bd3f
|
refs/heads/master
| 2023-02-13T14:18:11.525879
| 2021-01-10T23:19:06
| 2021-01-10T23:19:06
| 328,499,013
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 894
|
py
|
# Generated by Django 2.2.17 on 2021-01-10 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Relaxes user identity fields: email, first_name, last_name and name
    # all become optional (blank/null allowed). Auto-generated — the
    # operations below should not be edited by hand.
    dependencies = [
        ('users', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='user',
            name='email',
            field=models.EmailField(blank=True, max_length=254, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='first_name',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='last_name',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='user',
            name='name',
            field=models.CharField(blank=True, max_length=255, null=True),
        ),
    ]
|
[
"team@crowdbotics.com"
] |
team@crowdbotics.com
|
3c8c38ea7cc58deaec59f3c9d44ae9a8059ae81c
|
367d2670c75d385d122bca60b9f550ca5b3888c1
|
/gem5/env/lib/python3.6/site-packages/celery/security/utils.py
|
4714a945c6eed54165b302ff53b82d81957f194c
|
[
"BSD-3-Clause",
"LicenseRef-scancode-proprietary-license",
"LGPL-2.0-or-later",
"MIT"
] |
permissive
|
Anish-Saxena/aqua_rowhammer_mitigation
|
4f060037d50fb17707338a6edcaa0ac33c39d559
|
3fef5b6aa80c006a4bd6ed4bedd726016142a81c
|
refs/heads/main
| 2023-04-13T05:35:20.872581
| 2023-01-05T21:10:39
| 2023-01-05T21:10:39
| 519,395,072
| 4
| 3
|
Unlicense
| 2023-01-05T21:10:40
| 2022-07-30T02:03:02
|
C++
|
UTF-8
|
Python
| false
| false
| 845
|
py
|
"""Utilities used by the message signing serializer."""
import sys
from contextlib import contextmanager
import cryptography.exceptions
from cryptography.hazmat.primitives import hashes
from celery.exceptions import SecurityError, reraise
__all__ = ('get_digest_algorithm', 'reraise_errors',)
def get_digest_algorithm(digest='sha256'):
    """Look up a digest name (e.g. 'sha256') and return an instantiated
    hash algorithm object from the cryptography library."""
    assert digest is not None
    algorithm_cls = getattr(hashes, digest.upper())
    return algorithm_cls()
@contextmanager
def reraise_errors(msg='{0!r}', errors=None):
    """Context reraising crypto errors as :exc:`SecurityError`.

    `msg` is a format string applied to the caught exception; the original
    traceback is preserved via sys.exc_info().
    """
    # NOTE(review): the default tuple contains `cryptography.exceptions`,
    # which is a *module*, not an exception class — `except` would raise
    # TypeError if it ever had to match it. Confirm callers always pass an
    # explicit `errors` tuple.
    errors = (cryptography.exceptions,) if errors is None else errors
    try:
        yield
    except errors as exc:
        reraise(SecurityError,
                SecurityError(msg.format(exc)),
                sys.exc_info()[2])
|
[
"asaxena317@krishna-srv4.ece.gatech.edu"
] |
asaxena317@krishna-srv4.ece.gatech.edu
|
c88e2f5ce525712cffc7291eb254f0f2c831710b
|
0499622f93b196f828db7443c05e029fb07f7819
|
/miic/src/app/course/serializers.py
|
eba8782401a4b63095df45533942a46179813cc0
|
[] |
no_license
|
kradalby/enigma
|
ba868d09fb86151de8bab60f2e8c96621d96817c
|
1be944ae0a731fc39fdcb2733661520060c8b8f9
|
refs/heads/master
| 2021-03-22T05:00:31.626446
| 2018-12-21T11:47:27
| 2018-12-21T11:47:27
| 88,087,880
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 199
|
py
|
from rest_framework import serializers
from .models import Course
class CourseSerializer(serializers.HyperlinkedModelSerializer):
    """Serializer exposing every field of the Course model."""
    class Meta:
        model = Course
        # BUG FIX: DRF's ModelSerializer option is `fields` (plural); the
        # original `field = '__all__'` was ignored and modern DRF raises an
        # assertion error when `fields`/`exclude` is missing.
        fields = '__all__'
|
[
"kradalby@kradalby.no"
] |
kradalby@kradalby.no
|
ec374bf3fdb296ed277d629d332a16e10b662344
|
390a705b795ce223a932cb9fed39f28dcb96933a
|
/fts/test_ph4_4addcommittee.py
|
c0ce75fb2fc9e4ab0d706f3e4bc2fd4326cf9874
|
[
"LicenseRef-scancode-public-domain"
] |
permissive
|
DonaldMcC/gdms
|
d74765b4c2356b709adf44438fe929f1e848117a
|
ee427c28435c9fc76b5c81835fd749789529dd1a
|
refs/heads/master
| 2021-06-22T17:36:08.806735
| 2021-06-18T18:35:39
| 2021-06-18T18:35:39
| 30,919,501
| 6
| 3
| null | 2016-05-30T21:09:09
| 2015-02-17T14:05:53
|
Python
|
UTF-8
|
Python
| false
| false
| 4,258
|
py
|
# These tests are all based on the tutorial at http://killer-web-development.com/
# if registration is successful this may work but lets
# try and get user logged in first
from functional_tests import FunctionalTest, ROOT, USERS
import time
from selenium.webdriver.support.ui import WebDriverWait
class AddEvent (FunctionalTest):
    """Functional tests: create a 'Committee' access group, then add members
    to it through the group_members admin grid."""

    def setUp(self):
        # Log in as USER1, then open the access-group admin page.
        self.url = ROOT + '/default/user/login'
        self.browser.get(self.url)
        mailstring = USERS['USER1'] + '@user.com'
        # NOTE(review): WebDriverWait is given `self` (the test case), not
        # self.browser — it works because the lambda ignores its argument's
        # type, but passing self.browser would be conventional.
        email = WebDriverWait(self, 10).until(lambda self: self.browser.find_element_by_name("email"))
        email.send_keys(mailstring)
        password = self.browser.find_element_by_name("password")
        password.send_keys(USERS['PASSWORD1'])
        submit_button = self.browser.find_element_by_css_selector("#submit_record__row input")
        submit_button.click()
        time.sleep(1)
        self.url = ROOT + '/admin/access_group'
        self.browser.get(self.url)
        time.sleep(1)

    def test_has_right_heading(self):
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('Access Group Maintenance', body.text)

    def test4(self):
        # Create the 'Committee' access group via the grid's add button.
        toclick = WebDriverWait(self, 12).until(lambda self: self.browser.find_element_by_css_selector("span.buttontext.button"))
        toclick.click()
        time.sleep(2)
        self.browser.find_element_by_id("access_group_group_name").clear()
        self.browser.find_element_by_id("access_group_group_name").send_keys("Committee")
        self.browser.find_element_by_id("access_group_group_desc").clear()
        self.browser.find_element_by_id("access_group_group_desc").send_keys("This is an admin appointed committee group")
        self.browser.find_element_by_id("access_group_group_type").send_keys("admin")
        self.browser.find_element_by_css_selector("input.btn.btn-primary").click()
        time.sleep(2)
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn('committee', body.text)

    def _add_member_from_grid(self, username, expected_text):
        # Shared flow for the add-member tests: open the group_members grid,
        # click add, fill in the group/user, submit, and verify the page.
        self.url = ROOT + '/admin/group_members'
        self.browser.get(self.url)
        time.sleep(1)
        toclick = WebDriverWait(self, 12).until(lambda self: self.browser.find_element_by_css_selector("span.buttontext.button"))
        toclick.click()
        time.sleep(5)
        self.browser.find_element_by_id("group_members_access_group").send_keys("committee")
        self.browser.find_element_by_id("group_members_auth_userid").send_keys(username)
        self.browser.find_element_by_css_selector("input.btn.btn-primary").click()
        time.sleep(2)
        body = self.browser.find_element_by_tag_name('body')
        self.assertIn(expected_text, body.text)

    def test_adduser2fromgrid(self):
        self._add_member_from_grid("Testuser2", 'user2')

    def test_adduser3fromgrid(self):
        self._add_member_from_grid("Testuser3", 'user3')

    def test_adduser4fromgrid(self):
        self._add_member_from_grid("Testuser4", 'user4')
|
[
"donaldm2020@gmail.com"
] |
donaldm2020@gmail.com
|
a7c91ccdca3907968a379edf1b5c5e93736b9841
|
1aaba2be0479b43a76f3e85ea62cad8d42827d49
|
/lib/pymedphys/_experimental/streamlit/apps/wlutz/_config.py
|
729cb4145a1f603bd63282aa604a23adbd1edcdf
|
[
"Apache-2.0"
] |
permissive
|
changran/pymedphys
|
a44a9aa9ec375c17ea73815c1a8e2a6a5a002c1e
|
164a7a5c6051ab4c8fd6efdb79c3bfb0684b65df
|
refs/heads/main
| 2023-07-30T21:32:07.697743
| 2021-09-10T11:37:02
| 2021-09-10T11:37:02
| 407,394,958
| 1
| 0
|
Apache-2.0
| 2021-09-17T03:42:49
| 2021-09-17T03:42:48
| null |
UTF-8
|
Python
| false
| false
| 426
|
py
|
import pathlib
import pymedphys
from pymedphys._streamlit.utilities import config as st_config
def download_demo_data():
cwd = pathlib.Path.cwd()
pymedphys.zip_data_paths("wlutz-demo-files.zip", extract_directory=cwd)
return cwd.joinpath("wlutz-demo-files")
def get_config(demo_mode):
if demo_mode:
path = download_demo_data()
else:
path = None
return st_config.get_config(path)
|
[
"me@simonbiggs.net"
] |
me@simonbiggs.net
|
699b4704ebf635c6003115c3036eecf1060a2e4c
|
45ba55b4fbdaf1657fde92beaeba4f173265afcd
|
/strawberry/experimental/pydantic/_compat.py
|
acc666f6203a928f1eb2d7b33d69d2381976ca54
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
strawberry-graphql/strawberry
|
af96afd4edd1788c59e150597a12501fbc7bf444
|
6d86d1c08c1244e00535840d9d87925431bc6a1c
|
refs/heads/main
| 2023-08-30T03:34:12.929874
| 2023-08-24T12:01:09
| 2023-08-24T12:01:09
| 162,690,887
| 3,408
| 529
|
MIT
| 2023-09-14T21:49:44
| 2018-12-21T08:56:55
|
Python
|
UTF-8
|
Python
| false
| false
| 3,165
|
py
|
import dataclasses
from dataclasses import dataclass
from typing import TYPE_CHECKING, Any, Callable, Dict, Optional, Type
import pydantic
from pydantic import BaseModel
from pydantic.version import VERSION as PYDANTIC_VERSION
if TYPE_CHECKING:
from pydantic.fields import FieldInfo
IS_PYDANTIC_V2: bool = PYDANTIC_VERSION.startswith("2.")
IS_PYDANTIC_V1: bool = not IS_PYDANTIC_V2
@dataclass
class CompatModelField:
name: str
type_: Any
outer_type_: Any
default: Any
default_factory: Optional[Callable[[], Any]]
required: bool
alias: Optional[str]
allow_none: bool
has_alias: bool
description: Optional[str]
if pydantic.VERSION[0] == "2":
from typing_extensions import get_args, get_origin
from pydantic._internal._typing_extra import is_new_type
from pydantic._internal._utils import lenient_issubclass, smart_deepcopy
from pydantic_core import PydanticUndefined
PYDANTIC_MISSING_TYPE = PydanticUndefined
def new_type_supertype(type_: Any) -> Any:
return type_.__supertype__
def get_model_fields(model: Type[BaseModel]) -> Dict[str, CompatModelField]:
field_info: dict[str, FieldInfo] = model.model_fields
new_fields = {}
# Convert it into CompatModelField
for name, field in field_info.items():
new_fields[name] = CompatModelField(
name=name,
type_=field.annotation,
outer_type_=field.annotation,
default=field.default,
default_factory=field.default_factory,
required=field.is_required(),
alias=field.alias,
# v2 doesn't have allow_none
allow_none=False,
has_alias=field is not None,
description=field.description,
)
return new_fields
else:
from pydantic.typing import ( # type: ignore[no-redef]
get_args,
get_origin,
is_new_type,
new_type_supertype,
)
from pydantic.utils import ( # type: ignore[no-redef]
lenient_issubclass,
smart_deepcopy,
)
PYDANTIC_MISSING_TYPE = dataclasses.MISSING # type: ignore[assignment]
def get_model_fields(model: Type[BaseModel]) -> Dict[str, CompatModelField]:
new_fields = {}
# Convert it into CompatModelField
for name, field in model.__fields__.items(): # type: ignore[attr-defined]
new_fields[name] = CompatModelField(
name=name,
type_=field.type_,
outer_type_=field.outer_type_,
default=field.default,
default_factory=field.default_factory,
required=field.required,
alias=field.alias,
allow_none=field.allow_none,
has_alias=field.has_alias,
description=field.field_info.description,
)
return new_fields
__all__ = [
"smart_deepcopy",
"lenient_issubclass",
"get_args",
"get_origin",
"is_new_type",
"new_type_supertype",
"get_model_fields",
"PYDANTIC_MISSING_TYPE",
]
|
[
"noreply@github.com"
] |
strawberry-graphql.noreply@github.com
|
b5ee6d5a3b546ae9bfd3fe883befddb05375e281
|
4324d19af69080f45ff60b733c940f7dc1aa6dae
|
/google-ads-python/google/ads/google_ads/v0/proto/resources/topic_view_pb2.py
|
8479d48ea067e902b438526e2c67da3d9c2985f8
|
[
"Apache-2.0",
"LicenseRef-scancode-generic-cla"
] |
permissive
|
ljborton/Forked_Work
|
cc8a3813c146ea4547aca9caeb03e649bbdb9076
|
7aaf67af8d9f86f9dc0530a1ad23951bcb535c92
|
refs/heads/master
| 2023-07-19T22:26:48.085129
| 2019-11-27T02:53:51
| 2019-11-27T02:53:51
| 224,321,748
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| true
| 3,068
|
py
|
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v0/proto/resources/topic_view.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v0/proto/resources/topic_view.proto',
package='google.ads.googleads.v0.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v0.resourcesB\016TopicViewProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V0.Resources\312\002!Google\\Ads\\GoogleAds\\V0\\Resources\352\002%Google::Ads::GoogleAds::V0::Resources'),
serialized_pb=_b('\n8google/ads/googleads_v0/proto/resources/topic_view.proto\x12!google.ads.googleads.v0.resources\"\"\n\tTopicView\x12\x15\n\rresource_name\x18\x01 \x01(\tB\xfb\x01\n%com.google.ads.googleads.v0.resourcesB\x0eTopicViewProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v0/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V0.Resources\xca\x02!Google\\Ads\\GoogleAds\\V0\\Resources\xea\x02%Google::Ads::GoogleAds::V0::Resourcesb\x06proto3')
)
_TOPICVIEW = _descriptor.Descriptor(
name='TopicView',
full_name='google.ads.googleads.v0.resources.TopicView',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v0.resources.TopicView.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=95,
serialized_end=129,
)
DESCRIPTOR.message_types_by_name['TopicView'] = _TOPICVIEW
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
TopicView = _reflection.GeneratedProtocolMessageType('TopicView', (_message.Message,), dict(
DESCRIPTOR = _TOPICVIEW,
__module__ = 'google.ads.googleads_v0.proto.resources.topic_view_pb2'
,
__doc__ = """A topic view.
Attributes:
resource_name:
The resource name of the topic view. Topic view resource names
have the form: ``customers/{customer_id}/topicViews/{ad_group
_id}_{criterion_id}``
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v0.resources.TopicView)
))
_sym_db.RegisterMessage(TopicView)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
|
[
"noreply@github.com"
] |
ljborton.noreply@github.com
|
0c7a2e5aca3dd8f431def73dd1ccb655570c9897
|
84297380d00453e71f65c591dca046bd41a32184
|
/ABC/ABC165/A.py
|
52ca6878fd2f4b0799ac8c12ddf0293950e1d009
|
[] |
no_license
|
daiki1998/atcoder
|
a5ef25245b1bbc3a5e33044846a3c16213603bd3
|
d864a7cb11e41dbf6a691f5d128fdfe122b07046
|
refs/heads/main
| 2023-03-06T22:55:29.863716
| 2021-02-18T12:01:24
| 2021-02-18T12:01:24
| 323,401,954
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 135
|
py
|
K = int(input())
A, B = map(int, input().split())
res = "NG"
for i in range(B+1):
if A <= K*i <= B:
res = "OK"
print(res)
|
[
"shimokawadaiki@shimokawadaikinoMacBook-Pro.local"
] |
shimokawadaiki@shimokawadaikinoMacBook-Pro.local
|
c25fc95d3fd80a2aaae02c8f79e75ce5c24a2c92
|
4f8a6af26d8220238e608577202ffc88c14f403e
|
/pedido/migrations/0002_remove_pedido_numero.py
|
e3535aa67a570fbec3c9f9f6c9be5a3d236b2e69
|
[] |
no_license
|
ryujiin/dc
|
87338fb9820d8768391ea65c5b6cc1a6ea94db4f
|
b6f5b6a8624611f6513fd581457f171783800935
|
refs/heads/master
| 2021-01-01T06:33:38.940622
| 2015-08-24T02:42:55
| 2015-08-24T02:42:55
| 31,668,034
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 340
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('pedido', '0001_initial'),
]
operations = [
migrations.RemoveField(
model_name='pedido',
name='numero',
),
]
|
[
"ryujiin22@gmail.com"
] |
ryujiin22@gmail.com
|
a92c0a10aeeed0a16c66d9b710b73c346446f7a8
|
d99eba79ac942f4ca4dbc3187ef97f593d6dbc46
|
/anatomy_tagging/migrations/0019_auto_constraints.py
|
b6c370a32b240276bc6d4139c4420397cab504f0
|
[] |
no_license
|
adaptive-learning/anatomy-tagging
|
6028b0749b54efaac7d32738959b5eaf4d78f0bd
|
46561468d96c5cc9cc4c6c9b093b27cea69b65b6
|
refs/heads/master
| 2020-04-10T01:47:14.149395
| 2017-06-19T17:08:36
| 2017-06-19T17:08:36
| 30,142,622
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,569
|
py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('anatomy_tagging', '0018_auto_composite_relation_type'),
]
operations = [
migrations.AlterField(
model_name='term',
name='body_part',
field=models.CharField(default=b'', max_length=10, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='term',
name='name_cs',
field=models.TextField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='term',
name='name_en',
field=models.TextField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='term',
name='name_la',
field=models.TextField(max_length=200, null=True, blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='term',
name='parent',
field=models.ForeignKey(blank=True, to='anatomy_tagging.Term', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='term',
name='system',
field=models.CharField(default=b'', max_length=30, null=True, blank=True),
preserve_default=True,
),
]
|
[
"jan.papousek@gmail.com"
] |
jan.papousek@gmail.com
|
46c93d3cf171f2c155c9f767abf4d8d7896ab549
|
890c8b8e90e516a5a3880eca9b2d217662fe7d84
|
/armulator/armv6/opcodes/abstract_opcodes/ldrh_immediate_arm.py
|
82362526630b6123504a3ea2f746261e0e88d05c
|
[
"MIT"
] |
permissive
|
doronz88/armulator
|
b864135996f876c7857b79a314d4aa06cc19c549
|
0294feac2785c8947e5943ac0c34f941ee4b5fff
|
refs/heads/master
| 2022-11-05T08:14:42.405335
| 2020-06-18T23:53:17
| 2020-06-18T23:53:17
| 273,363,061
| 2
| 0
| null | 2020-06-18T23:51:03
| 2020-06-18T23:51:02
| null |
UTF-8
|
Python
| false
| false
| 1,383
|
py
|
from armulator.armv6.opcodes.abstract_opcode import AbstractOpcode
from armulator.armv6.bits_ops import add as bits_add, sub as bits_sub, zero_extend
from bitstring import BitArray
class LdrhImmediateArm(AbstractOpcode):
def __init__(self, add, wback, index, imm32, t, n):
super(LdrhImmediateArm, self).__init__()
self.add = add
self.wback = wback
self.index = index
self.imm32 = imm32
self.t = t
self.n = n
def execute(self, processor):
if processor.condition_passed():
offset_addr = bits_add(processor.registers.get(self.n), self.imm32, 32) if self.add else bits_sub(
processor.registers.get(self.n), self.imm32, 32)
address = offset_addr if self.index else processor.registers.get(self.n)
data = processor.mem_u_get(address, 2)
if self.wback:
processor.registers.set(self.n, offset_addr)
if processor.unaligned_support() or not address[31]:
processor.registers.set(self.t, zero_extend(data, 32))
else:
processor.registers.set(self.t, BitArray(length=32)) # unknown
def instruction_syndrome(self):
if self.t == 15 or self.wback:
return BitArray(length=9)
else:
return BitArray(bin="10100") + BitArray(uint=self.t, length=4)
|
[
"matan1008@gmail.com"
] |
matan1008@gmail.com
|
de1e554cbb1e97d15a561b0089ad7efb7e68848a
|
f9d564f1aa83eca45872dab7fbaa26dd48210d08
|
/huaweicloud-sdk-codeartsdeploy/huaweicloudsdkcodeartsdeploy/v2/model/env_execution_body.py
|
b1206af38ed6c9f476f85bc485956faecbbe6825
|
[
"Apache-2.0"
] |
permissive
|
huaweicloud/huaweicloud-sdk-python-v3
|
cde6d849ce5b1de05ac5ebfd6153f27803837d84
|
f69344c1dadb79067746ddf9bfde4bddc18d5ecf
|
refs/heads/master
| 2023-09-01T19:29:43.013318
| 2023-08-31T08:28:59
| 2023-08-31T08:28:59
| 262,207,814
| 103
| 44
|
NOASSERTION
| 2023-06-22T14:50:48
| 2020-05-08T02:28:43
|
Python
|
UTF-8
|
Python
| false
| false
| 5,498
|
py
|
# coding: utf-8
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class EnvExecutionBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'params': 'list[DynamicConfigInfo]',
'record_id': 'str',
'trigger_source': 'str'
}
attribute_map = {
'params': 'params',
'record_id': 'record_id',
'trigger_source': 'trigger_source'
}
def __init__(self, params=None, record_id=None, trigger_source=None):
"""EnvExecutionBody
The model defined in huaweicloud sdk
:param params: 部署应用时传递的参数
:type params: list[:class:`huaweicloudsdkcodeartsdeploy.v2.DynamicConfigInfo`]
:param record_id: 应用的部署id,可通过record_id回滚至之前的部署状态。选中应用历史部署记录,在URL中获取
:type record_id: str
:param trigger_source: 限制触发来源,0不限制任何部署请求来源,1时只允许通过流水线触发部署
:type trigger_source: str
"""
self._params = None
self._record_id = None
self._trigger_source = None
self.discriminator = None
if params is not None:
self.params = params
if record_id is not None:
self.record_id = record_id
if trigger_source is not None:
self.trigger_source = trigger_source
@property
def params(self):
"""Gets the params of this EnvExecutionBody.
部署应用时传递的参数
:return: The params of this EnvExecutionBody.
:rtype: list[:class:`huaweicloudsdkcodeartsdeploy.v2.DynamicConfigInfo`]
"""
return self._params
@params.setter
def params(self, params):
"""Sets the params of this EnvExecutionBody.
部署应用时传递的参数
:param params: The params of this EnvExecutionBody.
:type params: list[:class:`huaweicloudsdkcodeartsdeploy.v2.DynamicConfigInfo`]
"""
self._params = params
@property
def record_id(self):
"""Gets the record_id of this EnvExecutionBody.
应用的部署id,可通过record_id回滚至之前的部署状态。选中应用历史部署记录,在URL中获取
:return: The record_id of this EnvExecutionBody.
:rtype: str
"""
return self._record_id
@record_id.setter
def record_id(self, record_id):
"""Sets the record_id of this EnvExecutionBody.
应用的部署id,可通过record_id回滚至之前的部署状态。选中应用历史部署记录,在URL中获取
:param record_id: The record_id of this EnvExecutionBody.
:type record_id: str
"""
self._record_id = record_id
@property
def trigger_source(self):
"""Gets the trigger_source of this EnvExecutionBody.
限制触发来源,0不限制任何部署请求来源,1时只允许通过流水线触发部署
:return: The trigger_source of this EnvExecutionBody.
:rtype: str
"""
return self._trigger_source
@trigger_source.setter
def trigger_source(self, trigger_source):
"""Sets the trigger_source of this EnvExecutionBody.
限制触发来源,0不限制任何部署请求来源,1时只允许通过流水线触发部署
:param trigger_source: The trigger_source of this EnvExecutionBody.
:type trigger_source: str
"""
self._trigger_source = trigger_source
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, EnvExecutionBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"hwcloudsdk@huawei.com"
] |
hwcloudsdk@huawei.com
|
50688ed44d8bd1d4a1ef450b21069878a4a36032
|
0fccee4c738449f5e0a8f52ea5acabf51db0e910
|
/genfragments/EightTeV/EstarToEG_L10000_M2400_8TeV_pythia8_cff.py
|
5391ffb60a29f8eaa4a3b41e7b1861224dd1978b
|
[] |
no_license
|
cms-sw/genproductions
|
f308ffaf3586c19b29853db40e6d662e937940ff
|
dd3d3a3826343d4f75ec36b4662b6e9ff1f270f4
|
refs/heads/master
| 2023-08-30T17:26:02.581596
| 2023-08-29T14:53:43
| 2023-08-29T14:53:43
| 11,424,867
| 69
| 987
| null | 2023-09-14T12:41:28
| 2013-07-15T14:18:33
|
Python
|
UTF-8
|
Python
| false
| false
| 874
|
py
|
import FWCore.ParameterSet.Config as cms
generator = cms.EDFilter("Pythia8GeneratorFilter",
maxEventsToPrint = cms.untracked.int32(0),
pythiaPylistVerbosity = cms.untracked.int32(1),
pythiaHepMCVerbosity = cms.untracked.bool(False),
filterEfficiency = cms.untracked.double(1.),
comEnergy = cms.double(8000.0),
crossSection = cms.untracked.double(0.000008795),
PythiaParameters = cms.PSet(
processParameters = cms.vstring(
'Tune:pp 5',
'ExcitedFermion:qqbar2eStare = on',
'ExcitedFermion:Lambda= 10000',
'4000011:onMode = off',
'4000011:onIfMatch = 11 22',
'4000011:m0 = 2400'
),
parameterSets = cms.vstring('processParameters')
)
)
|
[
"sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch"
] |
sha1-481a5a2ac9973b7cab990140e7a395466f50a31e@cern.ch
|
3789c4bb30efb30fcd70b94d844283827ef69b86
|
3f60f9e1081fd08824254006f7fd2fd1b4b56731
|
/POI/no_salary_dec_tree.py
|
c31cfe251192ddb24877623e1ef3d4958b723947
|
[] |
no_license
|
mihirkelkar/Text_Mining_Enron
|
0e37abfe7cc916b6f278ddd6bb72ac4b48283425
|
b7e2d59bc38bb62e68a61d2348aa5302f5cf5918
|
refs/heads/master
| 2020-04-15T21:49:42.673886
| 2015-04-18T00:03:58
| 2015-04-18T00:03:58
| 30,212,104
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,798
|
py
|
import matplotlib.pyplot as plt
import sys
import pickle
import sklearn
import time
import numpy as np
from data_sorter import featuresProcess
from data_sorter import split
from sklearn import cross_validation
from sklearn.tree import DecisionTreeClassifier
outliers = ["SKILLING JEFFREY K", "LAY KENNETH L", "FREVERT MARK A", "PICKERING MARK R"]
data = pickle.load(open("enron.pkl", "r"))
features = ["salary", "bonus"]
for ii in outliers:
del data[ii]
def get_information(key, total, data):
retval = list()
for ii in data:
if data[ii][key] == "NaN" or data[ii][total] == "NaN":
retval.append(0.0)
elif data[ii][key] >= 0:
retval.append(float(data[ii][key]) / float(data[ii][total]))
return retval
email_from_poi = get_information("from_poi_to_this_person", "to_messages", data)
email_to_poi = get_information("from_this_person_to_poi", "from_messages", data)
count = 0
for ii in data:
data[ii]["email_from_poi"] = email_from_poi[count]
data[ii]["email_to_poi"] = email_to_poi[count]
count += 1
features_list = ["poi", "bonus", "email_from_poi", "email_to_poi",'deferral_payments', 'total_payments']
dataset = featuresProcess(data, features_list)
labels, features = split(dataset)
features_train, features_test, labels_train, labels_test = cross_validation.train_test_split(features, labels, test_size=0.1, random_state=42)
time_start = time.time()
dtree = DecisionTreeClassifier()
dtree.fit(features_train, labels_train)
score = dtree.score(features_test, labels_test)
print "Accuracy ", score
print "Decesion tree took time : ", time.time() - time_start
feat_ranks = dtree.feature_importances_
indices = np.argsort(feat_ranks)[::-1]
for ii in range(4):
print "{} feature {} ({})".format(ii+1, features_list[ii + 1], feat_ranks[indices[ii]])
|
[
"kelkarmhr@gmail.com"
] |
kelkarmhr@gmail.com
|
1da5bc816ff092ff1efd02e934a8a810db7b83ba
|
f13acd0d707ea9ab0d2f2f010717b35adcee142f
|
/AtCoder_Virtual_Contest/20190105-ganariya2525/abc096/c.py
|
c0bd9f5ae1b4dfa37263a8c56a23310888195f2c
|
[
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
KATO-Hiro/AtCoder
|
126b9fe89fa3a7cffcbd1c29d42394e7d02fa7c7
|
bf43320bc1af606bfbd23c610b3432cddd1806b9
|
refs/heads/master
| 2023-08-18T20:06:42.876863
| 2023-08-17T23:45:21
| 2023-08-17T23:45:21
| 121,067,516
| 4
| 0
|
CC0-1.0
| 2023-09-14T21:59:38
| 2018-02-11T00:32:45
|
Python
|
UTF-8
|
Python
| false
| false
| 671
|
py
|
# -*- coding: utf-8 -*-
def main():
h, w = map(int, input().split())
board = [list(input()) for _ in range(h)]
dx = [0, 0, -1, 1]
dy = [-1, 1, 0, 0]
for i in range(h):
for k in range(w):
if board[i][k] == '#':
count = 0
for m in range(4):
x = i + dx[m]
y = k + dy[m]
if 0 <= x < h and 0 <= y < w and board[x][y] == '#':
count += 1
if count == 0:
print('No')
exit()
print('Yes')
if __name__ == '__main__':
main()
|
[
"k.hiro1818@gmail.com"
] |
k.hiro1818@gmail.com
|
38e50d3f24f37b25e2e10924332a53123811847a
|
49ba5356bdc5df7dd9803b56fe507c5164a90716
|
/shortest-unsorted-continuous-subarray/solution.py
|
68ff764fd997e9bff5e3f3237167c7e462af3c0e
|
[] |
no_license
|
uxlsl/leetcode_practice
|
d80ad481c9d8ee71cce0f3c66e98446ced149635
|
d8ed762d1005975f0de4f07760c9671195621c88
|
refs/heads/master
| 2021-04-25T18:12:28.136504
| 2020-03-11T07:54:15
| 2020-03-11T07:54:15
| 121,472,384
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 433
|
py
|
class Solution(object):
def findUnsortedSubarray(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
nums_s = sorted(nums)
i = 0
while i < len(nums) and nums_s[i] == nums[i]:
i += 1
j = len(nums) - 1
while 0 <= j and nums_s[j] == nums[j]:
j -= 1
if i < j:
return j - i + 1
else:
return 0
|
[
"songlin.lin@yunfangdata.com"
] |
songlin.lin@yunfangdata.com
|
06ab1173149ebf74e0df8962d78ccda462a2138a
|
1d7eec692553afc411ec1e7325634f71a2aed291
|
/backend/social_auth/urls.py
|
a8a59a7deeb00710a61c0465859ccf7d521cd772
|
[] |
no_license
|
Andy-Nkumane/Tilde
|
a41a2a65b3901b92263ae94d527de403f59a5caf
|
80de97edaf99f4831ca8cb989b93e3be5e09fdd6
|
refs/heads/develop
| 2023-05-09T10:02:41.240517
| 2021-05-28T09:20:51
| 2021-05-28T09:20:51
| 299,501,586
| 0
| 0
| null | 2020-10-25T22:37:30
| 2020-09-29T04:10:48
|
Python
|
UTF-8
|
Python
| false
| false
| 465
|
py
|
from django.urls import path
from . import views
urlpatterns = [
path(
"github_oauth_start/",
views.authorize_github_start,
name="authorize_github_start",
),
path(
"github_oauth_callback/",
views.authorize_github_callback,
name="authorize_github_callback",
),
path(
"oauth_one_time_token_auth/",
views.oauth_one_time_token_auth,
name="oauth_one_time_token_auth",
),
]
|
[
"sheena.oconnell@gmail.com"
] |
sheena.oconnell@gmail.com
|
1ea9b9d12aa2d29c8695d3633efd804228328fd3
|
498fcf34fa4482be5c9fefc488666e60edcf46c7
|
/math/0x00-linear_algebra/100-slice_like_a_ninja.py
|
528b04754521035294e7fdf6a8121f20db0302c4
|
[] |
no_license
|
MansourKef/holbertonschool-machine_learning
|
7dbc465def04c311c1afb0e8b8903cbe34c72ad3
|
19f78fc09f0ebeb9f27f3f76b98e7a0e9212fd22
|
refs/heads/main
| 2023-03-12T16:18:08.919099
| 2021-03-05T09:42:09
| 2021-03-05T09:42:09
| 317,303,125
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 435
|
py
|
#!/usr/bin/env python3
"""
np_slice.
"""
def np_slice(matrix, axes={}):
"""Slices a matrix."""
len_dic = len(axes)
args = []
list_axes = []
max_val = max(axes.keys())
for j in range(max_val + 1):
list_axes.append(j)
for i in list_axes:
if i in axes.keys():
sl = slice(*axes[i])
else:
sl = slice(None)
args.append(sl)
return matrix[tuple(args)]
|
[
"2798@holbertonschool.com"
] |
2798@holbertonschool.com
|
cb036973b6b10d9a97cf5c01c03aa9f66500336d
|
77de000e7d9f11a5c00ec8f6a9502c9c772bbe65
|
/paying_for_college/admin.py
|
c20f0c8dd6e545bd09c7cbbdf82b523da4dd7fad
|
[
"CC0-1.0"
] |
permissive
|
mistergone/college-costs
|
5081ceedfd2bb560adfb6ac412471d79dc3e4b80
|
7fcb9155d23f363d7d1a22da4df8887996c4b8a6
|
refs/heads/master
| 2021-01-17T07:52:45.351978
| 2016-05-27T18:21:01
| 2016-05-27T18:21:01
| 45,491,267
| 0
| 0
| null | 2015-11-03T19:45:04
| 2015-11-03T19:45:04
| null |
UTF-8
|
Python
| false
| false
| 1,661
|
py
|
#!/usr/bin/env python
from __future__ import absolute_import
from django.contrib import admin
from .models import School, Program, Alias, Nickname, Contact, Disclosure
from .models import BAHRate, Feedback, Worksheet, ConstantRate, ConstantCap
class DisclosureAdmin(admin.ModelAdmin):
list_display = ('name', 'institution', 'text')
class ConstantRateAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'value', 'updated')
list_editable = ['value']
class ConstantCapAdmin(admin.ModelAdmin):
list_display = ('name', 'slug', 'value', 'updated')
list_editable = ['value']
class SchoolAdmin(admin.ModelAdmin):
list_display = ('primary_alias',
'school_id',
'city',
'state',
'settlement_school')
list_filter = ('settlement_school', 'state')
list_editable = ('settlement_school',)
search_fields = ['school_id', 'city', 'state']
ordering = ['state']
class AliasAdmin(admin.ModelAdmin):
list_display = ('alias', 'institution', 'is_primary')
search_fields = ['alias']
class NicknameAdmin(admin.ModelAdmin):
list_display = ('nickname', 'institution', 'is_female')
search_fields = ['nickname']
admin.site.register(Disclosure, DisclosureAdmin)
admin.site.register(ConstantRate, ConstantRateAdmin)
admin.site.register(ConstantCap, ConstantCapAdmin)
admin.site.register(School, SchoolAdmin)
admin.site.register(Alias, AliasAdmin)
admin.site.register(BAHRate)
admin.site.register(Feedback)
admin.site.register(Worksheet)
admin.site.register(Contact)
admin.site.register(Nickname, NicknameAdmin)
admin.site.register(Program)
|
[
"higs4281@gmail.com"
] |
higs4281@gmail.com
|
f4bfb83c2408bd57b3ce899378717e5b6fe39407
|
5552380060fd8be832a61d1cc4020c16dde40452
|
/home/migrations/0004_bank_branch_file.py
|
244237a150562d68ccb2eac9f7f5cf4d1998f799
|
[] |
no_license
|
rayhancse08/LawChamber
|
a1f4121a911d92cdf7cd4c885767b2080fa5fbb0
|
40f2306d50bc9c2d2b7c08b44e527ab45a7dc756
|
refs/heads/master
| 2021-09-10T06:32:48.473590
| 2018-03-21T16:12:58
| 2018-03-21T16:12:58
| 125,860,987
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,087
|
py
|
# Generated by Django 2.0.3 on 2018-03-19 13:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('home', '0003_contact'),
]
operations = [
migrations.CreateModel(
name='Bank',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter a Bank name (e.g. Brack,DutchBangla)', max_length=200)),
],
),
migrations.CreateModel(
name='Branch',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter branch name', max_length=200, verbose_name='Branch Name')),
('address', models.CharField(help_text='Enter branch address', max_length=200, verbose_name='Branch Address')),
('bank', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Bank')),
],
),
migrations.CreateModel(
name='File',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='Enter File Name', max_length=200, verbose_name='File Name')),
('bill', models.CharField(help_text='Enter Bill Amount', max_length=200, verbose_name='Bill Amount')),
('account_no', models.CharField(help_text='Enter Account No', max_length=200, verbose_name='Account No')),
('file', models.FileField(default=False, help_text='Upload File', upload_to='')),
('bank', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Bank')),
('branch', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='home.Branch')),
],
),
]
|
[
"rayhancse08@gmail.com"
] |
rayhancse08@gmail.com
|
5ca7bb44464eb2b8bc4a97eed49157497214cee6
|
aced407b41f6669f69e9eb8bd599260d50c0bd3f
|
/server/libs/top/api/rest/TaobaokeItemsDetailGetRequest.py
|
13b1b69b497043b5f31870c254d245496fe97a64
|
[] |
no_license
|
alswl/music_sofa
|
42f7d15431f11b97bf67b604cfde0a0e9e3860cc
|
c4e5425ef6c80c3e57c91ba568f7cbfe63faa378
|
refs/heads/master
| 2016-09-12T18:37:34.357510
| 2016-05-20T11:49:52
| 2016-05-20T11:49:52
| 58,946,171
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 472
|
py
|
'''
Created by auto_sdk on 2013-11-07 12:53:22
'''
from top.api.base import RestApi
class TaobaokeItemsDetailGetRequest(RestApi):
def __init__(self,domain='gw.api.taobao.com',port=80):
RestApi.__init__(self,domain, port)
self.fields = None
self.is_mobile = None
self.nick = None
self.num_iids = None
self.outer_code = None
self.pid = None
self.refer_type = None
self.track_iids = None
def getapiname(self):
return 'taobao.taobaoke.items.detail.get'
|
[
"alswlx@gmail.com"
] |
alswlx@gmail.com
|
93d70c998b5ffa9aa7583bfab766cfc5b0a7927b
|
bc82de9237a6aa28fd7623a27b35c02ae8416702
|
/allennlp/modules/seq2seq_decoders/decoder_net.py
|
aa73863dd2f546fc9ee2b7c5a40ada2af3ff2e00
|
[
"Apache-2.0"
] |
permissive
|
Snnzhao/GrailQA
|
78190a8a5bae934c07f4035786f658ef4764c510
|
e89e66380402e51bac56f59c7d24d4400bcd11b6
|
refs/heads/main
| 2023-04-26T19:49:21.683922
| 2021-04-11T09:40:34
| 2021-04-11T09:40:34
| 370,937,323
| 1
| 0
|
Apache-2.0
| 2021-05-26T07:00:21
| 2021-05-26T07:00:20
| null |
UTF-8
|
Python
| false
| false
| 4,117
|
py
|
from typing import Tuple, Dict, Optional
import torch
from allennlp.common import Registrable
class DecoderNet(torch.nn.Module, Registrable):
    # pylint: disable=abstract-method
    """
    This class abstracts the neural architectures for decoding the encoded states and
    embedded previous step prediction vectors into a new sequence of output vectors.
    The implementations of ``DecoderNet`` is used by implementations of
    ``allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder`` such as
    ``allennlp.modules.seq2seq_decoders.seq_decoder.auto_regressive_seq_decoder.AutoRegressiveSeqDecoder``.
    The outputs of this module would be likely used by ``allennlp.modules.seq2seq_decoders.seq_decoder.SeqDecoder``
    to apply the final output feedforward layer and softmax.
    Parameters
    ----------
    decoding_dim : ``int``, required
        Defines dimensionality of output vectors.
    target_embedding_dim : ``int``, required
        Defines dimensionality of target embeddings. Since this model takes it's output on a previous step
        as input of following step, this is also an input dimensionality.
    decodes_parallel : ``bool``, required
        Defines whether the decoder generates multiple next step predictions at in a single `forward`.
    """
    def __init__(self,
                 decoding_dim: int,
                 target_embedding_dim: int,
                 decodes_parallel: bool) -> None:
        super().__init__()
        # Stored for subclasses and for get_output_dim(); no layers are built here —
        # concrete architectures are defined by subclasses.
        self.target_embedding_dim = target_embedding_dim
        self.decoding_dim = decoding_dim
        self.decodes_parallel = decodes_parallel
    def get_output_dim(self) -> int:
        """
        Returns the dimension of each vector in the sequence output by this ``DecoderNet``.
        This is `not` the shape of the returned tensor, but the last element of that shape.
        """
        return self.decoding_dim
    def init_decoder_state(self, encoder_out: Dict[str, torch.LongTensor]) -> Dict[str, torch.Tensor]:
        """
        Initialize the encoded state to be passed to the first decoding time step.
        Parameters
        ----------
        encoder_out : ``Dict[str, torch.LongTensor]``
            Dictionary of encoder outputs produced upstream; presumably contains the
            final encoder states needed to seed decoding — keys are subclass-specific.
        Returns
        -------
        ``Dict[str, torch.Tensor]``
            Initial state
        """
        raise NotImplementedError()
    def forward(self,
                previous_state: Dict[str, torch.Tensor],
                encoder_outputs: torch.Tensor,
                source_mask: torch.Tensor,
                previous_steps_predictions: torch.Tensor,
                previous_steps_mask: Optional[torch.Tensor] = None) -> Tuple[Dict[str, torch.Tensor],
                                                                             torch.Tensor]:
        # pylint: disable=arguments-differ
        """
        Performs a decoding step, and returns dictionary with decoder hidden state or cache and the decoder output.
        The decoder output is a 3d tensor (group_size, steps_count, decoder_output_dim)
        if `self.decodes_parallel` is True, else it is a 2d tensor with (group_size, decoder_output_dim).
        Parameters
        ----------
        previous_steps_predictions : ``torch.Tensor``, required
            Embeddings of predictions on previous step.
            Shape: (group_size, steps_count, decoder_output_dim)
        encoder_outputs : ``torch.Tensor``, required
            Vectors of all encoder outputs.
            Shape: (group_size, max_input_sequence_length, encoder_output_dim)
        source_mask : ``torch.Tensor``, required
            This tensor contains mask for each input sequence.
            Shape: (group_size, max_input_sequence_length)
        previous_state : ``Dict[str, torch.Tensor]``, required
            previous state of decoder
        previous_steps_mask : ``torch.Tensor``, optional
            Mask over the previous-step predictions, when applicable.
        Returns
        -------
        Tuple[Dict[str, torch.Tensor], torch.Tensor]
            Tuple of new decoder state and decoder output. Output should be used to generate out sequence elements
        """
        raise NotImplementedError()
|
[
"gu.826@cse-cnc196739s.coeit.osu.edu"
] |
gu.826@cse-cnc196739s.coeit.osu.edu
|
9ca6d92b19008f566d7bad5323b3263b78cd3d47
|
09e0efb05c9cd29c7aa4301cec7fb95d24d2bdc2
|
/nionui_app/nionui_examples/ui_demo/StatusBar.py
|
99f1fd1396a4fbf0ae59bae7916bfa90a1e1e115
|
[
"Apache-2.0"
] |
permissive
|
AEljarrat/nionui
|
4c452037f7e3ee6710c19b18afeb45e35e9ec361
|
3714a54d56f472a8a0f7b9f8a8240103ca790374
|
refs/heads/master
| 2022-11-30T19:54:10.714060
| 2020-08-06T21:37:47
| 2020-08-06T21:37:47
| 286,014,692
| 0
| 0
|
NOASSERTION
| 2020-08-08T09:40:31
| 2020-08-08T09:40:30
| null |
UTF-8
|
Python
| false
| false
| 686
|
py
|
from nion.utils import Model
class Handler:
    """UI handler for the status-bar demo: owns the slider value model."""
    # Model observed by "@binding(slider_value_model.value)" expressions in the UI.
    # NOTE(review): class-level attribute, so the model is shared by all Handler
    # instances — confirm that is intended for this demo.
    slider_value_model = Model.PropertyModel(50)
    def reset(self, widget):
        """Reset the slider model back to its default value of 50."""
        self.slider_value_model.value = 50
def construct_ui(ui):
    """Build the demo layout: a bound label and reset button above a status bar."""
    # Main content: label bound to the handler's slider model plus a reset button.
    value_label = ui.create_label(text="@binding(slider_value_model.value)")
    reset_button = ui.create_push_button(text="Reset to 50", on_clicked="reset")
    main_column = ui.create_column(value_label, reset_button, spacing=8)
    # Status bar: two labels pushed to opposite edges by a stretch in between.
    left_label = ui.create_label(text="LEFT")
    right_label = ui.create_label(text="RIGHT")
    bar_row = ui.create_row(left_label, ui.create_stretch(), right_label, spacing=8)
    status_bar = ui.create_group(bar_row)
    # A stretch between content and status bar pins the bar to the bottom.
    return ui.create_column(main_column, ui.create_stretch(), status_bar, spacing=8)
|
[
"cmeyer1969@gmail.com"
] |
cmeyer1969@gmail.com
|
fa40c9f5c291d466245994d71e3b05bde8d0668f
|
976230c7d6270f9fbec052e01f9799e13059f79c
|
/tests/spatio_temporal/test_daily.py
|
0c473f4a8bf1f201c13a2cf6200948d00b171507
|
[
"MIT"
] |
permissive
|
amet123/jyotisha
|
01cd60884f729814e6757d2a7186a6a4e3941a1f
|
83930880ca74d6ddc95cc72f7026ef63a2e1c3be
|
refs/heads/master
| 2020-03-16T09:44:13.310195
| 2018-02-10T02:43:19
| 2018-02-10T02:43:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,037
|
py
|
import logging
from jyotisha.panchangam.spatio_temporal import City
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s: %(asctime)s {%(filename)s:%(lineno)d}: %(message)s "
)
def test_solar_day():
    """Solar month/day for Chennai at JD 2457023.27 match the known values."""
    from jyotisha.panchangam.spatio_temporal import daily
    chennai = City('Chennai', '13:05:24', '80:16:12', 'Asia/Calcutta')
    panchangam = daily.Panchangam(city=chennai, julian_day=2457023.27)
    panchangam.compute_solar_day()
    logging.debug(str(panchangam))
    assert panchangam.solar_month == 9
    assert panchangam.solar_month_day == 17
def test_tb_muhuurta():
    """Fifteen muhuurtas are computed, tiling sunrise back around to sunrise."""
    import numpy.testing
    from jyotisha.panchangam.spatio_temporal import daily
    chennai = City('Chennai', '13:05:24', '80:16:12', 'Asia/Calcutta')
    panchangam = daily.Panchangam(city=chennai, julian_day=2457023.27)
    panchangam.compute_tb_muhuurtas()
    logging.debug(str(panchangam))
    muhuurtas = panchangam.tb_muhuurtas
    assert len(muhuurtas) == 15
    assert muhuurtas[0].jd_start == panchangam.jd_sunrise
    numpy.testing.assert_approx_equal(muhuurtas[14].jd_end, panchangam.jd_sunrise)
|
[
"vishvas.vasuki@gmail.com"
] |
vishvas.vasuki@gmail.com
|
c1c2f08f213c1883d5c0d7e36a037127caeebd14
|
29ab920cf5c02ca6351e1c98824b8ebdb08933b0
|
/src/core/models.py
|
d6b1641b0977a3f01e92bd8e2d76097fcb2dff09
|
[
"MIT"
] |
permissive
|
yordan-marinov/su_dj_todo_app
|
237c8a14c17d349a75a437ab810c396269d3b571
|
34adcf3c17fcda30e57d76099780d0cf5f3f0fe6
|
refs/heads/main
| 2023-05-15T12:05:08.948196
| 2021-06-16T08:01:03
| 2021-06-16T08:01:03
| 375,369,149
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 531
|
py
|
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Task(models.Model):
    """A single to-do item, optionally owned by a user."""
    # Nullable owner, so unassigned tasks are allowed; deleting the user
    # cascades and removes their tasks.
    user = models.ForeignKey(User, on_delete=models.CASCADE, null=True, blank=True)
    title = models.CharField(max_length=120)
    # Optional free-form details.
    description = models.TextField(null=True, blank=True)
    # Done flag; also drives the default ordering below.
    complete = models.BooleanField(default=False)
    # Set once when the row is created.
    created_on = models.DateTimeField(auto_now_add=True)
    class Meta:
        # Incomplete tasks (False) sort before completed ones (True).
        ordering = ['complete']
    def __str__(self):
        return self.title
|
[
"jordanmarinov8@gmail.com"
] |
jordanmarinov8@gmail.com
|
47eccdae8acbd25874bd94626617616ed0ee61fe
|
5fdcf15f818eb2d0c7b5dd39443064d5bc42aff9
|
/lc_reorganizing_string.py
|
8ab639672dbe010d9b0fc8e9842a0ca17c1438dc
|
[] |
no_license
|
vincentt117/coding_challenge
|
acf3664034a71ffd70c5f1ac0f6a66768e097a6e
|
5deff070bb9f6b19a1cfc0a6086ac155496fbb78
|
refs/heads/master
| 2021-07-02T05:43:08.007851
| 2020-08-27T02:16:19
| 2020-08-27T02:16:19
| 146,027,883
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 886
|
py
|
# Optimal solution retrieved from Leetcode - https://leetcode.com/problems/reorganize-string/
class Solution:
    def reorganizeString(self, S: str) -> str:
        """Rearrange S so that no two adjacent characters are equal.

        Returns a valid rearrangement, or "" when impossible — i.e. when one
        character accounts for more than ceil(len(S) / 2) of the string.
        Greedy strategy: repeatedly emit the two most frequent remaining
        characters, tracked in a max-heap keyed on negated counts.
        """
        # Bug fix: the original file used heapq without importing it anywhere,
        # raising NameError at runtime.
        import heapq

        # Max-heap of (-count, char); heapq is a min-heap, hence the negation.
        pq = [(-S.count(ch), ch) for ch in set(S)]
        heapq.heapify(pq)
        # Infeasible if any single character exceeds half the string (rounded up).
        if any(-neg_count > (len(S) + 1) / 2 for neg_count, _ in pq):
            return ""
        ans = []
        while len(pq) >= 2:
            # The two most frequent characters are distinct, so appending the
            # pair never places equal characters next to each other.
            neg1, ch1 = heapq.heappop(pq)
            neg2, ch2 = heapq.heappop(pq)
            ans.extend([ch1, ch2])
            # Counts are negative; +1 moves toward zero. Push back while nonzero.
            if neg1 + 1:
                heapq.heappush(pq, (neg1 + 1, ch1))
            if neg2 + 1:
                heapq.heappush(pq, (neg2 + 1, ch2))
        # At most one character (with a single remaining occurrence) is left.
        return "".join(ans) + (pq[0][1] if pq else '')
|
[
"vincentt117@gmail.com"
] |
vincentt117@gmail.com
|
97c63ac9e665a2d66342e42e241c92b0489a2ee3
|
18d51ac0a6ca14c8221c26f0dacd8d3721ca28e9
|
/73_hun.py
|
5a18a272bbd749e2a12f95c01d715dc788f4067d
|
[] |
no_license
|
mahakalai/mahak
|
05f96d52880ed7b2e5eb70dd1dbf14fc533236e8
|
613be9df7743ef59b1f0e07b7df987d29bb23ec7
|
refs/heads/master
| 2020-04-15T05:01:58.541930
| 2019-07-15T16:28:32
| 2019-07-15T16:28:32
| 164,406,486
| 0
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 176
|
py
|
# Read two strings and print their longest common substring.
s = input()
s1 = input()
l = []
if s == s1:
    print(s)
else:
    # Enumerate every substring of s1 and keep those that also occur in s.
    # Bug fixes vs. the original: iterate over s1's own indices (the inputs
    # may differ in length) and let j reach len(s1) so substrings ending at
    # the last character are considered.
    for i in range(0, len(s1)):
        for j in range(i + 1, len(s1) + 1):
            k = s1[i:j]
            if k in s:
                l.append(k)
    # Guard against no common substring (the original crashed on max([])).
    print(max(l, key=len) if l else '')
|
[
"noreply@github.com"
] |
mahakalai.noreply@github.com
|
21e0a4a5c520b5364f7115b1ed1387993123f14b
|
bea2c1ad58a1d45705ba8f9e4527e4ffac4969cb
|
/sfepy/discrete/parse_regions.py
|
c0b71f0ee8b1d0bc7fd05dcd16232c367e61ac0b
|
[
"BSD-3-Clause"
] |
permissive
|
fethio/sfepy
|
8423e9083e3144e1fd97d24c31eda1646167ff5b
|
78efe51d6038dc92f5bfeac4c57614746fe28117
|
refs/heads/master
| 2021-01-16T20:59:29.459626
| 2016-07-01T15:06:11
| 2016-07-01T15:06:11
| 61,715,917
| 1
| 1
| null | 2016-07-01T15:06:11
| 2016-06-22T12:06:07
|
Python
|
UTF-8
|
Python
| false
| false
| 6,738
|
py
|
"""
Grammar for selecting regions of a domain.
Regions serve for selection of certain parts of the computational domain
represented as a finite element mesh. They are used to define the boundary
conditions, the domains of terms and materials etc.
Notes
-----
History: pre-git versions already from from 13.06.2006.
"""
from pyparsing import Literal, CaselessLiteral, Word, delimitedList,\
Group, Optional, ZeroOrMore, nums, alphas, alphanums,\
Combine, StringStart, StringEnd, Forward, oneOf, ParseException
ParseException # Needed for importing elsewhere.
op_codes = ['OA_SubV', 'OA_SubE', 'OA_SubF', 'OA_SubC', 'OA_SubS',
'OA_AddV', 'OA_AddE', 'OA_AddF', 'OA_AddC', 'OA_AddS',
'OA_IntersectV', 'OA_IntersectE', 'OA_IntersectF',
'OA_IntersectC', 'OA_IntersectS']
eval_codes = ['E_VIR', 'E_VOS', 'E_VBF', 'E_VOG', 'E_OVIR', 'E_VI', 'E_VOSET',
'E_CBF', 'E_COG', 'E_CI', 'E_COSET']
kw_codes = ['KW_All', 'KW_Region']
def to_stack(stack):
    """Return a pyparsing parse action that pushes the first token onto *stack*.

    The tokens are returned unchanged so parsing proceeds normally.
    """
    def _push(instring, loc, tokens):
        if tokens:
            stack.append(tokens[0])
        return tokens
    return _push
def replace(what, keep=False):
    """Return a parse action replacing the match with a {'token', 'orig'} dict.

    When *keep* is true, 'orig' holds a copy of the first token group;
    otherwise it is left empty.
    """
    def _action(instring, loc, tokens):
        original = list(tokens[0]) if keep else []
        return {'token': what, 'orig': original}
    return _action
def replace_with_region(what, r_index):
    """Return a parse action like :func:`replace` that also flattens a region.

    If the token at *r_index* within the matched group is an already-parsed
    'KW_Region' dict, it is expanded back into its original tokens in place.
    """
    def _action(instring, loc, tokens):
        group = tokens[0]
        region = group[r_index]
        if isinstance(region, dict) and region['token'] == 'KW_Region':
            group = list(group[:r_index]) + region['orig']
        return {'token': what, 'orig': group}
    return _action
def join_tokens(instring, loc, tokens):
    """Parse action collapsing a token group into one space-separated string."""
    joined = " ".join(tokens[0])
    return [joined]
def visit_stack(stack, op_visitor, leaf_visitor):
    """Recursively consume *stack* (postfix order), dispatching on token kind.

    Operator tokens (op_codes) consume two sub-results; eval/keyword tokens
    are leaves. Returns the visitors' result for the root. Note this pops —
    and therefore destroys — the stack it is given.
    """
    def visit(stack, level):
        op = stack.pop()
        token = op['token']
        if token in op_codes:
            # Binary operator: the second operand was pushed last, so pop it first.
            res2 = visit(stack, level + 1)
            res1 = visit(stack, level + 1)
            return op_visitor(level, op, res1, res2)
        elif token in eval_codes:
            return leaf_visitor(level, op)
        elif token in kw_codes:
            return leaf_visitor(level, op)
        else:
            # Bug fix: the original used Python-2-only `raise ValueError, token`
            # syntax (SyntaxError under Python 3); the call form works in both.
            raise ValueError(token)
    return visit(stack, 0)
def print_op(level, op, item1, item2):
    """Debug visitor: print an operator node indented by its tree depth."""
    # Bug fix: the original used a Python-2 print statement (SyntaxError under
    # Python 3); the parenthesized single-argument form behaves identically in both.
    print(level * ' ' + (': %s' % op))
def print_leaf(level, op):
    """Debug visitor: print a leaf node indented by its tree depth."""
    # Bug fix: the original used a Python-2 print statement (SyntaxError under
    # Python 3); the parenthesized single-argument form behaves identically in both.
    print(level * ' ' + ('< %s' % op))
def print_stack(stack):
    # Debug helper: dump the parsed operation stack as an indented tree.
    # NOTE: consumes (pops) the stack it is given.
    visit_stack(stack, print_op, print_leaf)
def create_bnf(stack):
    """
    Build the pyparsing grammar for region selection expressions.

    Parse actions push {'token', 'orig'} dicts onto *stack* in postfix order;
    the caller later consumes the stack with ``visit_stack()``.
    """
    # Numeric literals: plain integers, and signed floats with optional exponent.
    point = Literal(".")
    e = CaselessLiteral("E")
    inumber = Word(nums)
    fnumber = Combine(Word("+-"+nums, nums) +
                      Optional(point + Optional(Word(nums))) +
                      Optional(e + Word("+-"+nums, nums)))
    # Keywords used inside selector phrases.
    _of = Literal('of')
    _in = Literal('in')
    _by = Literal('by')
    _copy = Literal('copy')
    # Binary set operators between regions, one per entity suffix (v/e/f/c/s),
    # each tagged with the matching OA_* op code for the stack.
    _mv = Literal('-v').setParseAction(replace('OA_SubV'))
    _me = Literal('-e').setParseAction(replace('OA_SubE'))
    _mf = Literal('-f').setParseAction(replace('OA_SubF'))
    _mc = Literal('-c').setParseAction(replace('OA_SubC'))
    _ms = Literal('-s').setParseAction(replace('OA_SubS'))
    _pv = Literal('+v').setParseAction(replace('OA_AddV'))
    _pe = Literal('+e').setParseAction(replace('OA_AddE'))
    _pf = Literal('+f').setParseAction(replace('OA_AddF'))
    _pc = Literal('+c').setParseAction(replace('OA_AddC'))
    _ps = Literal('+s').setParseAction(replace('OA_AddS'))
    _inv = Literal('*v').setParseAction(replace('OA_IntersectV'))
    _ine = Literal('*e').setParseAction(replace('OA_IntersectE'))
    _inf = Literal('*f').setParseAction(replace('OA_IntersectF'))
    _inc = Literal('*c').setParseAction(replace('OA_IntersectC'))
    _ins = Literal('*s').setParseAction(replace('OA_IntersectS'))
    regop = (_mv | _me | _mf | _mc | _ms |
             _pv | _pe | _pf | _pc | _ps |
             _inv | _ine | _inf | _inc | _ins)
    lpar = Literal("(").suppress()
    rpar = Literal(")").suppress()
    _all = Literal('all').setParseAction(replace('KW_All'))
    vertex = Literal('vertex')
    vertices = Literal('vertices')
    cell = Literal('cell')
    cells = Literal('cells')
    group = Literal('group')
    _set = Literal('set')
    surface = Literal('surface')
    ident = Word(alphas + '_.', alphanums + '_.')
    set_name = Word(nums) | ident
    function = Word(alphas + '_', alphanums + '_')
    function = Group(function).setParseAction(join_tokens)
    # Region references look like 'r.<name>', optionally prefixed by 'copy'.
    region = Combine(Literal('r.') + Word(alphas + '_',
                                          '_' + alphas + nums + '.'))
    region = Group(Optional(_copy, default='nocopy') + region)
    region.setParseAction(replace('KW_Region', keep=True))
    # Coordinate relations such as '(x < 0.1) & (y > 0)' are kept as one string.
    coor = oneOf('x y z')
    boolop = oneOf('& |')
    relop = oneOf('< > <= >= != ==')
    bool_term = (ZeroOrMore('(') + (coor | fnumber) + relop + (coor | fnumber)
                 + ZeroOrMore(')'))
    relation = Forward()
    relation << (ZeroOrMore('(')
                 + bool_term + ZeroOrMore(boolop + relation)
                 + ZeroOrMore(')'))
    relation = Group(relation).setParseAction(join_tokens)
    # Leaf selectors, each tagged with its E_* eval code for the stack.
    nos = Group(vertices + _of + surface).setParseAction(replace('E_VOS'))
    nir = Group(vertices + _in + relation).setParseAction(
        replace('E_VIR', keep=True))
    nbf = Group(vertices + _by + function).setParseAction(
        replace('E_VBF', keep=True))
    ebf = Group(cells + _by + function).setParseAction(
        replace('E_CBF', keep=True))
    eog = Group(cells + _of + group + Word(nums)).setParseAction(
        replace('E_COG', keep=True))
    nog = Group(vertices + _of + group + Word(nums)).setParseAction(
        replace('E_VOG', keep=True))
    # 'vertex in r.<region>' flattens an embedded KW_Region (index 2 of the group).
    onir = Group(vertex + _in + region).setParseAction(
        replace_with_region('E_OVIR', 2))
    ni = Group(vertex + delimitedList(inumber)).setParseAction(
        replace('E_VI', keep=True))
    ei = Group(cell + delimitedList(inumber)).setParseAction(
        replace('E_CI', keep=True))
    noset = Group(vertices + _of + _set + set_name).setParseAction(
        replace('E_VOSET', keep=True))
    eoset = Group(cells + _of + _set + set_name).setParseAction(
        replace('E_COSET', keep=True))
    # Full expression: an atom, optionally followed by operator+expression tails.
    # Atoms and operator tails push onto *stack* as they are matched.
    region_expression = Forward()
    atom1 = (_all | region | ni | onir | nos | nir | nbf
             | ei | ebf | eog | nog | noset | eoset)
    atom1.setParseAction(to_stack(stack))
    atom2 = (lpar + region_expression.suppress() + rpar)
    atom = (atom1 | atom2)
    aux = (regop + region_expression)
    aux.setParseAction(to_stack(stack))
    region_expression << atom + ZeroOrMore(aux)
    region_expression = StringStart() + region_expression + StringEnd()
    return region_expression
|
[
"cimrman3@ntc.zcu.cz"
] |
cimrman3@ntc.zcu.cz
|
e678647f567f69e613f9801f9ae2ac7ac80e1659
|
de24f83a5e3768a2638ebcf13cbe717e75740168
|
/moodledata/vpl_data/63/usersdata/185/30193/submittedfiles/swamee.py
|
cf9c5669d5d51a92ee3a60133f07bc131b3da0cc
|
[] |
no_license
|
rafaelperazzo/programacao-web
|
95643423a35c44613b0f64bed05bd34780fe2436
|
170dd5440afb9ee68a973f3de13a99aa4c735d79
|
refs/heads/master
| 2021-01-12T14:06:25.773146
| 2017-12-22T16:05:45
| 2017-12-22T16:05:45
| 69,566,344
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 352
|
py
|
# -*- coding: utf-8 -*-
import math
# START YOUR CODE HERE
# Swamee–Jain pipe sizing: read inputs, compute the diameter D, the Reynolds
# number Rey, and the friction-factor estimate K.
f = float(input('digite f:'))
L = float(input('digite L:'))
Q = float(input('digite Q:'))
DeltaH = float(input('digite DeltaH:'))
V = float(input('digite V:'))
g = 9.81
E = 0.000002  # pipe roughness (m)
# D = [8 f L Q^2 / (pi^2 g DeltaH)]^(1/5).
# Bug fix: the original `**1/5` parses as (x**1)/5; the fifth root needs **(1/5).
D = ((8*f*L*(Q*Q))/((math.pi**2)*g*DeltaH))**(1/5)
Rey = (4*Q)/(math.pi*D*V)
# Swamee–Jain friction factor: K = 0.25 / [log10(E/(3.7 D) + 5.74/Rey^0.9)]^2.
# Bug fix: the original line was missing a closing parenthesis (SyntaxError).
K = 0.25/(math.log10((E/(3.7*D))+(5.74/(Rey**0.9)))**2)
|
[
"rafael.mota@ufca.edu.br"
] |
rafael.mota@ufca.edu.br
|
1d1b706b79d817045a4458163d9e0a30057bb120
|
9743d5fd24822f79c156ad112229e25adb9ed6f6
|
/xai/brain/wordbase/adjectives/_nets.py
|
9595b4e462c02417b87f949ef703a5e57f707413
|
[
"MIT"
] |
permissive
|
cash2one/xai
|
de7adad1758f50dd6786bf0111e71a903f039b64
|
e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6
|
refs/heads/master
| 2021-01-19T12:33:54.964379
| 2017-01-28T02:00:50
| 2017-01-28T02:00:50
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 227
|
py
|
from xai.brain.wordbase.adjectives._net import _NET
# class header
class _NETS(_NET, ):
    def __init__(self,):
        """Wordbase entry for "nets": the plural form, derived from _NET ("net")."""
        _NET.__init__(self)
        # Word metadata; the base class presumably supplies shared behavior —
        # only the identifying fields are overridden here.
        self.name = "NETS"
        self.specie = 'adjectives'
        self.basic = "net"
        self.jsondata = {}
|
[
"xingwang1991@gmail.com"
] |
xingwang1991@gmail.com
|
7967013853faa251a7c164a1774ad60bcab38057
|
84a240d463a7c286a4ff3b0c344a0e80e681ce38
|
/src/chap03/10_time_dependencies/test.py
|
1ae0ad5dbbf3e72fb61a6fcfb782ede2deaa843d
|
[] |
no_license
|
perillaroc/ecflow-tutorial-code
|
87f12b11f942b2323e45f8f8c205f665ceaeca7f
|
e00d0556a161a4c221b854f99dbbd0898ee14762
|
refs/heads/master
| 2021-09-06T04:30:48.812500
| 2018-02-02T06:48:41
| 2018-02-02T06:48:41
| 119,969,899
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,034
|
py
|
import os
from pathlib import Path
from ecflow import Defs, Suite, Task, Family, Edit, Trigger, Event, Complete, Meter, Time, Day, Date
def create_family_f1():
return Family(
"f1",
Edit(SLEEP=20),
Task("t1",
Time("03:00 23:00 00:30")),
Task("t2",
Day("sunday")),
Task("t3",
Date("1.*.*"),
Time("12:00")
),
Task("t4",
Time("+00:02")),
Task("t5",
Time("00:02"))
)
print("Creating suite definition")
home = os.path.abspath(Path(Path(__file__).parent, "../../../build/course"))
defs = Defs(
Suite('test',
Edit(ECF_INCLUDE=home, ECF_HOME=home),
create_family_f1()))
print(defs)
print("Checking job creation: .ecf -> .job0")
print(defs.check_job_creation())
print("Saving definition to file 'test.def'")
defs.save_as_defs(str(Path(home, "test.def")))
# To restore the definition from file 'test.def' we can use:
# restored_defs = ecflow.Defs("test.def")
|
[
"perillaroc@gmail.com"
] |
perillaroc@gmail.com
|
b8993aa7ccaea8513fd07124d6005cdbd77c5068
|
dd6cf539f20a0143acbdda1ed3f64b18b08a29b5
|
/whiteList_v5/Config/2019第一季度脚本/config_fangtoo.py
|
8af88ee3c64feb458b58b8f158acd47a212eef70
|
[
"MIT"
] |
permissive
|
myirelias/white_list
|
eec6a900cc3eea0f227425327ab55a6b46ba873d
|
2bf344d2dc8f5cb3afbde71e248c019651ee3a7a
|
refs/heads/master
| 2020-04-15T11:10:44.850942
| 2019-11-05T01:51:41
| 2019-11-05T01:51:41
| 164,618,155
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,178
|
py
|
# Fangtoo (www.fangtoo.com) real-estate news site — crawler configuration.
TASK_NAME = 'fangtoo'
# Start URL for the crawl.
START_URL = 'http://www.fangtoo.com/'
# Crawl scope (allowed domains); must be a list.
DOMAIN = ['fangtoo']
# HTTP request headers.
HEADERS = {
    'Host': 'www.fangtoo.com',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:53.0) Gecko/20100101 Firefox/53.0',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'Accept-Encoding': 'gzip, deflate',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'If-Modified-Since': 'Tue, 26 Feb 2019 06:20:30 GMT',
    'Cache-Control': 'max-age=0',
}
# XPath rule collecting candidate links from a page.
XPATHER_HREF = ".//*/@href"
# Field template
# {
#     "title": "",
#     "news_date": "",
#     "source": "",
#     "author": "",
#     "navigation": "",
#     "content": "",
#     "editor": "",
#     "tags": ""
# },
# One dict per supported page layout; values are XPath expressions.
# (The Chinese literals inside the expressions, e.g. '来源:' "source" and
# '编辑' "editor", are markers scraped from the page and must stay as-is.)
XPATHER_NEWS_LIST = [
    {
        "title": "normalize-space(.//*[@class='main-text-cnt']/h1)",
        "news_date": "substring(normalize-space(.//*[contains(@class,'time-source')]),1,20)",
        "source": "substring-before(substring-after(normalize-space(.//*[contains(@class,'time-source')]),'来源:'),'编辑')",
        "author": "",
        "navigation": "normalize-space(.//*[@class='urhere'])",
        "content": ".//*[@class='main-text']/descendant::text()",
        "editor": "substring-after(normalize-space(.//*[contains(@class,'time-source')]),'编辑:')",
        "tags": ".//*[@name='keywords']/@content"
    },
    {
        "title": "normalize-space(.//*[@class='title'])",
        "news_date": "normalize-space(.//*[@class='info']/span[1])",
        "source": "normalize-space(.//*[@class='info']/span[3])",
        "author": "",
        "navigation": "normalize-space(.//*[@class='channel-name'])",
        "content": ".//article/descendant::p/text()",
        "editor": "substring-after(normalize-space(.//*[@class='info']/span[2]),'编辑:')",
        "tags": ".//*[@name='keywords']/@content"
    },
]
# Regex for static article pages; URLs matching it are scraped for news content.
REGEX_URL = r'/\d*-\d*-\w*\d*\.[s]*htm[l]*'
|
[
"myIrelia@aliyun.com"
] |
myIrelia@aliyun.com
|
451b171eedc679c5013c736a0918b085ca36b46c
|
fbea3e5d1ecab07b15887b321cd650349f22df95
|
/peterklepec_webpage/cms/dashboard.py
|
4df6b11b0df775c579e8ece440fb36e4e21e822a
|
[] |
no_license
|
simonrakovic/peterklepec
|
2e2004c04450898107da61314ec2ba03ee93bbe7
|
58114cfbd4f85d08a4749aa34492f52e11a9925e
|
refs/heads/master
| 2021-01-21T07:53:35.429724
| 2016-09-21T12:34:52
| 2016-09-21T12:34:52
| 24,656,406
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,632
|
py
|
"""
This file was generated with the customdashboard management command and
contains the class for the main dashboard.
To activate your index dashboard add the following to your settings.py::
GRAPPELLI_INDEX_DASHBOARD = 'peterklepec_webpage.dashboard.CustomIndexDashboard'
"""
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
from grappelli.dashboard import modules, Dashboard
from grappelli.dashboard.utils import get_admin_site_name
class CustomIndexDashboard(Dashboard):
"""
Custom index dashboard for www.
"""
def init_with_context(self, context):
site_name = get_admin_site_name(context)
# append a group for "Administration" & "Applications"
self.children.append(modules.Group(
_('Group: Administration & Applications'),
column=1,
collapsible=True,
children = [
modules.AppList(
_('Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
),
modules.AppList(
_('Applications'),
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
)
]
))
# append an app list module for "Applications"
self.children.append(modules.AppList(
_('AppList: Applications'),
collapsible=True,
column=1,
css_classes=('collapse closed',),
exclude=('django.contrib.*',),
))
# append an app list module for "Administration"
self.children.append(modules.ModelList(
_('ModelList: Administration'),
column=1,
collapsible=False,
models=('django.contrib.*',),
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Media Management'),
column=2,
children=[
{
'title': _('FileBrowser'),
'url': '/admin/filebrowser/browse/',
'external': False,
},
]
))
# append another link list module for "support".
self.children.append(modules.LinkList(
_('Support'),
column=2,
children=[
{
'title': _('Django Documentation'),
'url': 'http://docs.djangoproject.com/',
'external': True,
},
{
'title': _('Grappelli Documentation'),
'url': 'http://packages.python.org/django-grappelli/',
'external': True,
},
{
'title': _('Grappelli Google-Code'),
'url': 'http://code.google.com/p/django-grappelli/',
'external': True,
},
]
))
# append a feed module
self.children.append(modules.Feed(
_('Latest Django News'),
column=2,
feed_url='http://www.djangoproject.com/rss/weblog/',
limit=5
))
# append a recent actions module
self.children.append(modules.RecentActions(
_('Recent Actions'),
limit=5,
collapsible=False,
column=3,
))
|
[
"simonrakovic@gmail.com"
] |
simonrakovic@gmail.com
|
9500c48e7a7fbda723ca05e82f6932e50a80cc2a
|
53ba1c29d6122b8afeb6578f1a338621c61f507d
|
/FCCeeAnalyses/ZH_Zmumu/dataframe/plots.py
|
1478a35a3cf293e86dc3db7e5dd11aed9830304d
|
[] |
no_license
|
selvaggi/FCChhAnalyses
|
dd420f5bdba60147322cc16b49479ca200e8a54c
|
8f397c77229c68ad87947e6912466da4b0a6654b
|
refs/heads/master
| 2021-04-27T17:08:28.824726
| 2020-05-21T07:42:43
| 2020-05-21T07:42:43
| 136,484,120
| 1
| 0
| null | 2019-05-02T13:12:39
| 2018-06-07T13:49:23
|
Python
|
UTF-8
|
Python
| false
| false
| 1,547
|
py
|
import ROOT
# global parameters
intLumi = 5.0e+06 #in pb-1
ana_tex = "e^{+}e^{-} #rightarrow ZH #rightarrow #mu^{+}#mu^{-} + X"
delphesVersion = "3.4.2"
energy = 240.0
collider = "FCC-ee"
inputDir = "Outputs/FCCee/ZH_Zmumu/"
formats = ['png','pdf']
yaxis = ['lin','log']
stacksig = ['stack','nostack']
outdir = 'Outputs/FCCee/ZH_Zmumu/plots/'
variables = ['mz','mz_zoom','nbjets','leptonic_recoil_m','leptonic_recoil_m_zoom']
###Dictonnary with the analysis name as a key, and the list of selections to be plotted for this analysis. The name of the selections should be the same than in the final selection
selections = {}
selections['ZH'] = ["sel0","sel1","sel2"]
selections['ZH_2'] = ["sel0","sel2"]
extralabel = {}
extralabel['sel0'] = "Selection: N_{Z} = 1"
extralabel['sel1'] = "Selection: N_{Z} = 1; 80 GeV < m_{Z} < 100 GeV"
extralabel['sel2'] = "Selection: N_{Z} = 1; 80 GeV < m_{Z} < 100 GeV; N_{b} = 2"
colors = {}
colors['ZH'] = ROOT.kRed
colors['WW'] = ROOT.kBlue+1
colors['ZZ'] = ROOT.kGreen+2
colors['VV'] = ROOT.kGreen+3
plots = {}
plots['ZH'] = {'signal':{'ZH':['p8_ee_ZH_ecm240']},
'backgrounds':{'WW':['p8_ee_WW_ecm240'],
'ZZ':['p8_ee_ZZ_ecm240']}
}
plots['ZH_2'] = {'signal':{'ZH':['p8_ee_ZH_ecm240']},
'backgrounds':{'VV':['p8_ee_WW_ecm240','p8_ee_ZZ_ecm240']}
}
legend = {}
legend['ZH'] = 'ZH'
legend['WW'] = 'WW'
legend['ZZ'] = 'ZZ'
legend['VV'] = 'VV boson'
|
[
"clement.helsens@cern.ch"
] |
clement.helsens@cern.ch
|
51cf6cb246c8fc7206420f5ca0b4ca38daf503b7
|
9a2413b572c0f89b1f80899a10237657d9393bd6
|
/sdk/python/pulumi_keycloak/generic_client_role_mapper.py
|
7482298398ca25d54904f73e31411f577ced592b
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] |
permissive
|
jaxxstorm/pulumi-keycloak
|
5c25363ece6af49dad40bd693ce07b1fa0dedd74
|
2fc7b1060b725a40d2ada745aa0d10130243a0b5
|
refs/heads/master
| 2022-10-10T13:11:04.290703
| 2020-06-05T19:11:19
| 2020-06-05T19:11:19
| 270,870,883
| 0
| 0
|
NOASSERTION
| 2020-06-09T01:08:56
| 2020-06-09T01:08:55
| null |
UTF-8
|
Python
| false
| false
| 4,797
|
py
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from typing import Union
from . import utilities, tables
class GenericClientRoleMapper(pulumi.CustomResource):
client_id: pulumi.Output[str]
"""
The destination client of the client role. Cannot be used at the same time as client_scope_id.
"""
client_scope_id: pulumi.Output[str]
"""
The destination client scope of the client role. Cannot be used at the same time as client_id.
"""
realm_id: pulumi.Output[str]
"""
The realm id where the associated client or client scope exists.
"""
role_id: pulumi.Output[str]
"""
Id of the role to assign
"""
def __init__(__self__, resource_name, opts=None, client_id=None, client_scope_id=None, realm_id=None, role_id=None, __props__=None, __name__=None, __opts__=None):
"""
Create a GenericClientRoleMapper resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The destination client of the client role. Cannot be used at the same time as client_scope_id.
:param pulumi.Input[str] client_scope_id: The destination client scope of the client role. Cannot be used at the same time as client_id.
:param pulumi.Input[str] realm_id: The realm id where the associated client or client scope exists.
:param pulumi.Input[str] role_id: Id of the role to assign
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['client_id'] = client_id
__props__['client_scope_id'] = client_scope_id
if realm_id is None:
raise TypeError("Missing required property 'realm_id'")
__props__['realm_id'] = realm_id
if role_id is None:
raise TypeError("Missing required property 'role_id'")
__props__['role_id'] = role_id
super(GenericClientRoleMapper, __self__).__init__(
'keycloak:index/genericClientRoleMapper:GenericClientRoleMapper',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name, id, opts=None, client_id=None, client_scope_id=None, realm_id=None, role_id=None):
"""
Get an existing GenericClientRoleMapper resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param str id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] client_id: The destination client of the client role. Cannot be used at the same time as client_scope_id.
:param pulumi.Input[str] client_scope_id: The destination client scope of the client role. Cannot be used at the same time as client_id.
:param pulumi.Input[str] realm_id: The realm id where the associated client or client scope exists.
:param pulumi.Input[str] role_id: Id of the role to assign
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["client_id"] = client_id
__props__["client_scope_id"] = client_scope_id
__props__["realm_id"] = realm_id
__props__["role_id"] = role_id
return GenericClientRoleMapper(resource_name, opts=opts, __props__=__props__)
def translate_output_property(self, prop):
return tables._CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return tables._SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
|
[
"public@paulstack.co.uk"
] |
public@paulstack.co.uk
|
4c10cbd3c907d2b700a4587d67043ec7b66c5280
|
f098c361ee79bb8b7a8402fcf20b37f17fb36983
|
/Back-End/Python/Basics/Part -3- Hash Maps/04- Serialization and Deserialization/_04_JSON_singledispatch.py
|
5bac467735833efd499c104e8e24e933427f5fe3
|
[
"MIT",
"Apache-2.0",
"LicenseRef-scancode-public-domain"
] |
permissive
|
rnsdoodi/Programming-CookBook
|
4d619537a6875ffbcb42cbdaf01d80db1feba9b4
|
9bd9c105fdd823aea1c3f391f5018fd1f8f37182
|
refs/heads/master
| 2023-09-05T22:09:08.282385
| 2021-10-31T11:57:40
| 2021-10-31T11:57:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,389
|
py
|
import json
from datetime import datetime
log_record = {
'time': datetime.utcnow(),
'message': 'Testing...',
'other': {'a', 'b', 'c'}
}
def custom_json_formatter(arg):
if isinstance(arg, datetime):
return arg.isoformat()
elif isinstance(arg, set):
return list(arg)
print(json.dumps(log_record, default=custom_json_formatter))
#{"time": "2020-11-06T03:10:44.482803", "message": "Testing...", "other": ["a", "b", "c"]}
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
self.create_dt = datetime.utcnow()
def __repr__(self):
return f'Person(name={self.name}, age={self.age})'
def toJSON(self):
return {
'name': self.name,
'age': self.age,
'create_dt': self.create_dt.isoformat()
}
p = Person('John', 82)
print(p)
print(p.toJSON())
# {"time": "2020-11-06T03:11:08.677000", "message": "Testing...", "other": ["b", "a", "c"]}
# Person(name=John, age=82)
# {'name': 'John', 'age': 82, 'create_dt': '2020-11-06T03:11:08.677000'}
def custom_json_formatter(arg):
if isinstance(arg, datetime):
return arg.isoformat()
elif isinstance(arg, set):
return list(arg)
elif isinstance(arg, Person):
return arg.toJSON()
log_record = dict(time=datetime.utcnow(),
message='Created new person record',
person=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:12:12.624757",
# "message": "Created new person record",
# "person": {
# "name": "John",
# "age": 82,
# "create_dt": "2020-11-06T03:12:12.624757"
# }
# }
class Person:
def __init__(self, name, age):
self.name = name
self.age = age
self.create_dt = datetime.utcnow()
def __repr__(self):
return f'Person(name={self.name}, age={self.age})'
def toJSON(self):
return {
'name': self.name,
'age': self.age,
'create_dt': self.create_dt
}
p = Person('Monty', 100)
log_record = dict(time=datetime.utcnow(),
message='Created new person record',
person=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
# "time": "2020-11-06T03:13:01.412670",
# "message": "Created new person record",
# "person": {
# "name": "Monty",
# "age": 100,
# "create_dt": "2020-11-06T03:13:01.412670"
# }
# }
class Person:
    """Person record that serializes itself via its instance __dict__."""

    def __init__(self, name, age):
        self.name = name
        self.age = age
        self.create_dt = datetime.utcnow()

    def __repr__(self):
        return 'Person(name={}, age={})'.format(self.name, self.age)

    def toJSON(self):
        # vars(self) is self.__dict__: every instance attribute, unconverted.
        return vars(self)
# toJSON() now returns the live __dict__, including the raw datetime.
p = Person('Python', 27)
print(p.toJSON())
# {'name': 'Python', 'age': 27, 'create_dt': datetime.datetime(2020, 11, 6, 3, 13, 34, 452006)}
log_record['person'] = p
print(log_record)
#{'time': datetime.datetime(2020, 11, 6, 3, 14, 6, 399677), 'message': 'Created new person record', 'person': Person(name=Python, age=27)}
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
#   "time": "2020-11-06T03:14:19.885341",
#   "message": "Created new person record",
#   "person": {
#     "name": "Python",
#     "age": 27,
#     "create_dt": "2020-11-06T03:14:19.885341"
#   }
# }
def custom_json_formatter(arg):
    """Generic json.dumps default hook.

    Resolution order: ISO string for datetimes, list for sets, then the
    object's own toJSON() if it has one, then its __dict__ via vars(),
    and finally str() as a last resort.
    """
    if isinstance(arg, datetime):
        return arg.isoformat()
    if isinstance(arg, set):
        return list(arg)
    try:
        return arg.toJSON()
    except AttributeError:
        pass
    try:
        return vars(arg)
    except TypeError:
        return str(arg)
class Point:
    """Simple 2-D point; has no toJSON(), so it exercises the vars() fallback."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __repr__(self):
        return 'Point(x={}, y={})'.format(self.x, self.y)
# Point has no toJSON(), so the formatter falls back to vars(pt1).
pt1 = Point(10, 10)
log_record = dict(time=datetime.utcnow(),
                  message='Created new point',
                  point=pt1,
                  created_by=p)
print(json.dumps(log_record, default=custom_json_formatter, indent=2))
# {
#   "time": "2020-11-06T03:18:39.272100",
#   "message": "Created new point",
#   "point": {
#     "x": 10,
#     "y": 10
#   },
#   "created_by": {
#     "name": "Python",
#     "age": 27,
#     "create_dt": "2020-11-06T03:18:39.272100"
#   }
# }
|
[
"58447627+Koubae@users.noreply.github.com"
] |
58447627+Koubae@users.noreply.github.com
|
cee671906f006b2298a9e9071b8bf2c43320fd39
|
5c7b6f96aef9a2c605c8e16eb0e3f6e2ab958947
|
/settings.py
|
454c3f98b426329036c61e8949009c6ba0b54d30
|
[] |
no_license
|
chapkovski/progressbaracrossrounds
|
0137ce4a552edf9027eb092f1d9e0abde5cc6e8e
|
62905f17d456fc6a7a57fa1fe91b510593740518
|
refs/heads/master
| 2021-07-03T20:53:58.758511
| 2020-09-03T22:19:49
| 2020-09-03T22:19:49
| 87,289,409
| 0
| 0
| null | 2021-06-10T20:38:02
| 2017-04-05T09:07:49
|
Python
|
UTF-8
|
Python
| false
| false
| 1,797
|
py
|
import os
from os import environ
import dj_database_url
from boto.mturk import qualification
import otree.settings
# Absolute path of the directory containing this settings module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# OTREE_PRODUCTION controls Django's DEBUG flag: any value other than
# unset/empty/'0' switches DEBUG off.
DEBUG = environ.get('OTREE_PRODUCTION') in (None, '', '0')

ADMIN_USERNAME = 'admin'
# For security, the admin password comes from the environment, never the repo.
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
AUTH_LEVEL = environ.get('OTREE_AUTH_LEVEL')

# Payment currency (e.g. EUR, CAD, GBP, CHF, CNY, JPY).
REAL_WORLD_CURRENCY_CODE = 'USD'
USE_POINTS = True
POINTS_DECIMAL_PLACES = 2

# UI language (e.g. en, de, fr, it, ja, zh-hans); see the Django i18n docs.
LANGUAGE_CODE = 'en'

# Apps listed in SESSION_CONFIGS don't need to be repeated here.
INSTALLED_APPS = ['otree', ]

DEMO_PAGE_INTRO_TEXT = """
oTree games
"""

# Defaults merged into every entry of SESSION_CONFIGS below.
SESSION_CONFIG_DEFAULTS = {
    'real_world_currency_per_point': 0.000,
    'participation_fee': 0.00,
    'doc': "",
}

SESSION_CONFIGS = [
    {
        'name': 'bigfive',
        'display_name': 'Progress bar across rounds',
        'num_demo_participants': 1,
        'app_sequence': ['bigfive'],
    },
]

SECRET_KEY = 'whatever'
|
[
"chapkovski@gmail.com"
] |
chapkovski@gmail.com
|
6372a17f6b0d06d1112a8594098c1279fd098c30
|
6eb58e32b469c37428185ab4456184905a5b4fb5
|
/analysis_code/newdata_dataread_BP_MHTSTRATEGY_v2_hacktemp.py
|
c830e7ec48f7165d32ca2acd9803182abd11b3b1
|
[] |
no_license
|
rchenmit/mht_analysis
|
0b8bfff7730df835975c7c41d65f007ad269e3a9
|
678d4419bdaed9ed9d0041df3a2cd8638074590f
|
refs/heads/master
| 2020-04-06T03:40:41.577209
| 2015-01-12T00:14:48
| 2015-01-12T00:14:48
| 19,548,658
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,094
|
py
|
## Robert Chen
## Monday 3/12/2014
##
## trying to parse this in python
##
import os
import sys
if os.name == 'nt': #'nt' = windows
sys.path.append('C:\\anaconda\\lib\\site-packages') #in windows, alot of modules were installed with Anaconda
import pandas as pd
import numpy as np
import math
import copy
import csv
import scipy as s
import openpyxl
from openpyxl import load_workbook
import matplotlib.pyplot as plt
import matplotlib
from datetime import datetime
from dateutil import parser
from collections import Counter
#import pickle
if sys.version_info.major == 3:
import pickle
else:
import cPickle as pickle
## ENTER import files ##########################################################################################################
# Input CSV/XLSX paths for meds, med classes, clinician strategy, BP and eGFR.
datadir = '../../data/new_data_20140416/Data_20140409/'
filename = datadir + 'Meds_DD_04082014.csv'
file_classes = datadir + 'MedClasses.xlsx'
file_BP_clinician = datadir + 'mht_strategy_20140407.txt'
file_BP_record = datadir + 'BP_04082014.csv'
file_eGFR_record = datadir + 'EGFR_04082014.csv'
pickle_dir = '../analysis_output/pickle/'
if not os.path.exists(pickle_dir):
    os.makedirs(pickle_dir)
#pickle
# NOTE(review): the dataframes dumped here (df_bp_clinician, df_bp_record,
# df_egfr_record) are built earlier in the full script.  The .close() calls
# after each `with` block are redundant — the context manager already closed
# the file — but harmless.
with open(pickle_dir + "df_bp_clinician.pickle", "wb") as output_file:
    pickle.dump(df_bp_clinician, output_file)
output_file.close()
with open(pickle_dir + "df_bp_record.pickle", "wb") as output_file:
    pickle.dump(df_bp_record, output_file)
output_file.close()
with open(pickle_dir + "df_egfr_record.pickle", "wb") as output_file:
    pickle.dump(df_egfr_record, output_file)
output_file.close()
## analyze recorded BP's: using BP.txt (reported numbers)#################################################################################
list_ruid = list(set(df_bp_clinician.index.values)) #list of floats
#earliest and latest possible date : for throwing out bad data
early_date = datetime(1990,1,1)
late_date = datetime.today()
#make dictionary of BP's key'd by RUID
# Each value is a DataFrame of [SYSTOLIC, DIASTOLIC] indexed by measurement date.
d_bp_record = dict()
cnt = 0
print("bulding dictionary of recorded BP's (346K lines total)-----------------\n")
for i in range(len(df_bp_record)):
    cnt+=1
    if (cnt % 10000 == 0):
        print(cnt)
    key = df_bp_record.index[i]
    indexes_for_df = np.array([])
    data = []
    this_date = parser.parse(df_bp_record.iloc[i]['MEASURE_DATE']) ##PARSE THE DATE OUT!
    bool_this_date_good = this_date > early_date and this_date < late_date
    indexes_for_df = np.append(indexes_for_df, this_date)
    # Keep the row only when both readings are purely numeric strings and the
    # date is plausible; otherwise an empty `data` list is appended, which
    # still records the timestamp with no readings.
    if df_bp_record.iloc[i]['SYSTOLIC'].isdigit() and df_bp_record.iloc[i]['DIASTOLIC'].isdigit() and bool_this_date_good:
        data.append([int(df_bp_record.iloc[i]['SYSTOLIC']), int(df_bp_record.iloc[i]['DIASTOLIC'])]) #CAST ELEMENTS AS INTEGERS!!!!
    if key in d_bp_record: #then append
        d_bp_record[key] = d_bp_record[key].append(pd.DataFrame(data, index = indexes_for_df, columns = ['SYSTOLIC', 'DIASTOLIC']))
    else: #then initialize
        d_bp_record[key] = pd.DataFrame(data, index = indexes_for_df, columns = ['SYSTOLIC', 'DIASTOLIC'])
#add in status at each time point
print("calculating BP control status from recorded numbers: \n")
for key in d_bp_record: #loop thru the keys in dictionary
    d_bp_record[key]['STATUS'] = 0
    bool_condition_systolic = d_bp_record[key]['SYSTOLIC'] < 140 #in control if SYSTOLIC < 140
    bool_condition_diastolic = d_bp_record[key]['DIASTOLIC'] < 90 #in control if DIASTOLIC < 90
    bool_condition_INCONTROL = bool_condition_systolic & bool_condition_diastolic
    d_bp_record[key].loc[bool_condition_INCONTROL, 'STATUS'] = 1 # 1 => IN CONTROL
    d_bp_record[key].loc[~bool_condition_INCONTROL, 'STATUS'] = -1 # -1 => OUT OF CONTROL
#make dictionary of BP Control Status (at the patient level, ie mostly in control or out of control)
# Patient-level label: 1 = mostly in control, -1 = mostly out, 0 = mixed.
print("calculating intervals of in control vs out of control from recorded numbers: \n")
d_bp_status_pt_level = dict()
for key in d_bp_record:
    d_days_in_out = {-1: 0, 1:0}
    ts_status_this_pt = d_bp_record[key]['STATUS'].sort_index()
    last_status = ts_status_this_pt[0]
    last_timestamp = ts_status_this_pt.index[0]
    if len(ts_status_this_pt) > 1 and (max(ts_status_this_pt.index) - min(ts_status_this_pt.index)).days > 1: #if there are more than 1 entry, and more than 1 day's worth (if theres more than one entry and they're not all on the same day)
        #loop thru the timeSeries of status for this patient
        # Accumulate whole days spent in each status between consecutive readings.
        for timestamp in ts_status_this_pt.index:
            time_delta = (timestamp - last_timestamp).days
            d_days_in_out[last_status] += time_delta #add the time that has passed
            if ts_status_this_pt[timestamp].size > 1:
                status_at_this_timestamp = ts_status_this_pt[timestamp][-1] #pick the last recorded status for this timestamp
                if status_at_this_timestamp != last_status: #if the status changed
                    last_status = status_at_this_timestamp
            else:
                status_at_this_timestamp = ts_status_this_pt[timestamp]
                if status_at_this_timestamp != last_status: #if the status changed
                    last_status = status_at_this_timestamp #then change last_status to reflect this so that you add to the right status for the next timestamp
            last_timestamp = timestamp
        #now count how many days in /out and detemrine if mostly in or mostly out or mixed
        num_in = d_days_in_out[1]
        num_out = d_days_in_out[-1]
    else: #if only one BP measurement was taken for the patient
        if last_status == 1:
            num_in = 1
            num_out = 0
        else:
            num_in = 0
            num_out = 1
    if num_in == 0 and num_out == 0:
        print("ERROR 0: no days in or out! " + str(key))
        d_bp_status_pt_level[key] = 0
    elif num_out == 0:
        if num_in > num_out:
            d_bp_status_pt_level[key] = 1
        else:
            print("ERROR1 - check!")
    elif num_in == 0:
        if num_out > num_in:
            d_bp_status_pt_level[key] = -1
        else:
            print("ERROR2 - check!")
    # NOTE(review): the next two branches are unreachable — `num_out == 0`
    # and `num_in == 0` were already handled by the elifs above.
    elif num_in > num_out and num_out == 0:
        d_bp_status_pt_level[key] = 1
    elif num_out > num_in and num_in == 0:
        d_bp_status_pt_level[key] = -1
    elif num_in / float(num_out) > 1.5:
        d_bp_status_pt_level[key] = 1
    elif num_out / float(num_in) > 1.5:
        d_bp_status_pt_level[key] = -1
    else:
        d_bp_status_pt_level[key] = 0
#print counts
print("number patients with each control class (from numbers: ")
counter_control_status = Counter(val for val in d_bp_status_pt_level.values())
print(counter_control_status)
#pickle:
with open(pickle_dir + "d_bp_record.pickle", "wb") as output_file:
    pickle.dump(d_bp_record, output_file)
output_file.close()
with open(pickle_dir + "d_bp_status_pt_level.pickle", "wb") as output_file:
    pickle.dump(d_bp_status_pt_level, output_file)
output_file.close()
with open(pickle_dir + "list_ruid.pickle", "wb") as output_file:
    pickle.dump(list_ruid, output_file)
output_file.close()
|
[
"robchen401@gmail.com"
] |
robchen401@gmail.com
|
b19d4c48e98253f0c798e34a47492d4728a86040
|
440e4f13d6b3939f0290931bcd984591f191f9f9
|
/demo/XmlResourceSubclass.py
|
3ffb043f8b415e55275f9873d23bd8a36a423eb2
|
[] |
no_license
|
AlfiyaZi/Phoenix
|
b445e6906eb61d037c83957ce601d731dc04acfa
|
c524ed1a3794ec4d2baaba6b12d6d7ef37aa7695
|
refs/heads/master
| 2020-06-16T18:04:42.696173
| 2016-11-24T18:39:47
| 2016-11-24T18:39:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,717
|
py
|
#!/usr/bin/env python
import wx
import wx.xrc as xrc
#----------------------------------------------------------------------
resourceText = br'''<?xml version="1.0"?>
<resource>
<!-- Notice that the class IS a standard wx class, and a custom
subclass is specified as "moduleName.ClassName" Try changing
the classname to one that does not exist and see what happens -->
<object class="wxPanel" subclass="XmlResourceSubclass.MyCustomPanel" name="MyPanel">
<size>200,100</size>
<object class="wxStaticText" name="label1">
<label>This panel is a custom class derived from wx.Panel,\nand is loaded by a custom XmlResourceHandler.</label>
<pos>10,10</pos>
</object>
</object>
</resource>
'''
#----------------------------------------------------------------------
class MyCustomPanel(wx.Panel):
    """wx.Panel subclass instantiated by XRC via the resource's subclass attribute."""

    def __init__(self):
        # Two-phase creation: XRC calls Create() later, so only the default
        # constructor runs here.
        wx.Panel.__init__(self)
        # the Create step is done by XRC.
        self.Bind(wx.EVT_WINDOW_CREATE, self.OnCreate)
        self.Bind(wx.EVT_SIZE, self.OnSize)

    def OnCreate(self, evt):
        # This is the little bit of customization that we do for this
        # silly example.  It could just as easily have been done in
        # the resource.  We do it in the EVT_WINDOW_CREATE handler
        # because the window doesn't really exist yet in the __init__.
        if self is evt.GetEventObject():
            t = wx.StaticText(self, -1, "MyCustomPanel")
            f = t.GetFont()
            f.SetWeight(wx.FONTWEIGHT_BOLD)
            f.SetPointSize(f.GetPointSize()+2)
            t.SetFont(f)
            self.t = t
            # On OSX the EVT_SIZE happens before EVT_WINDOW_CREATE !?!
            # so give it another kick
            wx.CallAfter(self.OnSize, None)
        evt.Skip()

    def OnSize(self, evt):
        # Re-center the label; guard on `t` because EVT_SIZE can fire
        # before OnCreate has created it.
        if hasattr(self, 't'):
            sz = self.GetSize()
            w, h = self.t.GetTextExtent(self.t.GetLabel())
            self.t.SetPosition(((sz.width-w)/2, (sz.height-h)/2))
#----------------------------------------------------------------------
class TestPanel(wx.Panel):
    """Demo page: shows the XRC source on top and the panel built from it below."""

    def __init__(self, parent, log):
        self.log = log
        wx.Panel.__init__(self, parent, -1)

        # A bold heading, the raw XRC text, and a separator line.
        heading = wx.StaticText(self, -1, "The lower panel was built from this XML:")
        heading.SetFont(wx.Font(12, wx.FONTFAMILY_SWISS, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD))

        xml_view = wx.TextCtrl(self, -1, resourceText,
                               style=wx.TE_READONLY|wx.TE_MULTILINE)
        xml_view.SetInsertionPoint(0)

        separator = wx.StaticLine(self, -1)

        # Build the demo panel from the in-memory XRC buffer.
        resource = xrc.XmlResource()
        resource.LoadFromBuffer(resourceText)
        built_panel = resource.LoadPanel(self, "MyPanel")

        # Stack everything vertically.
        layout = wx.BoxSizer(wx.VERTICAL)
        layout.Add(heading, 0, wx.EXPAND|wx.TOP|wx.LEFT, 5)
        layout.Add(xml_view, 1, wx.EXPAND|wx.ALL, 5)
        layout.Add(separator, 0, wx.EXPAND)
        layout.Add(built_panel, 1, wx.EXPAND|wx.ALL, 5)
        self.SetSizer(layout)
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    """Demo-framework entry point: create and return this demo's page."""
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# HTML shown in the demo's "Overview" tab.
overview = """<html><body>
<h2><center>wx.XmlResourceSubclass</center></h2>

Sometimes it is necessary to use custom classes, but you still want
them to be created from XRC.  The subclass XRC attribute allows you to
do that.

</body></html>
"""

# Allow running this demo module standalone via the demo framework.
if __name__ == '__main__':
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
|
[
"robin@alldunn.com"
] |
robin@alldunn.com
|
d72b7ab5be64ba723348d6436a369f88a256597d
|
bce29ac8dccfc51983dcc00c433287866e9d63e7
|
/1222-5223.py
|
2fd13043ddeebbd63a8be746dccad6b295a1017f
|
[] |
no_license
|
aiifabbf/leetcode-memo
|
b2c3f110073367b4b6db95722e96a794b5fe0453
|
5be09b4a804cb600e61e24617b9b2a1cc78fab3f
|
refs/heads/master
| 2021-06-11T18:41:16.550017
| 2021-04-20T15:15:50
| 2021-04-20T15:15:50
| 175,244,504
| 10
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,916
|
py
|
"""
给一个8x8的棋盘、皇后的位置、国王的位置,问哪些皇后可以吃掉国王。
很简单,从国王开始往8个方向(上、下、左、右、上左、上右、下左、下右)分别搜索,每个方向遇到的第一个皇后就是能吃掉国王的皇后。注意如果出现国王和皇后之间隔了另一个皇后,比如
::
王 后1 后2
后2是没办法吃掉国王的。
但是要写的好看挺难的……我写了8个for循环。
"""
from typing import *
class Solution:
    def queensAttacktheKing(self, queens: List[List[int]], king: List[int]) -> List[List[int]]:
        """Return the queens that can directly attack the king on an 8x8 board.

        Scans outward from the king along the eight compass directions and
        keeps the first queen met on each ray — a nearer queen blocks any
        queen behind it on the same line.  Replaces the original eight
        copy-pasted loops with a direction table, preserving the original
        result order.
        """
        occupied = set(map(tuple, queens))  # O(1) membership per square
        # Same scan order as the original code: right, down, down-right,
        # left, up, up-left, up-right, down-left.
        directions = [(1, 0), (0, 1), (1, 1), (-1, 0),
                      (0, -1), (-1, -1), (1, -1), (-1, 1)]
        res = []
        for dx, dy in directions:
            for step in range(1, 9):  # at most 8 steps on an 8x8 board
                square = (king[0] + step * dx, king[1] + step * dy)
                if square in occupied:
                    res.append(square)  # first queen on this ray can attack
                    break               # everything further is blocked
        return list(map(list, res))
# s = Solution()
# print(s.queensAttacktheKing(queens = [[0,1],[1,0],[4,0],[0,4],[3,3],[2,4]], king = [0,0]))
# print(s.queensAttacktheKing(queens = [[0,0],[1,1],[2,2],[3,4],[3,5],[4,4],[4,5]], king = [3,3]))
# print(s.queensAttacktheKing(queens = [[5,6],[7,7],[2,1],[0,7],[1,6],[5,1],[3,7],[0,3],[4,0],[1,2],[6,3],[5,0],[0,4],[2,2],[1,1],[6,4],[5,4],[0,0],[2,6],[4,5],[5,2],[1,4],[7,5],[2,3],[0,5],[4,2],[1,0],[2,7],[0,1],[4,6],[6,1],[0,6],[4,3],[1,7]], king = [3,4]))
|
[
"aiifabbf@outlook.com"
] |
aiifabbf@outlook.com
|
36820395946cd35b82cf0890ad00f03f16354844
|
906b3df6f4cb2141910c19dbe8e29fe493205ed2
|
/contracts.py
|
b39f4cb339e5a70daf16f74cbf15b0b6b99822c0
|
[] |
no_license
|
webclinic017/IB_to_XCEL_Python
|
6af80ed3855e97664f38c50a945554678cc1f834
|
66dfefdb047d373486434e065324e676a9f7618e
|
refs/heads/main
| 2023-06-18T20:38:33.784478
| 2021-07-15T15:29:31
| 2021-07-15T15:29:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,991
|
py
|
from ibapi.contract import Contract
def _make_vix_future(expiry):
    """Build a CFE VIX future Contract with the given expiry (YYYYMMDD).

    NOTE(review): every contract uses the same local symbol 'VXK1' even
    though the expiries span July-November — confirm this is intentional.
    """
    contract = Contract()
    contract.symbol = 'VXK1'
    contract.secType = 'FUT'
    contract.tradingClass = 'VX'
    contract.exchange = 'CFE'
    contract.currency = 'USD'
    contract.lastTradeDateOrContractMonth = expiry
    return contract

# Monthly VIX futures, July through November 2021 (same module-level names
# as before, now built by the shared helper instead of duplicated blocks).
july_vixFuture_contract = _make_vix_future('20210721')
august_vixFuture_contract = _make_vix_future('20210818')
september_vixFuture_contract = _make_vix_future('20210915')
october_vixFuture_contract = _make_vix_future('20211020')
november_vixFuture_contract = _make_vix_future('20211117')

contract_list = [july_vixFuture_contract, august_vixFuture_contract, september_vixFuture_contract, october_vixFuture_contract, november_vixFuture_contract]
|
[
"noreply@github.com"
] |
webclinic017.noreply@github.com
|
8a221e3053dcc8502c46d16f04827b083c3b5bd0
|
8159c2c650d53fb188a26b508dfff524296707d1
|
/lore/deities/alki_l.py
|
905ce33ad3e1bc0fadf8acc8293b6ce5991ca540
|
[] |
no_license
|
Teifion/Rob3
|
501663bf5077da0c28a7db4960c74c4477dd11bf
|
81fc2f9930434b5a4de52b75eb6a8d78dd708f77
|
refs/heads/master
| 2021-01-16T19:20:29.666426
| 2011-05-30T13:34:17
| 2011-05-30T13:34:17
| 1,819,452
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 278
|
py
|
# Static page descriptor consumed by the lore site generator.
data = {
    "cat": "deities",
    "page": "alki",
}

# Content blocks for the page; `id` anchors a section, `level` gates
# visibility ("secret" and "gm" blocks are hidden from regular players).
blocks = [
    {
        "id": "summary",
        "text": "Summary text"
    },
    {
        "id": "Backstory",
        "text": """"""
    },
    {
        "level": "secret",
        "text": "Secret info"
    },
    {
        "id": "gm_notes",
        "level": "gm",
        "text": "GM info"
    }
]
|
[
"sarkalian@gmail.com"
] |
sarkalian@gmail.com
|
770d33e367fca4988fdf64c78ac4aef27d69ca8f
|
9682dab1ce9e00e11708872fa26febc847d4d18c
|
/pycorrector/seq2seq/corpus_reader.py
|
46822ab4f180f6abb8baed21724debbfb98f1cc9
|
[
"Apache-2.0"
] |
permissive
|
fireflycsq/pycorrector
|
1ed2a6df9ec05bb9055b2052f92301212ebab235
|
d1096e4cee99ba95bb4df945707bbd2b8972717a
|
refs/heads/master
| 2020-03-21T15:43:51.228521
| 2018-06-12T07:34:44
| 2018-06-12T07:34:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,961
|
py
|
# -*- coding: utf-8 -*-
# Author: XuMing <xuming624@qq.com>
# Brief: Corpus for model
import random
from pycorrector.seq2seq.reader import Reader, PAD_TOKEN, EOS_TOKEN, GO_TOKEN
class FCEReader(Reader):
    """
    Read FCE data set

    Yields (source, target) token-list pairs and can optionally inject
    synthetic errors (random dropout of function words, confusable-word
    substitutions) into the source side.
    """
    UNKNOWN_TOKEN = 'UNK'
    # Function words that may be randomly dropped to synthesize omission errors.
    DROPOUT_TOKENS = {"a", "an", "the", "'ll", "'s", "'m", "'ve"}
    # Confusable word pairs used to synthesize substitution errors.
    REPLACEMENTS = {"there": "their", "their": "there", "then": "than", "than": "then"}

    def __init__(self, config, train_path=None, token_2_id=None,
                 dropout_prob=0.25, replacement_prob=0.25, dataset_copies=2):
        super(FCEReader, self).__init__(
            config, train_path=train_path, token_2_id=token_2_id,
            special_tokens=[PAD_TOKEN, GO_TOKEN, EOS_TOKEN, FCEReader.UNKNOWN_TOKEN],
            dataset_copies=dataset_copies)
        self.dropout_prob = dropout_prob
        self.replacement_prob = replacement_prob
        self.UNKNOWN_ID = self.token_2_id[FCEReader.UNKNOWN_TOKEN]

    def read_samples_by_string(self, path):
        """Yield (source, target) token lists from consecutive line pairs.

        Lines are lowercased and the first 5 characters (a line label) are
        stripped before tokenization.
        """
        with open(path, 'r', encoding='utf-8') as f:
            while True:
                line_src = f.readline()
                line_dst = f.readline()
                if not line_src or len(line_src) < 5:
                    break
                source = line_src.lower()[5:].strip().split()
                target = line_dst.lower()[5:].strip().split()
                if self.config.enable_special_error:
                    new_source = []
                    for token in source:
                        # Random dropout words from the input
                        dropout_token = (token in FCEReader.DROPOUT_TOKENS and
                                         random.random() < self.dropout_prob)
                        replace_token = (token in FCEReader.REPLACEMENTS and
                                         random.random() < self.replacement_prob)
                        if replace_token:
                            # BUG FIX: look up the current token, not the whole
                            # `source` list — a list is unhashable, so the old
                            # REPLACEMENTS[source] raised TypeError whenever
                            # this branch was taken.
                            new_source.append(FCEReader.REPLACEMENTS[token])
                        elif not dropout_token:
                            new_source.append(token)
                    source = new_source
                yield source, target

    def unknown_token(self):
        return FCEReader.UNKNOWN_TOKEN

    def read_tokens(self, path):
        """Yield token lists from every second line (the corrected text)."""
        i = 0
        with open(path, 'r', encoding='utf-8') as f:
            for line in f:
                # Input the correct text, which start with 0
                if i % 2 == 1:
                    if line and len(line) > 5:
                        yield line.lower()[5:].strip().split()
                i += 1
class CGEDReader(Reader):
    """Reader for the CGED data set: paired (source, target) token lines."""

    UNKNOWN_TOKEN = 'UNK'

    def __init__(self, config, train_path=None, token_2_id=None, dataset_copies=2):
        super(CGEDReader, self).__init__(
            config, train_path=train_path, token_2_id=token_2_id,
            special_tokens=[PAD_TOKEN, GO_TOKEN, EOS_TOKEN, CGEDReader.UNKNOWN_TOKEN],
            dataset_copies=dataset_copies)
        self.UNKNOWN_ID = self.token_2_id[CGEDReader.UNKNOWN_TOKEN]

    def read_samples_by_string(self, path):
        """Yield (source, target) token lists from consecutive line pairs.

        Lines are lowercased and the 5-character line label is stripped
        before tokenization; iteration stops at EOF or a short line.
        """
        with open(path, 'r', encoding='utf-8') as f:
            while True:
                src_line = f.readline()
                dst_line = f.readline()
                if not src_line or len(src_line) < 5:
                    break
                yield (src_line.lower()[5:].strip().split(),
                       dst_line.lower()[5:].strip().split())

    def unknown_token(self):
        return CGEDReader.UNKNOWN_TOKEN

    def read_tokens(self, path, is_infer=False):
        """Yield token lists from every second line (the corrected text)."""
        with open(path, 'r', encoding='utf-8') as f:
            for line_no, line in enumerate(f):
                if line_no % 2 == 1 and line and len(line) > 5:
                    yield line.lower()[5:].strip().split()
|
[
"507153809@qq.com"
] |
507153809@qq.com
|
4a810f7029a6f0806c1dc6e4f8679c877af55d4b
|
2bb90b620f86d0d49f19f01593e1a4cc3c2e7ba8
|
/pardus/tags/2007.2/kernel/drivers/eciadsl/actions.py
|
77a821cb1f94a9b37fd3943be25d766c74b060e7
|
[] |
no_license
|
aligulle1/kuller
|
bda0d59ce8400aa3c7ba9c7e19589f27313492f7
|
7f98de19be27d7a517fe19a37c814748f7e18ba6
|
refs/heads/master
| 2021-01-20T02:22:09.451356
| 2013-07-23T17:57:58
| 2013-07-23T17:57:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 665
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/copyleft/gpl.txt.
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
WorkDir = "eciadsl-usermode-0.11"
def setup():
    """Patch the Tk config tool's binary path to /usr/bin, then configure."""
    pisitools.dosed("eciadsl-config.tk", "set BIN_DIR \"/usr/local/bin\"", "set BIN_DIR \"/usr/bin\"")
    autotools.configure()

def build():
    """Compile with the package's default make targets."""
    autotools.make()

def install():
    """Install into the packaging sandbox and ship the docs."""
    autotools.rawInstall("DESTDIR=%s" % get.installDIR())
    pisitools.dodoc("README", "INSTALL", "BUGS", "TODO", "TROUBLESHOOTING")
|
[
"yusuf.aydemir@istanbul.com"
] |
yusuf.aydemir@istanbul.com
|
03bcad2f49a796e50d41e05a57f6c096d03928b1
|
642e8d6d8cd8d08a73bdcf82ae9689a09284025c
|
/celery/tests/test_pickle.py
|
bf2f4ccc4968b6c53b399fa9e10f0ffbaf43368d
|
[
"BSD-3-Clause"
] |
permissive
|
abecciu/celery
|
941f29c033b54b766166f17aa8c5e4be05df08b9
|
f0c399e34d56c7a2a14cb42bfb2b6455c68ef0c0
|
refs/heads/master
| 2021-01-14T12:57:11.230199
| 2009-09-10T13:44:51
| 2009-09-10T13:44:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,631
|
py
|
import unittest
from celery.serialization import pickle
class RegularException(Exception):
    """Exception with the standard message-only constructor."""
    pass


class ArgOverrideException(Exception):
    """Exception whose __init__ takes an extra keyword argument.

    Pickle re-creates exceptions by calling __init__(*args), so args must
    carry status_code too — that is what this test suite verifies.
    """

    def __init__(self, message, status_code=10):
        self.status_code = status_code
        super(ArgOverrideException, self).__init__(message, status_code)


class TestPickle(unittest.TestCase):
    # See: http://www.reddit.com/r/django/comments/8gdwi/
    # celery_distributed_task_queue_for_django/c097hr1

    def test_pickle_regular_exception(self):
        """A plain exception survives a pickle round-trip with args intact."""
        e = None
        try:
            raise RegularException("RegularException raised")
        except RegularException as exc:
            # Py3-compatible capture (was `except X, e`, a SyntaxError on
            # Python 3; `as e` alone would also unbind e at block exit).
            e = exc

        pickled = pickle.dumps({"exception": e})
        unpickled = pickle.loads(pickled)
        exception = unpickled.get("exception")
        self.assertTrue(exception)
        self.assertTrue(isinstance(exception, RegularException))
        self.assertEqual(exception.args, ("RegularException raised", ))

    def test_pickle_arg_override_exception(self):
        """An exception with extra constructor args also round-trips."""
        e = None
        try:
            raise ArgOverrideException("ArgOverrideException raised",
                                       status_code=100)
        except ArgOverrideException as exc:
            e = exc

        pickled = pickle.dumps({"exception": e})
        unpickled = pickle.loads(pickled)
        exception = unpickled.get("exception")
        self.assertTrue(exception)
        self.assertTrue(isinstance(exception, ArgOverrideException))
        self.assertEqual(exception.args, ("ArgOverrideException raised",
                                          100))
        self.assertEqual(exception.status_code, 100)
|
[
"askh@opera.com"
] |
askh@opera.com
|
ee8fcb6113dfdf4554705e7c1cf7fe5e5b3c6017
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p03253/s028075824.py
|
5a75ef3726225d5282ae0dd3a1b8e2323b4d2dfd
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,029
|
py
|
import sys
import math
sys.setrecursionlimit(1000000) # 再帰上限を増やす
def calc_combi(n, m, mod=1000000007):
    """Return C(n, m) modulo *mod* (mod must be prime: Fermat inverse is used)."""
    if n - m < m:
        return calc_combi(n, n - m)
    ans_mull, ans_div = 1, 1
    for i in range(m):
        ans_mull *= (n - i)
        ans_div *= (i + 1)
        ans_mull %= mod
        ans_div %= mod
    # Divide via the modular inverse: a/b == a * b^(mod-2) (mod mod).
    ans = ans_mull * pow(ans_div, mod - 2, mod) % mod
    return ans


def main():
    """Count length-N sequences of positive ints whose product is M (mod 1e9+7).

    For each prime factor p^count of M, the count copies of p are distributed
    over N slots by stars-and-bars: C(count + N - 1, N - 1).
    """
    read = sys.stdin.readline
    N, M = map(int, read().rstrip().split())
    mod = 1000000007
    ans = 1
    # Trial division up to sqrt(M) inclusive.  The original
    # range(2, math.ceil(math.sqrt(M))) excluded sqrt(M) itself, so a
    # perfect-square M (e.g. M=4) left its factor uncounted, and `M /= i`
    # used float division, risking precision loss on large M.
    i = 2
    while i * i <= M:
        if M % i == 0:
            count = 0
            while M % i == 0:
                M //= i
                count += 1
            ans = ans * calc_combi(count + N - 1, N - 1, mod) % mod
        i += 1
    if M != 1:
        # One leftover prime factor greater than sqrt(original M).
        ans = ans * calc_combi(N, 1, mod) % mod
    print(ans)


if __name__ == '__main__':
    main()
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
d7d22bee196dd88cf02a813ed99e658a7a9134a1
|
d0758e0ca004226cec8ad8b26c9565c98534a8b8
|
/11-videogames/Julio/3 - Moscas/game.py
|
91460a45758ba15d812fa0b32ac1bfb74b85c7e4
|
[] |
no_license
|
pythoncanarias/eoi
|
334d64a96afc76ac1fa10282378f291b6d8c94b3
|
349367254f85e3e4273cede067ca950913a1332c
|
refs/heads/master
| 2023-07-06T08:00:11.366345
| 2023-06-30T15:19:33
| 2023-06-30T15:19:33
| 222,742,870
| 26
| 19
| null | 2023-06-25T16:03:46
| 2019-11-19T16:41:25
|
Jupyter Notebook
|
UTF-8
|
Python
| false
| false
| 1,274
|
py
|
import pygame
import math
from settings import *
from sprites import Player
class Game:
    """Minimal pygame app: a grid-lined board with a group of Player sprites."""

    def __init__(self):
        pygame.init()
        self.screen = pygame.display.set_mode([WIDTH, HEIGHT])
        pygame.display.set_caption(TITLE)
        self.clock = pygame.time.Clock()
        self.all_sprites = pygame.sprite.Group()
        # Spawn 32 players at the same tile; each Player registers itself
        # with the sprite group through the game reference it receives.
        for _ in range(32):
            Player(self, 10, 10)

    def run(self):
        """Main loop: tick the clock, handle events, update, redraw."""
        self.playing = True
        while self.playing:
            self.dt = self.clock.tick(FPS)
            self.events()
            self.update()
            self.draw()

    def events(self):
        """Process the event queue; a window-close request ends the loop."""
        for ev in pygame.event.get():
            if ev.type == pygame.QUIT:
                self.playing = False

    def update(self):
        """Advance all sprites by one frame."""
        self.all_sprites.update()

    def draw(self):
        """Clear, draw the tile grid, draw sprites, and flip the display."""
        self.screen.fill(BLACK)
        for grid_x in range(0, WIDTH, TILESIZE):
            pygame.draw.line(self.screen, LIGHTGREY, (grid_x, 0), (grid_x, HEIGHT))
        for grid_y in range(0, HEIGHT, TILESIZE):
            pygame.draw.line(self.screen, LIGHTGREY, (0, grid_y), (WIDTH, grid_y))
        self.all_sprites.draw(self.screen)
        # Nothing else to draw, let's show it!
        pygame.display.flip()
# Create the game and enter the main loop (blocks until the window closes).
game = Game()
game.run()
|
[
"euribates@gmail.com"
] |
euribates@gmail.com
|
5b666edc2f564f9e62ca32f883d55c4dc29f7449
|
567eac942e94c653dda710d52c1eb6db0847a075
|
/0x0B-python-input_output/100-append_after.py
|
c1d85581606a8e1587723e9fb8ea9451eb415a89
|
[] |
no_license
|
mecomontes/Higher-Level-Programming
|
beeb4a410ff99fa062a86bd0a7f4d87a39889283
|
3a78f6eeedc70e2f447c49ccaf0838f5878f651c
|
refs/heads/main
| 2023-06-26T18:02:29.046302
| 2021-07-13T14:58:24
| 2021-07-13T14:58:24
| 385,641,861
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 751
|
py
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tru Jun 3 15:13:37 2020
@author: Robinson Montes
"""
def append_after(filename="", search_string="", new_string=""):
    """
    Inserts a line after each line containing a specific string

    Arguments:
        filename (str): The name of the file
        search_string (str): The string to match
        new_string (str): The string to insert after each matching line

    Builds a fresh output list instead of inserting into the list being
    indexed: the original `for i in range(len(lines))` computed the range
    before any insertion, so every insert shifted later lines past the
    scanned window and matches after a prior match were silently skipped.
    """
    with open(filename, 'r', encoding='utf-8') as file:
        lines = file.readlines()
    result = []
    for line in lines:
        result.append(line)
        if search_string in line:
            result.append(new_string)
    with open(filename, 'w', encoding='utf-8') as file:
        file.write("".join(result))
|
[
"1574@holbertonschool.com"
] |
1574@holbertonschool.com
|
9e16773835ce51e64709f46cb6093b668958019f
|
2bf6bf2eeb72b9eb4b8a41f36fb56585e140a611
|
/pickle_to_tex.py
|
82080a24d542ddb03dc447346c6cdf65cb6965bc
|
[
"MIT"
] |
permissive
|
Daniel-Bu/exposure
|
0fcff9177764cc85bb46298cfcdfaf5aeb29b6ec
|
74549ac75822fba41125cb0a086905a51db49d7b
|
refs/heads/master
| 2020-09-27T07:34:49.726377
| 2019-12-07T07:06:34
| 2019-12-07T07:06:34
| 226,464,955
| 0
| 0
|
MIT
| 2019-12-07T06:12:36
| 2019-12-07T06:12:35
| null |
UTF-8
|
Python
| false
| false
| 5,295
|
py
|
# This script converts output pickle to step-by-step latex figures
import numpy as np
import os
import pickle as pickle
import shutil
NUM_STEPS = 5  # retouching steps per figure — presumably; confirm against caller
CURVE_STEPS = 8  # piecewise-linear segments used for tone/color curves
files = []
# Display names for the retouching filters, indexed by selected_filter_id.
filters = [
    'Expo.',
    'Gam.',
    'W.B.',
    'Satu.',
    'Tone',
    'Cst.',
    'BW',
    'Color',
]
def visualize_detail(name, param, pos):
    """Return TikZ/LaTeX markup describing one filter's parameters.

    name: filter display name (one of `filters`); param: that filter's
    parameter array; pos: (x, y) anchor of the node the markup attaches to.
    Curve filters ('Tone', 'Color') draw a small cumulative curve plot.
    """
    def map_pos(x, y):
        # Map unit-square curve coordinates into TikZ page coordinates,
        # scaled by 0.8 and offset below the node at `pos`.
        return '(%f,%f)' % (pos[0] + x * 0.8, pos[1] - 1.1 + y * 0.8)

    if name == 'Expo.':
        return '{Exposure $%+.2f$};' % param[0]
    elif name == 'Gam.':
        return '{Gamma $1/%.2f$};' % (1 / param[0])
    elif name == 'Satu.':
        return '{Saturation $+%.2f$};' % param[0]
    elif name == 'Cst.':
        return '{Contrast $%+.2f$};' % param[0]
    elif name == 'BW':
        return '{$%+.2f$};' % (param[0])
    elif name == 'W.B.':
        # Normalize the RGB gains by approximate luminance and render a
        # filled color swatch instead of numbers.
        scaling = 1 / (1e-5 + 0.27 * param[0] + 0.67 * param[1] + 0.06 * param[2])
        r, g, b = [int(255 * x * scaling) for x in param]
        color = r'{\definecolor{tempcolor}{RGB}{%d,%d,%d}};' % (r, g, b)
        return color + '\n' + r'\tikz \fill[tempcolor] (0,0) rectangle (4 ex, 2 ex);'
    elif name == 'Tone':
        s = '{Tone\quad\quad\quad\quad};\n'
        s += r'\draw[<->] %s -- %s -- %s;' % (map_pos(0, 1.1), map_pos(0, 0),
                                              map_pos(1.1, 0))
        s += '\n'
        for i in range(1):
            # Normalize the per-segment weights, then accumulate them in
            # place to get the cumulative (monotone) tone curve.
            values = np.array([0] + list(param[0][0][i]))
            values /= sum(values) + 1e-30
            scale = 1
            values *= scale
            for j in range(0, CURVE_STEPS):
                values[j + 1] += values[j]
            for j in range(CURVE_STEPS):
                p1 = (1.0 / CURVE_STEPS * j, values[j])
                p2 = (1.0 / CURVE_STEPS * (j + 1), values[j + 1])
                s += r'\draw[-] %s -- %s;' % (map_pos(*p1), map_pos(*p2))
                if j != CURVE_STEPS - 1:
                    s += '\n'
        return s
    elif name == 'Color':
        # Same cumulative-curve construction as 'Tone', but one curve per
        # RGB channel, drawn in its channel color.
        s = '{Color\quad\quad\quad\quad};\n'
        s += r'\draw[<->] %s -- %s -- %s;' % (map_pos(0, 1.1), map_pos(0, 0),
                                              map_pos(1.1, 0))
        s += '\n'
        c = ['red', 'green', 'blue']
        for i in range(3):
            #print(param)
            values = np.array([0] + list(param[0][0][i]))
            values /= sum(values) + 1e-30
            scale = 1
            values *= scale
            for j in range(0, CURVE_STEPS):
                values[j + 1] += values[j]
            for j in range(CURVE_STEPS):
                p1 = (1.0 / CURVE_STEPS * j, values[j])
                p2 = (1.0 / CURVE_STEPS * (j + 1), values[j + 1])
                s += r'\draw[%s,-] %s -- %s;' % (c[i], map_pos(*p1), map_pos(*p2))
                if j != CURVE_STEPS - 1:
                    s += '\n'
        return s
    else:
        assert False
def visualize_step(debug_info, step_name, position):
pdf = debug_info['pdf']
filter_id = debug_info['selected_filter_id']
s = ''
s += r'\node[draw, rectangle, thick,minimum height=7em,minimum width=7em](%s) at (%f,%f) {};' % (
step_name, position[0], position[1])
s += '\n'
s += r'\node (%ss) at ([yshift=1.4em]%s.center) {' % (step_name, step_name)
s += '\n'
s += r' \scalebox{0.7}{'
s += '\n'
s += r' \begin{tabular}{|p{0.5cm}p{0.2cm}p{0.5cm}p{0.2cm}|}'
s += '\n'
s += r' \hline'
s += '\n'
def bar(i):
return '\pdfbarSelected' if i == filter_id else '\pdfbar'
for i in range(4):
f1 = filters[i]
b1 = r'%s{%.3f}' % (bar(i), pdf[i] * 3)
f2 = filters[i + 4]
b2 = r'%s{%.3f}' % (bar(i + 4), pdf[i + 4] * 3)
s += r' %s & %s & %s & %s \\' % (f1, b1, f2, b2)
s += '\n'
s += r' \hline'
s += '\n'
s += r' \end{tabular}'
s += '\n'
s += r' }'
s += '\n'
s += r'};'
s += '\n'
s += r'\node (%sd) at ([yshift=-2.0em]%s.center)' % (step_name, step_name)
s += '\n'
s += visualize_detail(
filters[filter_id],
debug_info['filter_debug_info'][filter_id]['filter_parameters'], position)
s += '\n'
return s
def process_dog():
f = 'dog04/a0694.tif_debug.pkl'
debug_info_list = pickle.load(open(f, 'r'))
for i in range(NUM_STEPS):
debug_info = debug_info_list[i]
print(visualize_step(debug_info, 'agent%d' % (i + 1), (4, i * -3)), end=' ')
def process(filename, id, src):
pkl_fn = os.path.join(src, filename)
debug_info_list = pickle.load(open(pkl_fn, 'rb'))
filename = filename[:-10]
target_dir = 'export/{}'.format(id)
os.makedirs(target_dir, exist_ok=True)
for i in range(NUM_STEPS - 1):
shutil.copy(os.path.join(src, filename + '.intermediate%02d.png' % i),
os.path.join(target_dir, 'step%d.png' % (i + 1)))
shutil.copy(os.path.join(src, filename + '.retouched.png'), os.path.join(target_dir, 'final.png'))
shutil.copy(os.path.join(src, filename + '.linear.png'), os.path.join(target_dir, 'input.png'))
with open(target_dir + '/steps.tex', 'w') as f:
for i in range(NUM_STEPS):
debug_info = debug_info_list[i]
print(
visualize_step(debug_info, 'agent%d' % (i + 1), (4, i * -3)),
end=' ',
file=f)
print('##########################################')
print('Note: Please make sure you have pdflatex.')
print('##########################################')
print()
for input_dir in ['outputs']:
for f in os.listdir(input_dir):
if not f.endswith('pkl'):
continue
id = f.split('.')[0]
print('Generating pdf operating sequences for image {}...'.format(id))
process(f, id, src=input_dir)
|
[
"yuanmhu@gmail.com"
] |
yuanmhu@gmail.com
|
c5b30f8b5cad9ac276560858501ff8ed4aa8a8b1
|
0a7b77367cde1a64d95d1aab53b6a4f344056a9c
|
/mcod/organizations/views.py
|
685347e8b0ddc6c20caa3f4d05cff315812b5080
|
[] |
no_license
|
kaglowka/danyzespolapi
|
529c2c7fc5d35c630498c8438e59dbcc3c00c437
|
0b3c07c68cf61faa81756822af9eec7c497bba2f
|
refs/heads/master
| 2023-01-13T19:11:40.247512
| 2018-10-28T12:36:47
| 2018-10-28T12:36:47
| 154,993,674
| 0
| 0
| null | 2022-12-26T20:44:19
| 2018-10-27T17:57:34
|
CSS
|
UTF-8
|
Python
| false
| false
| 2,487
|
py
|
# -*- coding: utf-8 -*-
import falcon
from dal import autocomplete
from django.apps import apps
from elasticsearch_dsl import Q
from mcod.datasets.documents import DatasetsDoc
from mcod.datasets.schemas import DatasetsList
from mcod.datasets.serializers import DatasetSerializer, DatasetsMeta
from mcod.lib.handlers import SearchHandler, RetrieveOneHandler
from mcod.lib.triggers import LoginOptional
from mcod.lib.views import SearchView, RetrieveOneView
from mcod.organizations.documents import InstitutionDoc
from mcod.organizations.models import Organization
from mcod.organizations.schemas import InstitutionsList
from mcod.organizations.serializers import InstitutionsSerializer, InstitutionsMeta
class InstitutionsView(SearchView):
class GET(SearchHandler):
meta_serializer = InstitutionsMeta()
request_schema = InstitutionsList()
response_serializer = InstitutionsSerializer(many=True)
search_document = InstitutionDoc()
class InstitutionView(RetrieveOneView):
class GET(RetrieveOneHandler):
database_model = apps.get_model('organizations', 'Organization')
response_serializer = InstitutionsSerializer(many=False, include_data=('datasets',))
triggers = [LoginOptional(), ]
def resource_clean(self, request, id, *args, **kwargs):
model = self.database_model
try:
return model.objects.get(pk=id, status="published")
except model.DoesNotExist:
raise falcon.HTTPNotFound
class InstitutionDatasetsView(SearchView):
class GET(SearchHandler):
meta_serializer = DatasetsMeta()
request_schema = DatasetsList()
response_serializer = DatasetSerializer(many=True)
search_document = DatasetsDoc()
def _queryset(self, cleaned, *args, **kwargs):
qs = super()._queryset(cleaned, *args, **kwargs)
if 'id' in kwargs:
qs = qs.query("nested", path="institution",
query=Q("term", **{'institution.id': kwargs['id']}))
return qs
class OrganizationAutocomplete(autocomplete.Select2QuerySetView):
def get_queryset(self):
# Don't forget to filter out results depending on the visitor !
if not self.request.user.is_authenticated:
return Organization.objects.none()
qs = Organization.objects.all()
if self.q:
qs = qs.filter(title__icontains=self.q)
return qs
|
[
"krz.glowka@gmail.com"
] |
krz.glowka@gmail.com
|
1821ffa05b0f39dd18622808d4a83cb6c4da538e
|
c3132612a7ac311e501e432e1a4c7592bbd7a713
|
/day16/code/08_爬虫.py
|
f5ee39d962d3e580dd54293573568a861feddcd3
|
[] |
no_license
|
everqiujuan/python
|
7b8e169107012c3d7829d4ebd9860482fc0d8fec
|
b0a98de943217e24da60f79dec4fe8ebf4f1c713
|
refs/heads/master
| 2020-06-21T16:57:22.260311
| 2019-07-18T05:58:44
| 2019-07-18T05:58:44
| 184,990,635
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 990
|
py
|
#
# 爬虫: 蜘蛛Spider
#
# requests
import requests
import re
# pip 第三方包管理
# pip install requests 安装包
# pip uninstall requests 卸载包
# pip freeze 显示自己安装的包
# pip list 显示所有包
# pip -V 查看版本
# pip show requests 查看包详情
url = 'https://search.51job.com/list/040000,000000,0000,00,9,99,python,2,1.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99°reefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare='
res = requests.get(url)
string = res.content.decode('gbk')
# print(string)
pattern = '<div class="rt">(.*?)</div>'
res = re.findall(pattern, string, re.S)
# print(res)
string2 = res[0]
# print(string2)
# 取数字
# string2 = string2.strip()
# print(string2)
res2 = re.findall('(\d+)', string2)
print(res2)
print(res2[0])
|
[
"1748636236@qq.com"
] |
1748636236@qq.com
|
305d43af585283a2419fc1f458c295e373db6e69
|
ebd5c4632bb5f85c9e3311fd70f6f1bf92fae53f
|
/Sourcem8/otp/uberdog/OtpAvatarManagerAI.py
|
fe784b18b21426dfb9dc945b89ef2a60b8896819
|
[] |
no_license
|
BrandonAlex/Pirates-Online-Retribution
|
7f881a64ec74e595aaf62e78a39375d2d51f4d2e
|
980b7448f798e255eecfb6bd2ebb67b299b27dd7
|
refs/heads/master
| 2020-04-02T14:22:28.626453
| 2018-10-24T15:33:17
| 2018-10-24T15:33:17
| 154,521,816
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,377
|
py
|
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class OtpAvatarManagerAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory("OtpAvatarManagerAI")
def online(self):
pass
def requestAvatarList(self, todo0):
pass
def rejectAvatarList(self, todo0):
pass
def avatarListResponse(self, todo0):
pass
def requestAvatarSlot(self, todo0, todo1, todo2):
pass
def rejectAvatarSlot(self, todo0, todo1, todo2):
pass
def avatarSlotResponse(self, todo0, todo1):
pass
def requestPlayAvatar(self, todo0, todo1, todo2):
pass
def rejectPlayAvatar(self, todo0, todo1):
pass
def playAvatarResponse(self, todo0, todo1, todo2, todo3):
pass
def rejectCreateAvatar(self, todo0):
pass
def createAvatarResponse(self, todo0, todo1, todo2, todo3):
pass
def requestRemoveAvatar(self, todo0, todo1, todo2, todo3):
pass
def rejectRemoveAvatar(self, todo0):
pass
def removeAvatarResponse(self, todo0, todo1):
pass
def requestShareAvatar(self, todo0, todo1, todo2, todo3):
pass
def rejectShareAvatar(self, todo0):
pass
def shareAvatarResponse(self, todo0, todo1, todo2):
pass
|
[
"brandoncarden12345@gmail.com"
] |
brandoncarden12345@gmail.com
|
71f5c1ce2364f53f0df7d090799750a7ae5ef7d2
|
6cc9adf9c0d840139d70a51f9e05731c4f26fe4c
|
/Programmers/8.py
|
5bbe6b7fd4a2d2760dfc09ee32bdb38a7e843cb8
|
[] |
no_license
|
y2sec/Algorithm
|
2e05447d401c8c8d0c183c73fb1ce94d26080382
|
712d572bedfbf36dc27e1cc1272143e2b3b73b7f
|
refs/heads/master
| 2023-07-08T00:38:46.809836
| 2021-09-04T06:23:54
| 2021-09-04T06:23:54
| 317,202,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
# 문자열 압축
def solution(s):
answer = len(s)
for i in range(1, len(s) // 2 + 1):
start = 0
cnt = 1
change = ''
while start < len(s):
if s[start:start + i] == s[start + i:start + i + i]:
cnt += 1
elif cnt >= 2:
change += str(cnt) + s[start:start + i]
cnt = 1
else:
change += s[start:start + i]
start += i
answer = min(answer, len(change))
return answer
print(solution('a'))
|
[
"bae3835@gmail.com"
] |
bae3835@gmail.com
|
d43a0031adc40a69d9b2814a1c978c8cd490e873
|
910463f16caddc5a4e06d6ca362d62af20910dba
|
/CH_14_multithreading_and_multiprocessing/T_15_thread_local.py
|
5e4a0a8ab9363c44c745791a81890bf00047fd8a
|
[
"MIT"
] |
permissive
|
AniketS-cpu/Mastering-Python-Second-Edition-test
|
3621ac06fc2cff577992396cd924fe09a349d52e
|
84de81c355d7ca21a1849eed04a15b722538f521
|
refs/heads/master
| 2023-06-09T12:42:53.963075
| 2021-07-04T14:54:18
| 2021-07-04T20:43:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 588
|
py
|
import threading
import concurrent.futures
context = threading.local()
def init_counter():
context.counter = 10
def increment(name):
current_value = context.counter
print(f'{name} value before increment: {current_value}')
context.counter = current_value + 1
print(f'{name} value after increment: {context.counter}')
init_counter()
print(f'Before thread start: {context.counter}')
with concurrent.futures.ThreadPoolExecutor(
initializer=init_counter) as executor:
executor.map(increment, range(5))
print(f'After thread finish: {context.counter}')
|
[
"Wolph@wol.ph"
] |
Wolph@wol.ph
|
a613d147df8ad21434ac4cf9eead8e054bb3143b
|
b71c43b7f7785ca6718d74aff762557f5591758d
|
/Python/Week1/CodingDojo_Python/Django_Projects/email_validation/apps/emails/urls.py
|
5f6c9ebfc8cb14cc19aa4e6250f72d24b019c58b
|
[] |
no_license
|
jqchang/DojoAssignments
|
4be9db6039763905eada2253873997ba5bfd1058
|
3c5a8b351879ccc380af9ce3b5267ca26ea62681
|
refs/heads/master
| 2021-01-13T15:28:45.189452
| 2017-04-15T23:31:58
| 2017-04-15T23:31:58
| 79,868,706
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 259
|
py
|
from django.conf.urls import url
from . import views
# from django.contrib import admin
urlpatterns = [
url(r'^$', views.index),
url(r'^process$', views.process),
url(r'^success$', views.success),
url(r'^delete/(?P<id>\d+)$', views.delete)
]
|
[
"jqchang@gmail.com"
] |
jqchang@gmail.com
|
2412d1167b7c0b04015b6e4b11532237a2e2543d
|
2b0eab74af8d23244ff11699830f9bb10fbd717a
|
/energies/migrations/0007_energyvector_data.py
|
e945aa4ab0b6a070aaa73901fa7bf9c5246097dd
|
[] |
no_license
|
alexandrenorman/mixeur
|
c7e25cd20b03c78b361cb40e3e359a6dc5d9b06b
|
95d21cd6036a99c5f399b700a5426e9e2e17e878
|
refs/heads/main
| 2023-03-13T23:50:11.800627
| 2021-03-07T15:49:15
| 2021-03-07T15:49:15
| 345,384,858
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,103
|
py
|
# Generated by Django 2.2 on 2019-05-02 14:54
from decimal import Decimal
from django.db import migrations
def load_data(apps, schema_editor):
EnergyVector = apps.get_model("energies", "EnergyVector")
Energy = apps.get_model("energies", "Energy")
energies = {energy.identifier: energy for energy in Energy.objects.all()}
EnergyVector.objects.bulk_create([
EnergyVector(vector='oil_kg', buying_unit='kg de fioul', pci=Decimal("11.8"), unit="kWh / kg", energy=energies['oil'], order=10),
EnergyVector(vector='oil_l', buying_unit='L de fioul', pci=Decimal("10.384"), unit="kWh / L", energy=energies['oil'], order=20),
EnergyVector(vector='oil_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['oil'], order=30),
EnergyVector(vector='propane_kg', buying_unit='kg de propane', pci=Decimal("12.88"), unit="kWh / kg", energy=energies['gaz_b1'], order=40),
EnergyVector(vector='propane_m3', buying_unit='m3 de propane', pci=Decimal("7728"), unit="kWh / m³", energy=energies['gaz_b1'], order=50),
EnergyVector(vector='propane_bottles', buying_unit='Bouteilles de 13 kg de propane', pci=Decimal("167"), unit="kWh / bouteilles de 13 kg", energy=energies['propane'], order=60),
EnergyVector(vector='propane_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['propane'], order=70),
EnergyVector(vector='natural_gaz_m3', buying_unit='m3 de gaz naturel', pci=Decimal("10"), unit="kWh / m³", energy=energies['gaz_b1'], order=80),
EnergyVector(vector='natural_gaz_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['gaz_b1'], order=90),
EnergyVector(vector='electricity_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['electricity'], order=100),
EnergyVector(vector='rcu_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['network'], order=110),
EnergyVector(vector='wood_logs_stere', buying_unit='stères de bois', pci=Decimal("1700"), unit="kWh / stère", energy=energies['wood'], order=120),
EnergyVector(vector='granules_t', buying_unit='tonne', pci=Decimal("4700"), unit="kWh / t", energy=energies['bulk_granules'], order=130),
EnergyVector(vector='granules_bag', buying_unit='nombres de sacs de granulés', pci=Decimal("70.5"), unit="kWh/sac de granulés", energy=energies['bag_granules'], order=140),
EnergyVector(vector='shredded_wood_t', buying_unit='tonne', pci=Decimal("3500"), unit="kWh / t", energy=energies['shredded_wood'], order=150),
EnergyVector(vector='shredded_wood_map', buying_unit='MAP', pci=Decimal("875"), unit="kWh / MAP", energy=energies['shredded_wood'], order=160),
EnergyVector(vector='shredded_wood_kwh', buying_unit='kWh', pci=Decimal("1"), unit="kWh", energy=energies['shredded_wood'], order=170),
])
class Migration(migrations.Migration):
dependencies = [
('energies', '0006_energyvector'),
]
operations = [
migrations.RunPython(load_data, reverse_code=migrations.RunPython.noop),
]
|
[
"norman@xael.org"
] |
norman@xael.org
|
b3b2a6a978e363f8f53a0106b14af35f54d5c484
|
e23a4f57ce5474d468258e5e63b9e23fb6011188
|
/125_algorithms/_examples/_algorithms_challenges/pybites/advanced/98/grid.py
|
0b25292e5ce34d129230783fd6a21739685074b5
|
[] |
no_license
|
syurskyi/Python_Topics
|
52851ecce000cb751a3b986408efe32f0b4c0835
|
be331826b490b73f0a176e6abed86ef68ff2dd2b
|
refs/heads/master
| 2023-06-08T19:29:16.214395
| 2023-05-29T17:09:11
| 2023-05-29T17:09:11
| 220,583,118
| 3
| 2
| null | 2023-02-16T03:08:10
| 2019-11-09T02:58:47
|
Python
|
UTF-8
|
Python
| false
| false
| 2,104
|
py
|
import re
DOWN, UP, LEFT, RIGHT = '⇓', '⇑', '⇐', '⇒'
START_VALUE = 1
def print_sequence_route(grid, start_coordinates=None):
"""Receive grid string, convert to 2D matrix of ints, find the
START_VALUE coordinates and move through the numbers in order printing
them. Each time you turn append the grid with its corresponding symbol
(DOWN / UP / LEFT / RIGHT). See the TESTS for more info."""
matrix = []
for i,line in enumerate(grid.splitlines()):
if i % 2 == 1:
values = list(map(int,re.split(r'\D+',line)))
if START_VALUE in values:
start_row = len(matrix)
start_col = values.index(START_VALUE)
matrix.append(values)
length = len(matrix)
goal = length**2
current_row,current_col = start_row,start_col
current_value = START_VALUE
previous_direction = None
print(current_value,end=' ')
while current_value != goal:
directions = ((current_row + 1,current_col,DOWN),(current_row - 1,current_col,UP),(current_row,current_col + 1,RIGHT),(current_row,current_col -1,LEFT))
for neighbor_x,neighbor_y,direction in directions:
if 0 <= neighbor_x < length and 0 <= neighbor_y < length:
if matrix[neighbor_x][neighbor_y] == current_value + 1:
if previous_direction is not None and direction != previous_direction:
print(direction)
previous_direction = direction
elif previous_direction is None:
previous_direction = direction
print(current_value + 1,end=' ')
break
current_row,current_col = neighbor_x,neighbor_y
current_value += 1
if __name__ == "__main__":
small_grid = """
21 - 22 - 23 - 24 - 25
|
20 7 - 8 - 9 - 10
| | |
19 6 1 - 2 11
| | | |
18 5 - 4 - 3 12
| |
17 - 16 - 15 - 14 - 13"""
print_sequence_route(small_grid)
|
[
"sergejyurskyj@yahoo.com"
] |
sergejyurskyj@yahoo.com
|
4d7a13431523e52f557cb9288ca2df2918ca4592
|
6e9d6a682f20054e13d3764e95b8bd3b7b64fabf
|
/dailychallenge794.py
|
480c50e622ac834ef1ded759d169a5654425fa0f
|
[] |
no_license
|
SeanyDcode/codechallenges
|
30a271e04bc2b360bca923ae868be65a9533c8db
|
947cf3034911b381afaf777794d22d2af06aa5ba
|
refs/heads/master
| 2022-11-07T21:22:56.927863
| 2022-10-18T23:33:13
| 2022-10-18T23:33:13
| 154,498,776
| 1
| 0
| null | 2022-10-18T23:02:05
| 2018-10-24T12:38:45
|
Python
|
UTF-8
|
Python
| false
| false
| 515
|
py
|
# from dailycodingproblem.com
#
# Daily Challenge #794
# Implement a stack that has the following methods:
#
# push(val), which pushes an element onto the stack
# pop(), which pops off and returns the topmost element of the stack. If there are no elements in the stack, then
# it should throw an error or return null.
# max(), which returns the maximum value in the stack currently. If there are no elements in the stack, then it should
# throw an error or return null.
# Each method should run in constant time.
|
[
"noreply@github.com"
] |
SeanyDcode.noreply@github.com
|
cac5589c71a79b0bc92bda216d163e76b9777908
|
b4f0f536c721178a69128eba0afb39fde6f62ffb
|
/tests/e2e/redis_client.py
|
1a61fcb32ee7863df6606f0f932eb42ff2f39bfc
|
[
"MIT"
] |
permissive
|
jeantardelli/architecture-patterns-with-python
|
c049257febc369c7d213428019387fe19d38998a
|
d48c7d6d4a44073b815c7e6770e44cf2e231e35b
|
refs/heads/main
| 2023-05-27T05:12:12.951919
| 2021-06-04T18:48:43
| 2021-06-04T18:48:43
| 355,638,599
| 1
| 0
|
MIT
| 2021-06-04T18:48:44
| 2021-04-07T18:03:08
|
Python
|
UTF-8
|
Python
| false
| false
| 390
|
py
|
import json
import redis
from allocation import config
r = redis.Redis(**config.get_redis_host_and_port())
def subscribe_to(channel):
pubsub = r.pubsub()
pubsub.subscribe(channel)
confirmation = pubsub.get_message(timeout=3)
assert confirmation["type"] == "subscribe"
return pubsub
def publish_message(channel, message):
r.publish(channel, json.dumps(message))
|
[
"jeantardelli@gmail.com"
] |
jeantardelli@gmail.com
|
c54a4eef064dc72dd2b30f222d245099b69f7005
|
b1547d22520133f03c7529086f70d4ae83d24237
|
/script/conntect.py
|
adb439f4a913590a349ab9d91767eb1d6ecc3e6f
|
[] |
no_license
|
wrwahut/flask_demo
|
913b6a3e4cd56fadb834895f559d25af742f1a7f
|
699beaa1a6c9f0f413147ff237bb6256c651e953
|
refs/heads/master
| 2020-03-29T22:03:54.659686
| 2019-01-03T07:48:38
| 2019-01-03T07:48:38
| 150,401,389
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 917
|
py
|
# -*- coding:utf-8 -*-
import requests
from flask import request
import json
req_session = requests.session()
class Caller(object):
def __init__(self, url_fix,args={}):
self.url = "http://localhost:10002/resource/user/" + url_fix
self.args = args
self.headers = {
"content-type": "application/json",
"accept": "application/json"
}
# self.cookie = {
# "token": request.cookies.get("token", "None"),
# "lang": request.cookies.get("lang", "zh-CN")
# }
def _res_data(self, response):
res_data = response.json()
if not res_data.get("data"):
res_data["data"] = {}
return res_data
def post_req(self):
payload = json.dumps(self.args, "utf-8")
response = req_session.post(self.url, data=payload, headers=self.headers)
return self._res_data(response)
|
[
"root@localhost.localdomain"
] |
root@localhost.localdomain
|
00233f22e2e5ef66eb8018f58af6d447b2945cbb
|
dcbef06d5a00f07756339b9e62c684dec2fee425
|
/nuitka/code_generation/LoopCodes.py
|
b553f9f6b95a7db4207f65448e6f7735be2edebb
|
[
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer"
] |
permissive
|
Nuitka/Nuitka
|
f9543d8d95bfa0b81d4e60af0dfad99fb72893a4
|
d87faf2f7e1d6ed9bfe4cf8c1d648f34307e33f2
|
refs/heads/develop
| 2023-08-28T14:00:32.861328
| 2023-08-27T09:16:45
| 2023-08-27T09:16:45
| 9,626,741
| 8,573
| 599
|
Apache-2.0
| 2023-09-13T02:49:41
| 2013-04-23T15:40:33
|
Python
|
UTF-8
|
Python
| false
| false
| 3,141
|
py
|
# Copyright 2023, Kay Hayen, mailto:kay.hayen@gmail.com
#
# Part of "Nuitka", an optimizing Python compiler that is compatible and
# integrates with CPython, but also works on its own.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Loop codes.
Code generation for loops, breaking them, or continuing them. In Nuitka, there
are no for-loops or while-loops at this point. They have been re-formulated in
a simpler loop without a condition, and statements there-in that break under
certain conditions.
See Developer Manual for how the CPython loops are mapped to these nodes.
"""
from .CodeHelpers import generateStatementSequenceCode
from .ErrorCodes import getErrorExitBoolCode
from .ExceptionCodes import getExceptionUnpublishedReleaseCode
from .LabelCodes import getGotoCode, getLabelCode
def generateLoopBreakCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
break_target = context.getLoopBreakTarget()
getGotoCode(break_target, emit)
def generateLoopContinueCode(statement, emit, context):
# Functions used for generation all accept statement, but this one does
# not use it. pylint: disable=unused-argument
getExceptionUnpublishedReleaseCode(emit, context)
continue_target = context.getLoopContinueTarget()
getGotoCode(continue_target, emit)
def generateLoopCode(statement, emit, context):
loop_start_label = context.allocateLabel("loop_start")
if not statement.isStatementAborting():
loop_end_label = context.allocateLabel("loop_end")
else:
loop_end_label = None
getLabelCode(loop_start_label, emit)
old_loop_break = context.setLoopBreakTarget(loop_end_label)
old_loop_continue = context.setLoopContinueTarget(loop_start_label)
generateStatementSequenceCode(
statement_sequence=statement.subnode_loop_body,
allow_none=True,
emit=emit,
context=context,
)
context.setLoopBreakTarget(old_loop_break)
context.setLoopContinueTarget(old_loop_continue)
# Note: We are using the wrong line here, but it's an exception, it's unclear what line it would be anyway.
with context.withCurrentSourceCodeReference(statement.getSourceReference()):
getErrorExitBoolCode(
condition="CONSIDER_THREADING(tstate) == false", emit=emit, context=context
)
getGotoCode(loop_start_label, emit)
if loop_end_label is not None:
getLabelCode(loop_end_label, emit)
|
[
"kay.hayen@gmail.com"
] |
kay.hayen@gmail.com
|
9770456866da2c0a4a7485ed4ccefca3170983b2
|
ca7aa979e7059467e158830b76673f5b77a0f5a3
|
/Python_codes/p02417/s513271588.py
|
dd2618394ee3a3fd9727f547170f17cf79edd3e4
|
[] |
no_license
|
Aasthaengg/IBMdataset
|
7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901
|
f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8
|
refs/heads/main
| 2023-04-22T10:22:44.763102
| 2021-05-13T17:27:22
| 2021-05-13T17:27:22
| 367,112,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 285
|
py
|
import string
from sys import stdin
chs = dict([(ch, 0) for ch in string.ascii_lowercase])
for line in stdin:
for ch in line:
c = ch.lower()
if c not in chs:
continue
chs[c] += 1
for ch in string.ascii_lowercase:
print(ch, ':', chs[ch])
|
[
"66529651+Aastha2104@users.noreply.github.com"
] |
66529651+Aastha2104@users.noreply.github.com
|
20c35fdc0ddd1e4481b51b2d8e0f88a9989398f7
|
df770e5961aa7a0790314da663c426c8b3a93092
|
/python/adult_sklearn.py
|
f344a98e3b820e08c867ac05917d704ba14a9690
|
[] |
no_license
|
dengl11/CS315B-Project
|
be996d461582110e70c3f9e621d13d80ed78a160
|
0da493eb5f6030562d2d2d7546ac7c107f9d2879
|
refs/heads/master
| 2021-08-28T18:34:41.655923
| 2017-12-13T00:06:29
| 2017-12-13T00:06:29
| 108,506,314
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,339
|
py
|
############################################################
# My Decision Tree Classification
############################################################
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from my_decision_tree import *
# param
max_depth = 3
# max_depth = 6
def get_data(input):
"""return (X, y) for input
Args:
input:
Return:
"""
mat = np.loadtxt(input, skiprows=2)
y = mat[:, 0].astype(int)
X = mat[:, 1:].astype(np.float)
return (X, y)
X_train, y_train = get_data("../data/adult/adult_train_tiny.tsv")
X_test, y_test = get_data("../data/adult/adult_test_tiny.tsv")
# X_train, y_train = get_data("../data/adult/adult_train.tsv")
# X_test, y_test = get_data("../data/adult/adult_test.tsv")
# construct estimator
estimator = DecisionTreeClassifier(max_depth=max_depth , random_state=0)
# train
estimator.fit(X_train, y_train)
# [pred_train, pred_test]
predictions = [estimator.predict(x) for x in (X_train, X_test)]
# [acc_train, acc_test]
accuracys = [accuracy_score(p, y) for (p, y) in zip(predictions, (y_train, y_test))]
print("------------------- Sklearn Decision Tree -------------------")
print("Train Accuracy: {:.2f}".format(accuracys[0]))
print("Test Accuracy : {:.2f}".format(accuracys[1]))
|
[
"dengl11@stanford.edu"
] |
dengl11@stanford.edu
|
03733d50c66db99aab90d764c67c8102c1927d32
|
fdce456e2f0ea12f854e98583cfda95955b9a36b
|
/seekerbuilder/migrations/0016_auto_20210916_1303.py
|
ad0d13f013270eb5cd84ec0db637506231ce82d1
|
[] |
no_license
|
atifasr/jobportal
|
e5fdc8058759311e8d4ca2c0291066ad86059fb6
|
3fe211598daa66f2a76c2b3d4d26d73459ac7457
|
refs/heads/master
| 2023-08-05T02:01:00.870360
| 2021-09-29T11:59:29
| 2021-09-29T11:59:29
| 388,807,519
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,245
|
py
|
# Generated by Django 3.2.5 on 2021-09-16 07:33
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('job_management', '0019_auto_20210913_2147'),
('seekerbuilder', '0015_auto_20210914_1503'),
]
operations = [
migrations.AlterModelOptions(
name='educationdetail',
options={'verbose_name_plural': "Seekers' education details"},
),
migrations.AlterModelOptions(
name='experiencedetail',
options={'verbose_name_plural': "Seekers' experience details"},
),
migrations.AlterModelOptions(
name='seekerprofile',
options={'verbose_name_plural': "Seekers' profile"},
),
migrations.AlterModelOptions(
name='seekerskillset',
options={'verbose_name': 'Seeker skill set', 'verbose_name_plural': "Seeker's skills"},
),
migrations.RemoveField(
model_name='seekerskillset',
name='skill_set',
),
migrations.AddField(
model_name='seekerskillset',
name='skill_set',
field=models.ManyToManyField(to='job_management.Skillset'),
),
]
|
[
"atifshafi63@gmail.com"
] |
atifshafi63@gmail.com
|
c81c481c4164984cc48621ac820341764eda0f70
|
eec9c673984da80f42d2a296ee2cb068639db169
|
/tods/tests/feature_analysis/test_StastiticalStd.py
|
e0e968bb983bc7b56792b66f46ebdad5e1844590
|
[
"Apache-2.0"
] |
permissive
|
datamllab/tods
|
0766f48e7736fd2dca1cbc59fef019447039fed8
|
314dd6efc6ed3f8d25e100b08de4115edc636e14
|
refs/heads/master
| 2023-09-03T22:44:31.929096
| 2023-08-24T17:21:27
| 2023-08-24T17:21:27
| 293,719,013
| 1,094
| 175
|
Apache-2.0
| 2023-08-24T17:21:28
| 2020-09-08T06:18:12
|
Python
|
UTF-8
|
Python
| false
| false
| 5,434
|
py
|
import unittest
from d3m import container, utils
from d3m.metadata import base as metadata_base
from tods.feature_analysis import StatisticalStd
class StatisticalStdTestCase(unittest.TestCase):
def test_basic(self):
self.maxDiff=None
main = container.DataFrame({'timestamp': [1, 3, 2, 5], 'values': [1.0, 2.0, 3.0, 4.0], 'b': [1.0, 4.0, 5.0, 6.0]},
columns=['timestamp', 'values', 'b'],
generate_metadata=True)
self.assertEqual(utils.to_json_structure(main.metadata.to_internal_simple_structure()), [{
'selector': [],
'metadata': {
# 'top_level': 'main',
'schema': metadata_base.CONTAINER_SCHEMA_VERSION,
'structural_type': 'd3m.container.pandas.DataFrame',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
'dimension': {
'name': 'rows',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularRow'],
'length': 4,
},
},
}, {
'selector': ['__ALL_ELEMENTS__'],
'metadata': {
'dimension': {
'name': 'columns',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/TabularColumn'],
'length': 3,
},
},
}, {
'selector': ['__ALL_ELEMENTS__', 0],
'metadata': {'structural_type': 'numpy.int64', 'name': 'timestamp'},
}, {
'selector': ['__ALL_ELEMENTS__', 1],
'metadata': {'structural_type': 'numpy.float64', 'name': 'values'},
}, {
'selector': ['__ALL_ELEMENTS__', 2],
'metadata': {'structural_type': 'numpy.float64', 'name': 'b'},
}])
hyperparams_class = StatisticalStd.StatisticalStdPrimitive.metadata.get_hyperparams()
hp = hyperparams_class.defaults().replace({
'use_columns': [1,2],
'use_semantic_types' : True,
'window_size':2
})
primitive = StatisticalStd.StatisticalStdPrimitive(hyperparams=hp)
output_main = primitive._produce(inputs=main).value
print(output_main)
expected_output = container.DataFrame(
{'timestamp': [1, 3, 2, 5], 'values': [1.0, 2.0, 3.0, 4.0], 'b': [1.0, 4.0, 5.0, 6.0],
'values_std': [0.5, 0.5, 0.5, 0.5], 'b_std': [1.5, 1.5, 0.5, 0.5]},
columns=['timestamp', 'values', 'b', 'values_std', 'b_std'])
self.assertEqual(output_main[['timestamp', 'values', 'b', 'values_std',
'b_std']].values.tolist(), expected_output[
['timestamp', 'values', 'b', 'values_std', 'b_std'
]].values.tolist())
self.assertEqual(utils.to_json_structure(output_main.metadata.to_internal_simple_structure()),
[{'metadata': {'dimension': {'length': 4,
'name': 'rows',
'semantic_types': [
'https://metadata.datadrivendiscovery.org/types/TabularRow']},
'schema': 'https://metadata.datadrivendiscovery.org/schemas/v0/container.json',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Table'],
'structural_type': 'd3m.container.pandas.DataFrame'},
'selector': []},
{'metadata': {'dimension': {'length': 5,
'name': 'columns',
'semantic_types': [
'https://metadata.datadrivendiscovery.org/types/TabularColumn']}},
'selector': ['__ALL_ELEMENTS__']},
{'metadata': {'name': 'timestamp', 'structural_type': 'numpy.int64'},
'selector': ['__ALL_ELEMENTS__', 0]},
{'metadata': {'name': 'values', 'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 1]},
{'metadata': {'name': 'b', 'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 2]},
{'metadata': {'name': 'values_std',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 3]},
{'metadata': {'name': 'b_std',
'semantic_types': ['https://metadata.datadrivendiscovery.org/types/Attribute'],
'structural_type': 'numpy.float64'},
'selector': ['__ALL_ELEMENTS__', 4]},
])
params = primitive.get_params()
primitive.set_params(params=params)
if __name__ == '__main__':
unittest.main()
|
[
"khlai037@gmail.com"
] |
khlai037@gmail.com
|
110218098957e307e9f699349df924065f373b44
|
f4d0c26d3aa27c77a7c27d9002a08465a0638cbb
|
/csv_schema/migrations/0008_auto_20171025_1410.py
|
5a5a669d07a6928196bbb33dce2d54f89af67d33
|
[] |
no_license
|
uk-gov-mirror/nhsengland.NCDR-reference-library
|
3afe0711f47dc1b5fa25646bc870a806b3512ce5
|
cac30ee0787e81fb9868731576c242c7ea3dbde8
|
refs/heads/master
| 2023-04-03T15:10:19.320708
| 2017-11-03T15:03:27
| 2017-11-03T15:03:27
| 356,799,453
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 427
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-10-25 14:10
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('csv_schema', '0007_auto_20171025_1119'),
]
operations = [
migrations.AlterUniqueTogether(
name='row',
unique_together=set([('table', 'data_item')]),
),
]
|
[
"fredkingham@gmail.com"
] |
fredkingham@gmail.com
|
6750c234e65cc1a9d0a5a6882b55fffe847f320d
|
781e2692049e87a4256320c76e82a19be257a05d
|
/assignments/python/wc/src/884.py
|
a8d74bc04bec198c408a24561663646aefed2414
|
[] |
no_license
|
itsolutionscorp/AutoStyle-Clustering
|
54bde86fe6dbad35b568b38cfcb14c5ffaab51b0
|
be0e2f635a7558f56c61bc0b36c6146b01d1e6e6
|
refs/heads/master
| 2020-12-11T07:27:19.291038
| 2016-03-16T03:18:00
| 2016-03-16T03:18:42
| 59,454,921
| 4
| 0
| null | 2016-05-23T05:40:56
| 2016-05-23T05:40:56
| null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
def word_count(phrase):
phrase = phrase.split()
occurences = {}
for word in set(phrase):
occurences[word] = phrase.count(word)
return occurences
|
[
"rrc@berkeley.edu"
] |
rrc@berkeley.edu
|
d313990cc102d01487a3785a466d81c1356d418e
|
d3dc1d50f683566c9d71722f218afc9340ed6ab5
|
/mql.tolog/tests/test_tolog_converter.py
|
c083f199c167dc39587fb81161ecaf545da5e0ec
|
[] |
no_license
|
heuer/mappa
|
b20ec8a61979a75802af19803c54ee339e65807c
|
fc89cf32560d2e9ea6b380127b77fb3587bbd06c
|
refs/heads/master
| 2021-01-23T15:31:54.242393
| 2015-03-16T17:17:35
| 2015-03-16T17:17:35
| 32,339,134
| 5
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,721
|
py
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2007 - 2014 -- Lars Heuer - Semagia <http://www.semagia.com/>.
# All rights reserved.
#
# BSD license.
#
"""\
Tests against the "back-to-tolog" stylesheet.
:author: Lars Heuer (heuer[at]semagia.com)
:organization: Semagia - <http://www.semagia.com/>
:license: BSD License
"""
import os
import io
import json
import glob
from nose.tools import eq_
from mql import tolog
from mql.tolog import xsl
_IGNORE = (
'fold-type-assoc.tl',
'tolog-tut-2-4_2.tl',
'topic-types.tl',
'topic-types2.tl',
'fold-scope-name.tl',
'fold-scope-occ.tl',
'create-dyn-occ3.tl',
)
def fail(msg): raise AssertionError(msg)
def test_tolog_plus():
base_dir = os.path.abspath('./xsltests/')
with open(os.path.join(base_dir, 'query2optimizers.json'), 'rb') as f:
query2optimizers = json.load(f)
tolog_dir = os.path.abspath(os.path.join(base_dir, './in/'))
found_files = set([os.path.basename(fn) for fn in glob.glob(tolog_dir + '/*.tl')])
baseline_dir = os.path.join(base_dir, './baseline/')
for fn in query2optimizers:
if fn in _IGNORE:
continue
found_files.remove(fn)
optimizers = ['query-c14n']
optimizers.extend(query2optimizers[fn])
filename = os.path.join(tolog_dir, fn)
f = open(filename, 'rb')
# 1. Apply optimizers and return tolog+
tl = tolog.convert_to_tolog_plus(f, optimizers=optimizers)
# 2. Parse created tolog+
try:
tree = tolog.parse_to_etree(tl, iri='http://www.example.org/mql-tolog/', tolog_plus=True)
except Exception, ex:
fail('Error: %r in %s' % (ex, tl))
# 3. Apply optimizers to the newly parsed query
res = xsl.apply_transformations(tree, optimizers)
out = io.BytesIO()
res.write_c14n(out)
expected = io.open(os.path.join(baseline_dir, fn + '.c14n'), encoding='utf-8').read()
yield eq_, expected, out.getvalue(), 't+: %s\n%s' % (fn, tl)
for fn in _IGNORE:
found_files.remove(fn)
if found_files:
raise Exception('Found more files in the directory: %r' % found_files)
def test_tolog():
base_dir = os.path.abspath('./xsltests/')
with open(os.path.join(base_dir, 'query2optimizers.json'), 'rb') as f:
query2optimizers = json.load(f)
tolog_dir = os.path.abspath(os.path.join(base_dir, './in/'))
found_files = set([os.path.basename(fn) for fn in glob.glob(tolog_dir + '/*.tl')])
baseline_dir = os.path.join(base_dir, './baseline/')
for fn in query2optimizers:
if fn in _IGNORE:
continue
found_files.remove(fn)
optimizers = ['query-c14n']
optimizers.extend(query2optimizers[fn])
filename = os.path.join(tolog_dir, fn)
f = open(filename, 'rb')
# 1. Apply optimizers and return tolog
tl = tolog.convert_to_tolog(f, optimizers=optimizers)
# 2. Parse created tolog+
try:
tree = tolog.parse_to_etree(tl, iri='http://www.example.org/mql-tolog/', tolog_plus=False)
except Exception, ex:
fail('Error: %r in %s' % (ex, tl))
# 3. Apply optimizers to the newly parsed query
res = xsl.apply_transformations(tree, optimizers)
out = io.BytesIO()
res.write_c14n(out)
expected = io.open(os.path.join(baseline_dir, fn + '.c14n'), encoding='utf-8').read()
yield eq_, expected, out.getvalue(), 't: %s' % fn
for fn in _IGNORE:
found_files.remove(fn)
if found_files:
raise Exception('Found more files in the directory: %r' % found_files)
if __name__ == '__main__':
import nose
nose.core.runmodule()
|
[
"Lars@localhost"
] |
Lars@localhost
|
87ea03bc8f13ab1d0b3226d73cc1dd151a73eb2f
|
e8f76b7162c9781a4457cd06f5405925a9a18593
|
/vbb_backend/users/admin.py
|
df843087d8aa089373a17f4662347cf93c4abe73
|
[
"MIT"
] |
permissive
|
wasswarichard/backend-vbb-portal
|
bcb54d4cf63f91862704ef5f6e5953b76d8839e8
|
8e4deec8a6e71b17da3476b0a05dbfe73d547b55
|
refs/heads/master
| 2023-03-24T21:09:25.364768
| 2021-03-16T18:53:26
| 2021-03-16T18:53:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 438
|
py
|
from django.contrib import admin
from vbb_backend.users.models import User, Mentor, Student, HeadMaster
from django.contrib.auth.admin import UserAdmin
class MyUserAdmin(UserAdmin):
model = User
fieldsets = UserAdmin.fieldsets + (
(None, {"fields": ("user_type", "external_id")}),
)
admin.site.register(User, MyUserAdmin)
admin.site.register(Mentor)
admin.site.register(Student)
admin.site.register(HeadMaster)
|
[
"vichuhari100@gmail.com"
] |
vichuhari100@gmail.com
|
1fd18852d0cbcda793b74043c834504fd069e531
|
e296f0f3d7db598aba5658de3ff8c767634e533e
|
/zoo/migrations/069_add_columns_is_visible_and_moderated_by_and_moderated_at_to_photos_photo.py
|
c04134e45dbcd4f0f9271346ff9617625c56833f
|
[] |
no_license
|
devfort/wildlifenearyou
|
b2ac05070aa6face60156d6e7c85f98f00013c25
|
8e618aea90bbcedc45a4e30199e31880ea9e6dca
|
refs/heads/master
| 2021-01-13T01:25:29.467549
| 2010-06-10T06:37:43
| 2010-06-10T06:37:43
| 7,874,317
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 481
|
py
|
from django.conf import settings
if settings.DATABASE_ENGINE == 'mysql':
from dmigrations.mysql import migrations as m
elif settings.DATABASE_ENGINE == 'sqlite3':
from dmigrations.sqlite3 import migrations as m
import datetime
migration = m.Compound([
m.AddColumn('photos', 'photo', 'is_visible', 'bool NOT NULL'),
m.AddColumn('photos', 'photo', 'moderated_at', 'datetime NULL'),
m.AddColumn('photos', 'photo', 'moderated_by', 'integer NULL', 'auth_user'),
])
|
[
"simon@simonwillison.net"
] |
simon@simonwillison.net
|
4886128779480e9bd970b67106abee8174e3da54
|
b0ef0b7b7752564b703b4438e2624a4645299006
|
/usedcar/userinfo/models.py
|
3e3221136a2010eb538098c2a6e27d5b90d3e2aa
|
[] |
no_license
|
beantomemory/django
|
66f1ff3f7fbc72df18ee01e394e733b1135fb01c
|
1b588cf3888724a5f4d86df04d7ebc91d7f20000
|
refs/heads/master
| 2020-04-19T09:17:32.771039
| 2019-01-29T07:21:01
| 2019-01-29T07:21:01
| 168,105,733
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,989
|
py
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
sex_choice = (
(0, "男"),
(1, "女"),
)
role_choice = (
(0, "买家"),
(1, "卖家"),
(2, "平台"),
)
bank_choice = (
(0, "中国工商银行"),
(1, "中国建设银行"),
(2, "中国农业银行"),
(3, "招商银行"),
(4, "北京银行"),
(5, "我家银行"),
)
class Userinfo(AbstractUser):
# username = models.CharField(verbose_name="用户名", max_length=30, null=False)
# password = models.CharField(verbose_name="密码", max_length=200, null = False)
realname = models.CharField(verbose_name="真实姓名", max_length=30, null=False)
iden = models.CharField(verbose_name="身份证号", max_length=18, null=False)
ads = models.CharField(verbose_name="地址", max_length=200, null=False)
uphone = models.CharField(verbose_name="手机号", max_length=20, null=False)
sex = models.IntegerField(verbose_name="性别", choices=sex_choice, default=0)
role = models.IntegerField(verbose_name="角色", choices=role_choice, default=0)
isactive = models.BooleanField(verbose_name="是否激活", default = False)
isban = models.BooleanField(verbose_name="是否禁用", default = False)
def __str__(self):
return self.username
class Meta:
db_table = "userinfo"
verbose_name = "用户信息"
verbose_name_plural = verbose_name
class Bank(models.Model):
cardno = models.CharField("卡号", max_length=30, null=False)
user = models.ForeignKey(Userinfo)
cpwd = models.CharField("交易密码", max_length=200, null=False)
bank = models.IntegerField("开户银行", choices=bank_choice, default=0)
isdelete = models.BooleanField("是否删除", default=False)
def __str__(self):
return self.bank
class Meta:
db_table = "bank"
verbose_name = "银行卡"
verbose_name_plural = verbose_name
|
[
"tarena@tedu.cn"
] |
tarena@tedu.cn
|
7554d991424a83b97388e0f2edccaa713a4db8e9
|
7f114a1fb511b816c116d5b9e67cb998e3e23956
|
/PyplayS31.py
|
c0da34dbd4a6b96011dfd4aadb0932634bbc90f4
|
[] |
no_license
|
Bharanij27/bharanirep
|
90ac34eb28deaa7ec96d042de456de71b96866d7
|
982133a7939c889d433c178a601441fa087293d9
|
refs/heads/master
| 2021-08-07T20:22:36.244395
| 2020-06-05T04:58:10
| 2020-06-05T04:58:10
| 186,580,768
| 0
| 6
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 136
|
py
|
n=list(input())
s=0
for i in range(0,len(n)):
if n[i]=='(': s=s+1
elif n[i]==')': s=s-1
if s==0: print("yes")
else: print("no")
|
[
"noreply@github.com"
] |
Bharanij27.noreply@github.com
|
704a4aee8b6f27bb5942d6c99ff9aad57ada94b8
|
d2f50124ff3bec70b9b3139ecb063b06e526781d
|
/biable/migrations/0030_auto_20170104_1540.py
|
e5309f4b5538cf1b5c001ee7667bcac85e3fe29f
|
[] |
no_license
|
odecsarrollo/odecopack-componentes
|
e8d993f089bf53bbf3c53d1265e70ac5c06b59b8
|
b583a115fb30205d358d97644c38d66636b573ff
|
refs/heads/master
| 2022-12-12T00:33:02.874268
| 2020-08-13T18:45:01
| 2020-08-13T18:45:01
| 189,262,705
| 0
| 0
| null | 2022-12-08T11:23:46
| 2019-05-29T16:37:21
|
Python
|
UTF-8
|
Python
| false
| false
| 872
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2017-01-04 20:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('biable', '0029_auto_20170104_1528'),
]
operations = [
migrations.AlterField(
model_name='vendedorbiable',
name='colaborador',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.PROTECT, related_name='mi_vendedor_biable', to='usuarios.UserExtended'),
),
migrations.AlterField(
model_name='vendedorbiableuser',
name='usuario',
field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='mis_vendedores', to='usuarios.UserExtended'),
),
]
|
[
"fabio.garcia.sanchez@gmail.com"
] |
fabio.garcia.sanchez@gmail.com
|
ef1d6fd47f5542551fbfe13739016b1565abc26b
|
1bddfbc901946b6cdef47e5325626d26a9865a51
|
/setup.py
|
c7120cb1461fa0651511e8b384ddc401e186d6a8
|
[] |
no_license
|
kagesenshi/dkiscm.importer
|
ce56eccf70ac776692a0e1015d3e5bc311680979
|
c255bca2a755cd3681106c5d3ee2f917de359e2b
|
refs/heads/master
| 2016-09-11T00:58:54.109283
| 2013-10-01T20:05:23
| 2013-10-01T20:05:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,667
|
py
|
from setuptools import setup, find_packages
import os
version = '1.4.dev0'
setup(name='dkiscm.importer',
version=version,
description="",
long_description=open("README.rst").read() + "\n" +
open(os.path.join("docs", "HISTORY.rst")).read(),
# Get more strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
"Framework :: Plone",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='Inigo Consulting',
author_email='team@inigo-tech.com',
url='http://github.com/inigoconsulting/',
license='gpl',
packages=find_packages(),
namespace_packages=['dkiscm'],
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'plone.app.dexterity',
'plone.namedfile [blobs]',
'collective.grok',
'plone.app.referenceablebehavior',
'collective.dexteritytextindexer',
'plone.app.multilingual',
'plone.multilingualbehavior',
# -*- Extra requirements: -*-
],
extras_require={
'test': [
'plone.app.testing',
],
},
entry_points="""
# -*- Entry points: -*-
[z3c.autoinclude.plugin]
target = plone
""",
# The next two lines may be deleted after you no longer need
# addcontent support from paster and before you distribute
# your package.
setup_requires=["PasteScript"],
paster_plugins=["templer.localcommands"],
)
|
[
"izhar@inigo-tech.com"
] |
izhar@inigo-tech.com
|
56702c50ea0061115156969d39cfedd65bcc9d30
|
63c0a3e5599be2a038bb713abc69584db42a3aae
|
/system_status.py
|
baf403826f3a9dc1bef32657a89548c790de8370
|
[] |
no_license
|
dmitryduev/sserv-njs
|
6ccc98512547ba399e62dd83a99eef6ffe92c075
|
17e53069a2804506aca2a2b984ab465c5b5ff718
|
refs/heads/master
| 2020-05-22T04:39:53.115636
| 2018-04-11T06:44:43
| 2018-04-11T06:44:43
| 49,021,310
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,005
|
py
|
from __future__ import print_function
import time
import psutil
import datetime
import json
import traceback
import argparse
if __name__ == '__main__':
''' Create command line argument parser '''
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter,
description='Manage data archive for Robo-AO')
parser.add_argument('config_file', metavar='config_file',
action='store', help='path to config file.', type=str)
args = parser.parse_args()
# read in config
with open(args.config_file) as cjson:
config = json.load(cjson)
# config must not be empty:
if len(config) == 0:
raise Exception('Failed to load config file')
while 1:
# construct line with telemetry
try:
# UTC running? start_time #_enqueued_tasks system_CPU_usage_% system_memory_usage_%
_utc_now = datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3]
_cpu_usage = psutil.cpu_percent(interval=None)
_mem_usage = psutil.virtual_memory().percent
_root = psutil.disk_usage('/').percent
_data = psutil.disk_usage('/Data').percent
_data_1 = psutil.disk_usage('/Data1').percent
_data_2 = psutil.disk_usage('/Data2').percent
_data_3 = psutil.disk_usage('/Data3').percent
_data_4 = psutil.disk_usage('/Data4').percent
_t = '{:s} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
_utc_now, _cpu_usage, _mem_usage,
_root, _data, _data_1, _data_2, _data_3, _data_4)
with open(config['status']['Control machine status']['data-file'], 'w') as _f:
_f.write(_t)
except Exception as _e:
print(_e)
traceback.print_exc()
# take a nap
time.sleep(0.95)
|
[
"dmitry.duev@gmail.com"
] |
dmitry.duev@gmail.com
|
c6de41952c29d959461ae7ba8c7269444ef04189
|
2c20cd2b84b44f6711d53e02f681b455100d5bdc
|
/setup.py
|
e3f064e118ecf33fe25b41078bef36e51b3bfdde
|
[
"MIT"
] |
permissive
|
YAtOff/s3rsync
|
a28c69c21a1091ee3d53362af8a9d40357d0469a
|
ebb36e47602491ef7dcb073bed9527f6243e317b
|
refs/heads/master
| 2023-05-12T04:05:19.950099
| 2020-01-10T10:57:28
| 2020-01-10T10:57:28
| 224,131,742
| 0
| 1
|
MIT
| 2023-05-01T21:17:12
| 2019-11-26T07:43:08
|
Python
|
UTF-8
|
Python
| false
| false
| 184
|
py
|
from setuptools import setup, find_packages
setup(
name="s3rsync",
version="1.0.0",
description="",
packages=find_packages(exclude=("tests",)),
zip_safe=False,
)
|
[
"yavor.atov@gmail.com"
] |
yavor.atov@gmail.com
|
35c2d0ab34307b215c50250f7de343a66666276a
|
9b5c995b247803b64895223fc51f407e9da2df45
|
/IQ/Files/create_file_and_write_data.py
|
2cc9b05c3f86d4e7f3dbc1997bc0e34c16dac890
|
[] |
no_license
|
Shyam-Personal/python_repo
|
3453156ed73efaa91fa5e01dd15a1a0e664d3d22
|
1809de5afbecc1fd17cd70ae80a1eb4b9282d554
|
refs/heads/master
| 2021-01-25T14:10:30.394842
| 2019-09-23T13:26:47
| 2019-09-23T13:26:47
| 123,660,813
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
"""
Create 10 files of 1MB size each.
"""
def main():
try:
#Solution 1
data = "A"*1024*1024
for i in range(10):
filename = "Test_{:03d}".format(i)
with open(filename, "w") as fh:
fh.write(data)
except Exception as e:
print("Exception while executing program. Error details: {}".format(str(e)))
if __name__ == "__main__":
main()
|
[
"shyamdeshmukh1@gmail.com"
] |
shyamdeshmukh1@gmail.com
|
272486de2b4247c949cc78cd8e0754d37480de58
|
b2913030cf1646310b08efaa57c2199bb08e37c9
|
/general/apero_updates/inprogress/is_night_obs.py
|
c5c6c4306f1de856f7212981e99d50db3911ae68
|
[
"MIT"
] |
permissive
|
njcuk9999/apero-utils
|
6f5b5083537562a31573b5c4cc76908c5fe194b9
|
368d53182428ca8befcdd3e5c8ca054f61913711
|
refs/heads/master
| 2023-08-31T02:56:01.369406
| 2023-08-18T15:12:59
| 2023-08-18T15:12:59
| 238,777,509
| 3
| 5
|
MIT
| 2023-08-17T14:15:41
| 2020-02-06T20:24:49
|
Python
|
UTF-8
|
Python
| false
| false
| 2,454
|
py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
# CODE NAME HERE
# CODE DESCRIPTION HERE
Created on 2023-01-31 at 11:24
@author: cook
"""
import numpy as np
import matplotlib.pyplot as plt
from astropy.io import fits
from astropy.table import Table
from astropy import units as u
from tqdm import tqdm
import warnings
# =============================================================================
# Define variables
# =============================================================================
# -----------------------------------------------------------------------------
# =============================================================================
# Define functions
# =============================================================================
def function1():
return 0
# =============================================================================
# Start of code
# =============================================================================
# Main code here
if __name__ == "__main__":
# ----------------------------------------------------------------------
from astropy.io import fits
import astropy.coordinates as coord
import astropy.units as u
from astropy.coordinates import get_sun, AltAz
from astropy.time import Time
file = 'NIRPS_2023-01-20T08_49_12_510_pp_e2dsff_tcorr_A.fits'
# get the header
h = fits.getheader(file)
lat = h['BC_LAT'] # latitude
lon = h['BC_LONG'] # longitude
mjd = h['MJD-OBS'] # Modified Julian day
sun_time = Time(mjd, format='mjd') # UTC time
loc = coord.EarthLocation(lon=lon * u.deg,
lat=lat * u.deg)
altaz = AltAz(obstime=sun_time, location=loc)
sun_elevation = 90 - get_sun(sun_time).transform_to(altaz).zen.value
# Leval definition of twilight angles
CIV_TWIL = sun_elevation < (-6) # suggestion for Civil twilight keyword
NAU_TWIL = sun_elevation < (-12) # suggestion for Nautical twilight keyword
AST_TWIL = sun_elevation < (-18) # suggestion for Astronomical twilight keyword
print('Civil twilight : {}\n'
'Nautical twilight : {}\n'
'Astron twilight : {}'.format(CIV_TWIL, NAU_TWIL, AST_TWIL))
print('Sun elevation : {:.1f} deg'.format(sun_elevation))
# =============================================================================
# End of code
# =============================================================================
|
[
"neil.james.cook@gmail.com"
] |
neil.james.cook@gmail.com
|
182247988bf376661723c18e69b7095523833c84
|
04eb5ed2afbd0b2a190e38a48f1c8b86f63b5497
|
/client/tests/mocks.py
|
c7f5fe1971df6068e73fca1512cc9aca0a5d54af
|
[
"MIT"
] |
permissive
|
Playfloor/pyre-check
|
04d671c63ce882891f978c8d1f6540d236dd22ab
|
2e8b86fe7ed9fd84a026c188d08877a77b142309
|
refs/heads/main
| 2023-08-21T11:08:30.229589
| 2021-10-06T03:20:01
| 2021-10-06T03:21:24
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,028
|
py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-unsafe
from pathlib import Path
from typing import Optional
from unittest.mock import MagicMock
from .. import command_arguments, configuration as configuration_module
from ..analysis_directory import AnalysisDirectory
from ..commands.command import IncrementalStyle
from ..commands.incremental import Incremental
from ..configuration import SharedMemory
def mock_arguments(
debug: bool = False,
changed_files_path=None,
enable_profiling: bool = False,
enable_memory_profiling: bool = False,
features=None,
load_initial_state_from=None,
local_configuration=None,
log_identifier: str = "",
no_saved_state: bool = False,
output: str = command_arguments.TEXT,
save_initial_state_to=None,
saved_state_project=None,
sequential: bool = False,
source_directories=None,
targets=None,
dot_pyre_directory: Optional[Path] = None,
) -> command_arguments.CommandArguments:
return command_arguments.CommandArguments(
local_configuration=local_configuration,
version=False,
debug=debug,
sequential=sequential,
strict=False,
additional_checks=[],
show_error_traces=False,
output=output,
enable_profiling=enable_profiling,
enable_memory_profiling=enable_memory_profiling,
noninteractive=True,
logging_sections=None,
log_identifier=log_identifier,
logger=None,
targets=targets or [],
use_buck_builder=False,
use_buck_source_database=False,
source_directories=source_directories or [],
filter_directory=None,
buck_mode=None,
no_saved_state=no_saved_state,
search_path=["some_path"],
binary="/foo/binary.exe",
buck_builder_binary=None,
exclude=[],
typeshed="/typeshed",
save_initial_state_to=save_initial_state_to,
load_initial_state_from=load_initial_state_from,
changed_files_path=changed_files_path,
saved_state_project=saved_state_project,
dot_pyre_directory=dot_pyre_directory or Path(".pyre"),
features=features,
python_version="3.6.0",
shared_memory_heap_size=1024 * 1024 * 1024,
)
def mock_configuration(version_hash=None, file_hash=None) -> MagicMock:
configuration = MagicMock()
configuration.project_root = "/root"
configuration.local_root = None
configuration.strict = False
configuration.source_directories = ["."]
configuration.logger = None
configuration.get_number_of_workers = lambda: 5
configuration.search_path = []
configuration.taint_models_path = []
configuration.get_typeshed_respecting_override = lambda: "stub"
configuration.get_version_hash_respecting_override = lambda: version_hash
configuration.file_hash = file_hash
configuration.local_root = None
configuration.autocomplete = False
configuration.dot_pyre_directory = Path(".pyre")
configuration.relative_local_root = None
configuration.log_directory = ".pyre"
configuration.disabled = False
configuration.get_python_version = lambda: configuration_module.PythonVersion(
major=3, minor=6, micro=0
)
configuration.shared_memory = SharedMemory(heap_size=1024 * 1024 * 1024)
return configuration
def mock_incremental_command(cfg: configuration_module.Configuration) -> Incremental:
arguments = mock_arguments()
analysis_directory = AnalysisDirectory(
configuration_module.SimpleSearchPathElement(".")
)
return Incremental(
arguments,
original_directory="/original/directory",
configuration=cfg,
analysis_directory=analysis_directory,
nonblocking=False,
incremental_style=IncrementalStyle.FINE_GRAINED,
no_start_server=False,
no_watchman=False,
)
|
[
"facebook-github-bot@users.noreply.github.com"
] |
facebook-github-bot@users.noreply.github.com
|
7281a1d2b651d0b7b363e815d9c84fe19146565a
|
116aadef9866be33782c6cbd06901703728295cc
|
/tests/conftest.py
|
3b640c81e07bc3e0f36f219c27129fc10614ecc5
|
[
"Apache-2.0"
] |
permissive
|
dracos/datasette-tiles
|
9c4cf6ca683a703f08e1f69cbc4def3694d7bcc3
|
f7aa1a49df23584445cf154ad0e3e6d750965b15
|
refs/heads/main
| 2023-02-28T22:33:08.331682
| 2021-02-03T22:21:57
| 2021-02-03T22:21:57
| 335,932,265
| 0
| 0
| null | 2021-02-04T11:24:40
| 2021-02-04T11:24:39
| null |
UTF-8
|
Python
| false
| false
| 2,211
|
py
|
import asyncio
from datasette.app import Datasette
from datasette.database import Database
import pytest
CREATE_TILES_TABLE = "CREATE TABLE tiles (zoom_level integer, tile_column integer, tile_row integer, tile_data blob)"
CREATE_METADATA_TABLE = "CREATE TABLE metadata (name text, value text)"
@pytest.fixture(scope="module")
async def ds():
datasette = Datasette([], memory=True)
await datasette.invoke_startup()
return datasette
# Needed because of https://stackoverflow.com/a/56238383
# to allow me to use scope="module" on the ds() fixture below
@pytest.fixture(scope="module")
def event_loop():
loop = asyncio.get_event_loop()
yield loop
loop.close()
@pytest.fixture(scope="module")
async def ds_tiles_stack():
return await ds_tiles()
@pytest.fixture(scope="module")
async def ds_tiles_stack_with_stack_order():
return await ds_tiles(
{
"plugins": {
"datasette-tiles": {"tiles-stack-order": ["world", "country", "city2"]}
}
}
)
async def ds_tiles(metadata=None):
datasette = Datasette([], metadata=metadata or {}, memory=True)
for db_name, tiles in (
("world", [[1, 1, 1]]),
("country", [[1, 1, 2], [1, 2, 2]]),
("city1", [[1, 2, 2]]),
("city2", [[1, 3, 3]]),
):
db = datasette.add_database(Database(datasette, memory_name=db_name))
# During test runs database tables may exist already
if await db.table_exists("tiles"):
continue
await db.execute_write(CREATE_TILES_TABLE, block=True)
await db.execute_write(CREATE_METADATA_TABLE, block=True)
for pair in (("name", db_name), ("format", "png")):
await db.execute_write(
"insert into metadata (name, value) values (?, ?)",
pair,
block=True,
)
for tile in tiles:
await db.execute_write(
"insert into tiles (zoom_level, tile_column, tile_row, tile_data) values (?, ?, ?, ?)",
tile + [db_name + ":" + "/".join(map(str, tile))],
block=True,
)
await datasette.invoke_startup()
return datasette
|
[
"swillison@gmail.com"
] |
swillison@gmail.com
|
292b77898a0865b25cff82a3cada848553b42769
|
1e37cc605d52a8264329ba23e2bc7a74408b0f22
|
/chrome_from_url.py
|
11afc3ec2f015d2f264d0fd1b2ff6d06e5837a79
|
[] |
no_license
|
maasano/download_file_from_web
|
7df45639a26f9911bb0ae42727d7a5acfb396659
|
fa6ce6d57f30c71e9ccd982a3e6f6e314187c8c6
|
refs/heads/master
| 2022-03-06T06:19:47.662728
| 2019-12-06T04:07:42
| 2019-12-06T04:07:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
# URL でダウンロード
from selenium import webdriver
import os
import time
import csv
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import urllib.request
driver = webdriver.Chrome()
url = "https://pythonchannel.com/media/codecamp/201908-/scrape-test.html"
driver.get(url)
file_url = driver.find_element_by_tag_name("a").get_attribute("href")
urllib.request.urlretrieve(file_url, "□my_download.csv")
time.sleep(3)
#driver.close()
|
[
"YOUR@EMAIL.com"
] |
YOUR@EMAIL.com
|
99613f69b3d7edc99a5dc7a75e483837ae852e7c
|
0fa00ecf2dd671515dc001d4b14049ec6a0c1f1c
|
/custom_components/spook/ectoplasms/repairs/services/create.py
|
6d46089ce9dab0e2aa31caa286d4f9116eebb2cd
|
[
"Unlicense"
] |
permissive
|
bacco007/HomeAssistantConfig
|
d91a5368344f50abbea881bd1e6dfc57a0e456ca
|
8548d9999ddd54f13d6a307e013abcb8c897a74e
|
refs/heads/master
| 2023-08-30T07:07:33.571959
| 2023-08-29T20:00:00
| 2023-08-29T20:00:00
| 230,585,631
| 98
| 16
|
Unlicense
| 2023-09-09T08:28:39
| 2019-12-28T09:05:02
|
Python
|
UTF-8
|
Python
| false
| false
| 1,658
|
py
|
"""Spook - Not your homie."""
from __future__ import annotations
from typing import TYPE_CHECKING
import voluptuous as vol
from homeassistant.components.repairs import DOMAIN as REPAIRS_DOMAIN
from homeassistant.helpers import config_validation as cv, issue_registry as ir
from homeassistant.util.ulid import ulid
from ....const import DOMAIN
from ....services import AbstractSpookService
if TYPE_CHECKING:
from homeassistant.core import ServiceCall
class SpookService(AbstractSpookService):
    """Home Assistant Repairs service to create your own issues."""

    # Registered under the Repairs integration as the `create` service.
    domain = REPAIRS_DOMAIN
    service = "create"
    # Service-call schema. NOTE(review): `default=ulid` passes a callable;
    # voluptuous evaluates callable defaults per call, so each invocation
    # without an explicit issue_id should get a fresh ULID -- confirm.
    schema = {
        vol.Required("title"): cv.string,
        vol.Required("description"): cv.string,
        vol.Optional("issue_id", default=ulid): cv.string,
        vol.Optional("domain", default=DOMAIN): cv.string,
        vol.Optional("severity", default=ir.IssueSeverity.WARNING): vol.Coerce(
            ir.IssueSeverity,
        ),
        vol.Optional("persistent", default=False): cv.boolean,
    }

    async def async_handle_service(self, call: ServiceCall) -> None:
        """Handle the service call."""
        # The issue is registered under Spook's own domain (domain=DOMAIN);
        # the caller-supplied domain only flows into `issue_domain`.
        ir.async_create_issue(
            self.hass,
            domain=DOMAIN,
            is_fixable=True,
            is_persistent=call.data["persistent"],
            issue_domain=call.data["domain"],
            # `user_` prefix namespaces user-created issues (cf. the
            # "user_issue" translation key below).
            issue_id=f"user_{call.data['issue_id']}",
            severity=call.data["severity"],
            translation_key="user_issue",
            translation_placeholders={
                "title": call.data["title"],
                "description": call.data["description"],
            },
        )
|
[
"thomas@thomasbaxter.info"
] |
thomas@thomasbaxter.info
|
4057875c237527412532aabab219ffd580c79c80
|
25dda94672497e3287a7403e283fb279ad171b79
|
/boj/11286 절대값 힙.py
|
339ce72781b6d758e8e55eb60f6416475b82351b
|
[] |
no_license
|
woorud/Algorithm
|
c94b844e8c96a446c5fdee5c0abb159bfee384d7
|
f5b8e3cf0aea7fc4400e6f5bb0c1531fad93e541
|
refs/heads/master
| 2023-02-23T13:53:28.645036
| 2021-01-29T12:24:23
| 2021-01-29T12:24:23
| 230,908,288
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 310
|
py
|
import heapq, sys

# BOJ 11286 "absolute-value heap".
# Each query is one integer x per line: x != 0 pushes x onto the heap;
# x == 0 pops and prints the element with the smallest absolute value
# (ties broken toward the smaller signed value), or 0 if the heap is empty.
n = int(sys.stdin.readline())
# Store (|x|, x) pairs so heapq orders by absolute value first, then by the
# signed value as the tie-breaker.
absheap = []
for _ in range(n):
    num = int(sys.stdin.readline())
    if num != 0:
        heapq.heappush(absheap, (abs(num), num))
    else:
        # Fixed: the original used a bare `except:` around heappop, which
        # silently swallowed *every* exception (including KeyboardInterrupt),
        # not just the empty-heap IndexError. Test emptiness explicitly.
        if absheap:
            print(heapq.heappop(absheap)[1])
        else:
            print(0)
|
[
"woorud96@gmail.com"
] |
woorud96@gmail.com
|
30f9b23d261d536ea04bb46352a57de101437555
|
248793d43cd41e588fe343fe6f7dd82f667d2eb8
|
/num4ch.py
|
5a15eaf1bf1360318191d485c7cfb5cbe665d42b
|
[] |
no_license
|
dongzeyuan/Algorithms
|
85dfe22abc0ef539f7d9ff4efa205810e80d70cc
|
a57bfe5a2887947419f5d6deb6988ce94917c286
|
refs/heads/master
| 2020-03-09T01:03:15.737335
| 2018-04-07T06:53:47
| 2018-04-07T06:53:47
| 128,503,857
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,601
|
py
|
# coding=UTF-8
import wx
class MyFrame(wx.Frame):
    '''Small test frame for experimenting with wx widgets: a read-only text
    box that shows how many times the button below it has been clicked.'''
    def __init__(self):
        wx.Frame.__init__(self, None, -1, 'TestFrame', size=(400, 300))
        panel = wx.Panel(self, -1)
        # Create the text control.
        # (4-04) It must be defined as an attribute on self so the callback
        # function below can access it.
        self.num_txt = wx.TextCtrl(panel, -1, size=(150, 100),
            pos=(5, 5), style=wx.TE_MULTILINE | wx.TE_READONLY)
        # TODO: try the SetValue method as an alternative to AppendText.
        # Create the button (label "确定" = "OK/confirm").
        self.num_button = wx.Button(
            panel, -1, "确定", size=(150, 100), pos=(5, 110))
        # Bind the button's click event to the on_click callback.
        self.num_button.Bind(wx.EVT_BUTTON, self.on_click)
    # Original notes, translated: a counter local to the handler would be
    # re-created (reset) on every call and always print 1; (4-04) this was
    # solved with a module-level global, which also allows updating the
    # displayed value each click.
    def on_click(self, event):
        # Module-level counter `i`; `global` is required so `i += 1` rebinds
        # the module name instead of creating a local.
        global i
        i += 1
        # Clear the previous content first; without Clear() each click would
        # append a new line instead of replacing the shown count.
        self.num_txt.Clear()
        self.num_txt.AppendText('%d\n' % i)
if __name__ == "__main__":
    # Initialize the module-level click counter used by MyFrame.on_click.
    i = 0
    app = wx.App()
    MyFrame().Show(True)
    app.MainLoop()
|
[
"dongfujing88@gmail.com"
] |
dongfujing88@gmail.com
|
418ab74777c51e9c75e706afd4e9ced9af142f16
|
f669b07bf4e1047c7fa4f36983f5d8d9f607a42c
|
/.metadata/.plugins/org.eclipse.core.resources/.history/c0/20134fb0049000161aace3ce0a3d140b
|
4ecac11caf1f115505b487e0511d733ae686192b
|
[] |
no_license
|
tca85/python
|
d07b4453b38b5c5a024e2a6f8b8795dac6d928b1
|
436a23250539ef8055f817ada20cb21eab65cc42
|
refs/heads/master
| 2021-01-10T22:53:01.118480
| 2016-10-24T21:46:59
| 2016-10-24T21:46:59
| 70,340,585
| 1
| 0
| null | null | null | null |
ISO-8859-1
|
Python
| false
| false
| 1,233
|
#!/usr/bin/python
# -*- coding: latin1 -*-
'''
Created on Oct 5, 2016

@author: tca85

Python 2 driver script exercising the helper classes of the `pacote`
package (Teste, Arquivo, DateTime).
'''
# To import a class it is easiest to use the Eclipse shortcuts, but the
# pattern is: from <package>.<file name> import <ClassName>
from pacote.Teste import Teste
from pacote.Arquivo import Arquivo
from pacote.DateTime import DateTime
from pacote.Cadastro import Cadastro  # NOTE(review): imported but never used below
# Exercise assorted helpers on Teste (Python 2 print statements throughout).
teste = Teste()
lista = teste.multiplicar_range_por_dois(15)
teste.testar_resto_igual_zero(lista, 3)
teste.tamanho_palavra('lsdfjfkjsdfkjas')
print teste.media(lista)
print teste.nome_sistema_operacional()
teste.exemplo_interpolacao()
print teste.inverter_string('thiago')
print teste.exemplo_string_template()
teste.exemplo_argumentos('peso', 10, unidade='k')
#-----------------------------------------------------------------------------
# File helpers: judging by the method names, these create/write a text file,
# list files with sizes, create a temp file, and write compressed text --
# TODO confirm against pacote.Arquivo.
arquivo = Arquivo()
arquivo.criarArquivoTextoeEscrever('teste')
arquivo.mostrarListaArquivoTamanho()
arquivo.criarArquivoTemporario()
arquivo.gravarTextoArquivoCompactado()
#-----------------------------------------------------------------------------
# Date/time helpers. NOTE(review): the local name `datetime` shadows the
# stdlib module of the same name.
datetime = DateTime()
datetime.testaClasseDateTime()
#-----------------------------------------------------------------------------
|
[
"thiago.alves.85@gmail.com"
] |
thiago.alves.85@gmail.com
|
|
db3a5e796dccbcd86c59c07f265a266fba9cb209
|
53fab060fa262e5d5026e0807d93c75fb81e67b9
|
/backup/user_267/ch66_2020_06_21_19_51_02_942607.py
|
0e1afeb82048df4970ad1ba78a24fc6673a561ca
|
[] |
no_license
|
gabriellaec/desoft-analise-exercicios
|
b77c6999424c5ce7e44086a12589a0ad43d6adca
|
01940ab0897aa6005764fc220b900e4d6161d36b
|
refs/heads/main
| 2023-01-31T17:19:42.050628
| 2020-12-16T05:21:31
| 2020-12-16T05:21:31
| 306,735,108
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 166
|
py
|
def lista_sufixos(string):
    """Return the proper suffixes of *string*, longest first.

    For "abc" the result is ["bc", "c", ""]: each iteration drops one more
    leading character, ending with the empty suffix.

    Fixes to the original: the loop counter `i` was never initialized
    (NameError), the slice used call syntax ``string(i+1:)`` (SyntaxError),
    and the accumulator shadowed the function's own name.
    """
    sufixos = []
    i = 0
    while i < len(string):
        sufixos.append(string[i + 1:])
        i += 1
    return sufixos
|
[
"you@example.com"
] |
you@example.com
|
bd0a4edb0505e032600598e3d09c283a811f850c
|
be0f3dfbaa2fa3d8bbe59229aef3212d032e7dd1
|
/Gauss_v45r10p1/Gen/DecFiles/options/51000001.py
|
7e66d0a6b9ad05613ad85688a4de506f6035643c
|
[] |
no_license
|
Sally27/backup_cmtuser_full
|
34782102ed23c6335c48650a6eaa901137355d00
|
8924bebb935b96d438ce85b384cfc132d9af90f6
|
refs/heads/master
| 2020-05-21T09:27:04.370765
| 2018-12-12T14:41:07
| 2018-12-12T14:41:07
| 185,989,173
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 961
|
py
|
# file /home/hep/ss4314/cmtuser/Gauss_v45r10p1/Gen/DecFiles/options/51000001.py generated: Wed, 25 Jan 2017 15:25:34
#
# Event Type: 51000000
#
# ASCII decay Descriptor: e- => ?
#
# NOTE(review): the header above indicates this options file was
# auto-generated by the DecFiles machinery; regenerate rather than hand-edit.
from Configurables import ParticleGun
from Configurables import MomentumRange
ParticleGun().addTool( MomentumRange )
# Fixed-momentum gun: min == max == 1 GeV.
from GaudiKernel import SystemOfUnits
ParticleGun().MomentumRange.MomentumMin = 1*SystemOfUnits.GeV
# (Duplicate import kept exactly as generated.)
from GaudiKernel import SystemOfUnits
ParticleGun().MomentumRange.MomentumMax = 1*SystemOfUnits.GeV
ParticleGun().EventType = 51000001
ParticleGun().ParticleGunTool = "MomentumRange"
ParticleGun().NumberOfParticlesTool = "FlatNParticles"
# PDG code 11 = e-, matching the decay descriptor in the header.
ParticleGun().MomentumRange.PdgCodes = [ 11 ]
from Configurables import ToolSvc
from Configurables import EvtGenDecay
ToolSvc().addTool( EvtGenDecay )
ToolSvc().EvtGenDecay.UserDecayFile = "$DECFILESROOT/dkfiles/e-,fixP=CaloAcc.dec"
from Gaudi.Configuration import *
importOptions( "$DECFILESROOT/options/CaloAcceptance.py" )
|
[
"slavomirastefkova@b2pcx39016.desy.de"
] |
slavomirastefkova@b2pcx39016.desy.de
|
1402215de795362c3bf280285a432acd439ed1a3
|
ad13583673551857615498b9605d9dcab63bb2c3
|
/output/instances/nistData/atomic/byte/Schema+Instance/NISTXML-SV-IV-atomic-byte-minExclusive-4-3.py
|
546cd4d622222e004eb7c096b36aa268f39fa7a8
|
[
"MIT"
] |
permissive
|
tefra/xsdata-w3c-tests
|
397180205a735b06170aa188f1f39451d2089815
|
081d0908382a0e0b29c8ee9caca6f1c0e36dd6db
|
refs/heads/main
| 2023-08-03T04:25:37.841917
| 2023-07-29T17:10:13
| 2023-07-30T12:11:13
| 239,622,251
| 2
| 0
|
MIT
| 2023-07-25T14:19:04
| 2020-02-10T21:59:47
|
Python
|
UTF-8
|
Python
| false
| false
| 259
|
py
|
from output.models.nist_data.atomic.byte.schema_instance.nistschema_sv_iv_atomic_byte_min_exclusive_4_xsd.nistschema_sv_iv_atomic_byte_min_exclusive_4 import NistschemaSvIvAtomicByteMinExclusive4

# Instance document for NIST schema test case
# NISTXML-SV-IV-atomic-byte-minExclusive-4-3: one byte-typed value.
obj = NistschemaSvIvAtomicByteMinExclusive4(
    value=109
)
|
[
"tsoulloftas@gmail.com"
] |
tsoulloftas@gmail.com
|
17e70d2e41bdd516debf650b514fe200ab485a25
|
bcc199a7e71b97af6fbfd916d5a0e537369c04d9
|
/leetcode/solved/43_Multiply_Strings/solution.py
|
90b9fe407e659af20688b1f0a37aefc2a67638f3
|
[] |
no_license
|
sungminoh/algorithms
|
9c647e82472905a2c4e505c810b622b734d9d20d
|
1389a009a02e90e8700a7a00e0b7f797c129cdf4
|
refs/heads/master
| 2023-05-01T23:12:53.372060
| 2023-04-24T06:34:12
| 2023-04-24T06:34:12
| 87,406,513
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,935
|
py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2020 sungminoh <smoh2044@gmail.com>
#
# Distributed under terms of the MIT license.
"""
Given two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2, also represented as a string.
Note: You must not use any built-in BigInteger library or convert the inputs to integer directly.
Example 1:
Input: num1 = "2", num2 = "3"
Output: "6"
Example 2:
Input: num1 = "123", num2 = "456"
Output: "56088"
Constraints:
1 <= num1.length, num2.length <= 200
num1 and num2 consist of digits only.
Both num1 and num2 do not contain any leading zero, except the number 0 itself.
"""
import sys
import itertools
import pytest
class Solution:
    def multiply(self, num1: str, num2: str) -> str:
        """Return the product of two non-negative decimal-digit strings.

        Implements schoolbook long multiplication digit by digit, never
        converting the full inputs to int (per the problem constraints).
        """
        if num1 == '0' or num2 == '0':
            return '0'

        def scale(num: str, digit: str) -> str:
            # Multiply a digit string by a single decimal digit.
            factor = int(digit)
            carry = 0
            digits = []
            for ch in reversed(num):
                carry, rem = divmod(int(ch) * factor + carry, 10)
                digits.append(str(rem))
            if carry:
                digits.append(str(carry))
            return ''.join(reversed(digits))

        def add(lhs: str, rhs: str) -> str:
            # Add two digit strings of possibly different lengths.
            carry = 0
            digits = []
            pairs = itertools.zip_longest(reversed(lhs), reversed(rhs), fillvalue='0')
            for a, b in pairs:
                carry, rem = divmod(int(a) + int(b) + carry, 10)
                digits.append(str(rem))
            if carry:
                digits.append(str(carry))
            return ''.join(reversed(digits))

        # Accumulate one shifted partial product per digit of num2.
        total = '0'
        for shift, digit in enumerate(reversed(num2)):
            total = add(total, scale(num1, digit) + '0' * shift)
        return total
@pytest.mark.parametrize('num1, num2, expected', [
    ("2", "3", "6"),
    ("123", "456", "56088"),
])
def test(num1, num2, expected):
    # Examples straight from the problem statement.
    assert expected == Solution().multiply(num1, num2)
if __name__ == '__main__':
    # Run this file's own tests directly through pytest.
    sys.exit(pytest.main(["-s", "-v"] + sys.argv))
|
[
"smoh2044@gmail.com"
] |
smoh2044@gmail.com
|
e7b70b9f8478cbe00449d124f054ef12ee82c03b
|
e27333261b8e579564016c71d2061cc33972a8b8
|
/.history/api/IR_engine_20210728234203.py
|
14e0af8534a9c7489e013618793835e8ca8fd7ab
|
[] |
no_license
|
Dustyik/NewsTweet_InformationRetrieval
|
882e63dd20bc9101cbf48afa6c3302febf1989b1
|
d9a6d92b51c288f5bcd21ea1cc54772910fa58f7
|
refs/heads/master
| 2023-07-01T09:12:53.215563
| 2021-08-12T08:28:33
| 2021-08-12T08:28:33
| 382,780,359
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,671
|
py
|
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from IPython.display import display
'''
Functions to write:
1. tf-idf with cosine sim/Euclidean distance
- represent terms in each document with its tf-idf weights,
2. VSM with cosine sim/Euclidean distance
3. BIM
4. BM25
5. BERT
'''
# Absolute paths to the pre-stemmed datasets.
# NOTE(review): machine-specific Windows paths; consider making configurable.
titles_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\article_titles_stemmed.csv"
tweets_file_path = r"D:\Desktop\IR_term_8\IR-tweets---disaster-\tweets_data_stemmed.csv"
# Maps API model codes to the human-readable model names used for dispatch.
SEARCH_MODELS = {
    "tfcs": "Tf-idf w Cosine Sim",
    "tfed": "Tf-idf w Euclidean Dist"
}
def returnTweetsBasedOnSearchModel(dataProcessor, articleId, searchModel, articleText=""):
    """Dispatch a query to the chosen search model and return relevant tweets.

    Args:
        dataProcessor: DataProcessor holding the loaded data and model objects.
        articleId: id of the article used as the query.
        searchModel: a SEARCH_MODELS display name selecting the model.
        articleText: the article title/text used as the query string.
            Added as a defaulted parameter to fix the original body, which
            referenced an undefined name `articleText` (NameError at runtime);
            existing three-argument callers keep working.

    Returns:
        A pandas DataFrame of results for the cosine-similarity model, or
        None for any other (not yet implemented) model.
    """
    if searchModel == SEARCH_MODELS["tfcs"]:
        return dataProcessor.cosineSimilarity.query(articleId, articleText)
    return None
class DataProcessor:
    """Loads the stemmed titles/tweets CSVs and builds the retrieval models.

    NOTE(review): construction reads the hard-coded module-level paths, so
    instantiating this class performs disk I/O.
    """
    def __init__(self):
        self.titles_data = pd.read_csv(titles_file_path)
        self.tweets_data = pd.read_csv(tweets_file_path)
        # Drop rows with any missing values before building models.
        self.titles_data = self.titles_data.dropna()
        self.tweets_data = self.tweets_data.dropna()
        # Pre-built cosine-similarity model over the tweet corpus.
        self.cosineSimilarity = CosineSimilarity(self.titles_data, self.tweets_data)
        print ("Data Processor up and ready...")
class CosineSimilarity:
    """Ranks tweets against a query text by cosine similarity of their
    tf-idf (or raw-count) vectors."""
    def __init__(self, titles, tweets, type='tfidf'):
        # NOTE(review): parameter `type` shadows the builtin of the same name.
        self.titles = titles #contains titles data
        self.tweets = tweets #contains tweets data
        # 'tfidf' -> TfidfVectorizer, 'dt' -> CountVectorizer.
        self.vectorizer = self.change_matrix_type(type)
    def get_result(self, return_size):
        """Fill self.result with the `return_size` tweets most similar to
        the query (row 0 of self.matrix)."""
        cos_sim = cosine_similarity(self.matrix, self.matrix)
        # Row 0 is the query; sort its similarities descending and skip
        # index 0 (the query's self-similarity).
        top_ind = np.flip(np.argsort(cos_sim[0]))[1:return_size+1]
        top_id = [list(self.matrix.index)[i] for i in top_ind]
        self.result = []
        for i in top_id:
            # All dataset rows belonging to this tweet id.
            filt = self.tweets[self.tweets.tweet==i]
            for ind, r in filt.iterrows():
                rel = r['relevance_score']
                text = r['tweet']
                related = r['article_id']
                score = 0
                # Relevance scoring is disabled in this version (kept below
                # for reference).
                # if related==self.query_id and rel>0:
                #     score = 1
                # if related==self.query_id and rel==0:
                #     score = -1
                self.result.append({'tweet_id':i, 'text': text, 'related_article':related})
                #'score': score})
    def query(self, query_id, query_text, return_size=30):
        """Vectorize [query] + all tweets and return a DataFrame of the
        `return_size` most similar tweets."""
        self.query_id = query_id
        term_doc = self.vectorizer.fit_transform([query_text]+list(self.tweets.clean_text)) #returns document term matrix
        ind = ["query"] + list(self.tweets.tweet)
        self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vectorizer.get_feature_names(), index=ind) #indexes are the tweets, columns is the entire vocab
        self.get_result(return_size)
        return pd.DataFrame(self.result)
    def change_matrix_type(self, type):
        """Return the vectorizer for `type`; prints and falls through to
        None for unknown types (NOTE(review): raising would be safer)."""
        if type == 'tfidf':
            return TfidfVectorizer()
        elif type == 'dt':
            return CountVectorizer() #transforms the entire word matrix into a set of vectors
        else:
            print('Type is invalid')
    def get_matrix(self):
        """Return the document-term matrix built by the last query()."""
        return self.matrix
class EuclideanDistance:
    """Ranks documents against a query text by Euclidean distance of their
    tf-idf (or raw-count) vectors. Mirrors CosineSimilarity but reads a
    single `data` frame -- presumably with `document`, `clean_text`, `text`,
    `rel`, and `topic` columns; verify against the caller."""
    def __init__(self, data, type='tfidf'):
        self.data = data
        # Sets self.vec ('tfidf' -> TfidfVectorizer, 'dt' -> CountVectorizer).
        self.change_matrix_type(type)
        self.matrix = None
    def get_result(self, return_size):
        """Fill self.result with the `return_size` documents closest to the
        query (row 0 of self.matrix)."""
        # Distance of every document row (rows 1..N) to the query row (row 0).
        euclidean = euclidean_distances(self.matrix.values[1:], [self.matrix.values[0]])
        top_ind = np.argsort(euclidean.T[0])[:return_size]
        # NOTE(review): top_ind indexes into rows 1..N (query excluded above)
        # but is applied directly to self.matrix.index, which has 'query' at
        # position 0 -- looks like an off-by-one; confirm whether `[i]`
        # should be `[i + 1]`.
        top_id = [list(self.matrix.index)[i] for i in top_ind]
        # print(sorted(euclidean[:20]),top_10_ind ,top_10_id)
        self.result = []
        for i in top_id:
            filt = self.data[self.data.document==i]
            for ind, r in filt.iterrows():
                rel = r['rel']
                text = r['text']
                related = r['topic']
                # Score 1: relevant to the queried topic; -1: same topic but
                # judged non-relevant; 0: unrelated topic.
                score = 0
                if related==self.query_id and rel>0:
                    score = 1
                if related==self.query_id and rel==0:
                    score = -1
                self.result.append({'tweet_id':i, 'text': text, 'related_article':related,'score': score})
    def query(self, query_id, query_text, return_size=10):
        """Vectorize [query] + all documents and return a DataFrame of the
        `return_size` nearest documents."""
        self.query_id = query_id
        term_doc = self.vec.fit_transform([query_text]+list(self.data.clean_text))
        ind = ['query'] + list(self.data.document)
        self.matrix = pd.DataFrame(term_doc.toarray(), columns=self.vec.get_feature_names(), index=ind)
        self.get_result(return_size)
        return pd.DataFrame(self.result)
    def change_matrix_type(self, type):
        """Bind self.vec for `type`; prints on unknown types, leaving
        self.vec unset (NOTE(review): raising would be safer)."""
        if type == 'tfidf':
            self.vec = TfidfVectorizer()
        elif type == 'dt':
            self.vec = CountVectorizer()
        else:
            print('Type is invalid')
    def get_matrix(self):
        """Return the document-term matrix built by the last query()."""
        return self.matrix
def getArticleId(dataProcessor, articleTitle):
    """Return the id of the article whose title equals *articleTitle*.

    Fixes the original implementation, which iterated ``iterrows()`` without
    unpacking its (index, Series) pairs -- ``row["title"]`` / ``row.title``
    then failed on the tuple for every row. The stray debug ``print`` is
    removed along with the fix.

    Returns:
        The matching row's ``id`` value, or None when no title matches.
    """
    for _, row in dataProcessor.titles_data.iterrows():
        if row["title"] == articleTitle:
            return row["id"]
    return None
'''
sample_query_id = "f7ca322d-c3e8-40d2-841f-9d7250ac72ca"
sample_query_text = "Worcester breakfast club for veterans gives hunger its marching orders"
cosine_similarity_obj = CosineSimilarity(titles = titles, tweets = tweets)
result = cosine_similarity_obj.query(sample_query_id, sample_query_text)
print (display(result.head()))
Test Titles:
f7ca322d-c3e8-40d2-841f-9d7250ac72ca Worcester breakfast club for veterans gives hunger its marching orders
609772bc-0672-4db5-8516-4c025cfd54ca Jumpshot Gives Marketers Renewed Visibility Into Paid and Organic Keywords With Launch of Jumpshot Elite
1aa9d1b0-e6ba-4a48-ad0c-66552d896aac The Return Of The Nike Air Max Sensation Has 80’s Babies Hyped!
719699f9-47be-4bc7-969b-b53a881c95ae This New Dating App Will Ruin Your Internet Game
'''
|
[
"chiayik_tan@mymail.sutd.edu.sg"
] |
chiayik_tan@mymail.sutd.edu.sg
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.