max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
mindhome_alpha/erpnext/stock/doctype/stock_reconciliation/stock_reconciliation.py | Mindhome/field_service | 1 | 12762551 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe, erpnext
import frappe.defaults
from frappe import msgprint, _
from frappe.utils import cstr, flt, cint
from erpnext.controllers.stock_controller import StockController
from erpnext.accounts.utils import get_company_default
from erpnext.stock.doctype.serial_no.serial_no import get_serial_nos
from erpnext.stock.utils import get_stock_balance, get_incoming_rate, get_available_serial_nos
from erpnext.stock.doctype.batch.batch import get_batch_qty
class OpeningEntryAccountError(frappe.ValidationError):
    """Raised when the difference account of an opening entry is a P&L account."""
    pass

class EmptyStockReconciliationItemsError(frappe.ValidationError):
    """Raised when no reconciliation row differs from the current stock state."""
    pass
class StockReconciliation(StockController):
    """Controller for the Stock Reconciliation doctype.

    Reconciles system stock with physically counted stock by posting the
    qty/valuation differences as Stock Ledger Entries, plus the matching GL
    entries against the expense (stock adjustment) account.
    """

    def __init__(self, *args, **kwargs):
        super(StockReconciliation, self).__init__(*args, **kwargs)
        # Column headers used for spreadsheet-style row import/display.
        self.head_row = ["Item Code", "Warehouse", "Quantity", "Valuation Rate"]

    def validate(self):
        """Validate the document before save/submit."""
        # Fall back to company-level defaults when not set on the document.
        if not self.expense_account:
            self.expense_account = frappe.get_cached_value('Company', self.company, "stock_adjustment_account")
        if not self.cost_center:
            self.cost_center = frappe.get_cached_value('Company', self.company, "cost_center")
        self.validate_posting_time()
        self.remove_items_with_no_change()
        self.validate_data()
        self.validate_expense_account()
        self.set_total_qty_and_amount()
        self.validate_putaway_capacity()

        if self._action=="submit":
            self.make_batches('warehouse')

    def on_submit(self):
        """Post stock ledger + GL entries and repost dependent future entries."""
        self.update_stock_ledger()
        self.make_gl_entries()
        self.repost_future_sle_and_gle()

        from erpnext.stock.doctype.serial_no.serial_no import update_serial_nos_after_submit
        update_serial_nos_after_submit(self, "items")

    def on_cancel(self):
        """Reverse the posted entries on cancellation."""
        # These linked documents are reversed/recreated here, so skip link checks.
        self.ignore_linked_doctypes = ('GL Entry', 'Stock Ledger Entry', 'Repost Item Valuation')
        self.make_sle_on_cancel()
        self.make_gl_entries_on_cancel()
        self.repost_future_sle_and_gle()

    def remove_items_with_no_change(self):
        """Remove items if qty or rate is not changed"""
        self.difference_amount = 0.0

        def _changed(item):
            # Compare the row against the current balance as of posting datetime.
            item_dict = get_stock_balance_for(item.item_code, item.warehouse,
                self.posting_date, self.posting_time, batch_no=item.batch_no)
            if ((item.qty is None or item.qty==item_dict.get("qty")) and
                (item.valuation_rate is None or item.valuation_rate==item_dict.get("rate")) and
                (not item.serial_no or (item.serial_no == item_dict.get("serial_nos")) )):
                return False
            else:
                # set default as current rates
                if item.qty is None:
                    item.qty = item_dict.get("qty")

                if item.valuation_rate is None:
                    item.valuation_rate = item_dict.get("rate")

                if item_dict.get("serial_nos"):
                    item.current_serial_no = item_dict.get("serial_nos")
                    if self.purpose == "Stock Reconciliation":
                        item.serial_no = item.current_serial_no

                item.current_qty = item_dict.get("qty")
                item.current_valuation_rate = item_dict.get("rate")
                # Accumulate (new value - current value) over all kept rows.
                self.difference_amount += (flt(item.qty, item.precision("qty")) * \
                    flt(item.valuation_rate or item_dict.get("rate"), item.precision("valuation_rate")) \
                    - flt(item_dict.get("qty"), item.precision("qty")) * flt(item_dict.get("rate"), item.precision("valuation_rate")))
                return True

        items = list(filter(lambda d: _changed(d), self.items))

        if not items:
            frappe.throw(_("None of the items have any change in quantity or value."),
                EmptyStockReconciliationItemsError)

        elif len(items) != len(self.items):
            self.items = items
            # Re-sequence row indices after dropping unchanged rows.
            for i, item in enumerate(self.items):
                item.idx = i + 1
            frappe.msgprint(_("Removed items with no change in quantity or value."))

    def validate_data(self):
        """Per-row validation; messages are collected and raised together."""
        def _get_msg(row_num, msg):
            return _("Row # {0}: ").format(row_num+1) + msg

        self.validation_messages = []
        item_warehouse_combinations = []

        default_currency = frappe.db.get_default("currency")

        for row_num, row in enumerate(self.items):
            # find duplicates
            key = [row.item_code, row.warehouse]
            for field in ['serial_no', 'batch_no']:
                if row.get(field):
                    key.append(row.get(field))

            if key in item_warehouse_combinations:
                self.validation_messages.append(_get_msg(row_num, _("Duplicate entry")))
            else:
                item_warehouse_combinations.append(key)

            self.validate_item(row.item_code, row)

            # validate warehouse
            if not frappe.db.get_value("Warehouse", row.warehouse):
                self.validation_messages.append(_get_msg(row_num, _("Warehouse not found in the system")))

            # if both not specified
            if row.qty in ["", None] and row.valuation_rate in ["", None]:
                self.validation_messages.append(_get_msg(row_num,
                    _("Please specify either Quantity or Valuation Rate or both")))

            # do not allow negative quantity
            if flt(row.qty) < 0:
                self.validation_messages.append(_get_msg(row_num,
                    _("Negative Quantity is not allowed")))

            # do not allow negative valuation
            if flt(row.valuation_rate) < 0:
                self.validation_messages.append(_get_msg(row_num,
                    _("Negative Valuation Rate is not allowed")))

            if row.qty and row.valuation_rate in ["", None]:
                # Qty given without a rate: resolve rate from current balance,
                # then buying price list, then the Item master as last resort.
                row.valuation_rate = get_stock_balance(row.item_code, row.warehouse,
                    self.posting_date, self.posting_time, with_valuation_rate=True)[1]
                if not row.valuation_rate:
                    # try if there is a buying price list in default currency
                    buying_rate = frappe.db.get_value("Item Price", {"item_code": row.item_code,
                        "buying": 1, "currency": default_currency}, "price_list_rate")
                    if buying_rate:
                        row.valuation_rate = buying_rate
                    else:
                        # get valuation rate from Item
                        row.valuation_rate = frappe.get_value('Item', row.item_code, 'valuation_rate')

        # throw all validation messages
        if self.validation_messages:
            for msg in self.validation_messages:
                msgprint(msg)

            raise frappe.ValidationError(self.validation_messages)

    def validate_item(self, item_code, row):
        """Validate a single Item row (lifecycle, stock flag, serial/batch needs)."""
        from erpnext.stock.doctype.item.item import validate_end_of_life, \
            validate_is_stock_item, validate_cancelled_item

        # using try except to catch all validation msgs and display together

        try:
            item = frappe.get_doc("Item", item_code)

            # end of life and stock item
            validate_end_of_life(item_code, item.end_of_life, item.disabled, verbose=0)
            validate_is_stock_item(item_code, item.is_stock_item, verbose=0)

            # item should not be serialized
            if item.has_serial_no and not row.serial_no and not item.serial_no_series:
                raise frappe.ValidationError(_("Serial no(s) required for serialized item {0}").format(item_code))

            # item managed batch-wise not allowed
            if item.has_batch_no and not row.batch_no and not item.create_new_batch:
                raise frappe.ValidationError(_("Batch no is required for batched item {0}").format(item_code))

            # docstatus should be < 2
            validate_cancelled_item(item_code, item.docstatus, verbose=0)

        except Exception as e:
            self.validation_messages.append(_("Row # ") + ("%d: " % (row.idx)) + cstr(e))

    def update_stock_ledger(self):
        """ find difference between current and expected entries
        and create stock ledger entries based on the difference"""
        from erpnext.stock.stock_ledger import get_previous_sle

        sl_entries = []
        has_serial_no = False
        has_batch_no = False
        for row in self.items:
            item = frappe.get_doc("Item", row.item_code)
            if item.has_batch_no:
                has_batch_no = True

            if item.has_serial_no or item.has_batch_no:
                # Serialized/batched rows are expanded into multiple SLEs.
                has_serial_no = True
                self.get_sle_for_serialized_items(row, sl_entries)
            else:
                if row.serial_no or row.batch_no:
                    frappe.throw(_("Row #{0}: Item {1} is not a Serialized/Batched Item. It cannot have a Serial No/Batch No against it.") \
                        .format(row.idx, frappe.bold(row.item_code)))

                previous_sle = get_previous_sle({
                    "item_code": row.item_code,
                    "warehouse": row.warehouse,
                    "posting_date": self.posting_date,
                    "posting_time": self.posting_time
                })

                if previous_sle:
                    if row.qty in ("", None):
                        row.qty = previous_sle.get("qty_after_transaction", 0)

                    if row.valuation_rate in ("", None):
                        row.valuation_rate = previous_sle.get("valuation_rate", 0)

                if row.qty and not row.valuation_rate:
                    frappe.throw(_("Valuation Rate required for Item {0} at row {1}").format(row.item_code, row.idx))

                # Skip rows whose qty and rate match the last ledger state.
                if ((previous_sle and row.qty == previous_sle.get("qty_after_transaction")
                    and (row.valuation_rate == previous_sle.get("valuation_rate") or row.qty == 0))
                    or (not previous_sle and not row.qty)):
                    continue

                sl_entries.append(self.get_sle_for_items(row))

        if sl_entries:
            if has_serial_no:
                sl_entries = self.merge_similar_item_serial_nos(sl_entries)

            # Batched items may temporarily go negative while rebalancing.
            allow_negative_stock = False
            if has_batch_no:
                allow_negative_stock = True

            self.make_sl_entries(sl_entries, allow_negative_stock=allow_negative_stock)

        if has_serial_no and sl_entries:
            self.update_valuation_rate_for_serial_no()

    def get_sle_for_serialized_items(self, row, sl_entries):
        """Append the SLEs needed to reconcile a serialized/batched row."""
        from erpnext.stock.stock_ledger import get_previous_sle

        serial_nos = get_serial_nos(row.serial_no)

        # To issue existing serial nos
        if row.current_qty and (row.current_serial_no or row.batch_no):
            args = self.get_sle_for_items(row)
            args.update({
                'actual_qty': -1 * row.current_qty,
                'serial_no': row.current_serial_no,
                'batch_no': row.batch_no,
                'valuation_rate': row.current_valuation_rate
            })

            if row.current_serial_no:
                args.update({
                    'qty_after_transaction': 0,
                })

            sl_entries.append(args)

        qty_after_transaction = 0
        for serial_no in serial_nos:
            args = self.get_sle_for_items(row, [serial_no])

            previous_sle = get_previous_sle({
                "item_code": row.item_code,
                "posting_date": self.posting_date,
                "posting_time": self.posting_time,
                "serial_no": serial_no
            })

            if previous_sle and row.warehouse != previous_sle.get("warehouse"):
                # If serial no exists in different warehouse

                warehouse = previous_sle.get("warehouse", '') or row.warehouse

                if not qty_after_transaction:
                    qty_after_transaction = get_stock_balance(row.item_code,
                        warehouse, self.posting_date, self.posting_time)

                # Issue the serial no out of its old warehouse first.
                qty_after_transaction -= 1

                new_args = args.copy()
                new_args.update({
                    'actual_qty': -1,
                    'qty_after_transaction': qty_after_transaction,
                    'warehouse': warehouse,
                    'valuation_rate': previous_sle.get("valuation_rate")
                })

                sl_entries.append(new_args)

        if row.qty:
            args = self.get_sle_for_items(row)

            args.update({
                'actual_qty': row.qty,
                'incoming_rate': row.valuation_rate,
                'valuation_rate': row.valuation_rate
            })

            sl_entries.append(args)

        if serial_nos == get_serial_nos(row.current_serial_no):
            # update valuation rate
            self.update_valuation_rate_for_serial_nos(row, serial_nos)

    def update_valuation_rate_for_serial_no(self):
        """Refresh the purchase rate of every serial no in the document."""
        for d in self.items:
            if not d.serial_no: continue

            serial_nos = get_serial_nos(d.serial_no)
            self.update_valuation_rate_for_serial_nos(d, serial_nos)

    def update_valuation_rate_for_serial_nos(self, row, serial_nos):
        """Write the row's applicable rate onto each Serial No record."""
        # On submit use the new rate; otherwise (e.g. cancel) restore the old one.
        valuation_rate = row.valuation_rate if self.docstatus == 1 else row.current_valuation_rate
        if valuation_rate is None:
            return

        for d in serial_nos:
            frappe.db.set_value("Serial No", d, 'purchase_rate', valuation_rate)

    def get_sle_for_items(self, row, serial_nos=None):
        """Build (not insert) a Stock Ledger Entry dict for the given row."""
        if not serial_nos and row.serial_no:
            serial_nos = get_serial_nos(row.serial_no)

        data = frappe._dict({
            "doctype": "Stock Ledger Entry",
            "item_code": row.item_code,
            "warehouse": row.warehouse,
            "posting_date": self.posting_date,
            "posting_time": self.posting_time,
            "voucher_type": self.doctype,
            "voucher_no": self.name,
            "voucher_detail_no": row.name,
            "company": self.company,
            "stock_uom": frappe.db.get_value("Item", row.item_code, "stock_uom"),
            "is_cancelled": 1 if self.docstatus == 2 else 0,
            "serial_no": '\n'.join(serial_nos) if serial_nos else '',
            "batch_no": row.batch_no,
            "valuation_rate": flt(row.valuation_rate, row.precision("valuation_rate"))
        })

        if not row.batch_no:
            data.qty_after_transaction = flt(row.qty, row.precision("qty"))

        if self.docstatus == 2 and not row.batch_no:
            # Cancellation: restore previous quantity/valuation state.
            if row.current_qty:
                data.actual_qty = -1 * row.current_qty
                data.qty_after_transaction = flt(row.current_qty)
                data.valuation_rate = flt(row.current_valuation_rate)
                data.stock_value = data.qty_after_transaction * data.valuation_rate
                data.stock_value_difference = -1 * flt(row.amount_difference)
            else:
                data.actual_qty = row.qty
                data.qty_after_transaction = 0.0
                data.valuation_rate = flt(row.valuation_rate)
                data.stock_value_difference = -1 * flt(row.amount_difference)

        return data

    def make_sle_on_cancel(self):
        """Post reversing Stock Ledger Entries when the document is cancelled."""
        sl_entries = []

        has_serial_no = False
        for row in self.items:
            if row.serial_no or row.batch_no or row.current_serial_no:
                has_serial_no = True
                serial_nos = ''
                if row.current_serial_no:
                    serial_nos = get_serial_nos(row.current_serial_no)

                sl_entries.append(self.get_sle_for_items(row, serial_nos))
            else:
                sl_entries.append(self.get_sle_for_items(row))

        if sl_entries:
            if has_serial_no:
                sl_entries = self.merge_similar_item_serial_nos(sl_entries)

            # Reverse in opposite order of the original posting.
            sl_entries.reverse()
            allow_negative_stock = frappe.db.get_value("Stock Settings", None, "allow_negative_stock")
            self.make_sl_entries(sl_entries, allow_negative_stock=allow_negative_stock)

    def merge_similar_item_serial_nos(self, sl_entries):
        # If user has put the same item in multiple row with different serial no
        new_sl_entries = []
        merge_similar_entries = {}

        for d in sl_entries:
            if not d.serial_no or d.actual_qty < 0:
                new_sl_entries.append(d)
                continue

            key = (d.item_code, d.warehouse)
            if key not in merge_similar_entries:
                merge_similar_entries[key] = d
            elif d.serial_no:
                data = merge_similar_entries[key]
                data.actual_qty += d.actual_qty
                data.qty_after_transaction += d.qty_after_transaction

                # NOTE(review): running-average formula assumes one unit per
                # merged entry (actual_qty grows by 1 each merge) — confirm.
                data.valuation_rate = (data.valuation_rate + d.valuation_rate) / data.actual_qty
                data.serial_no += '\n' + d.serial_no

                if data.incoming_rate:
                    data.incoming_rate = (data.incoming_rate + d.incoming_rate) / data.actual_qty

        for key, value in merge_similar_entries.items():
            new_sl_entries.append(value)

        return new_sl_entries

    def get_gl_entries(self, warehouse_account=None):
        """Return GL entries, charging differences to the expense account."""
        if not self.cost_center:
            msgprint(_("Please enter Cost Center"), raise_exception=1)

        return super(StockReconciliation, self).get_gl_entries(warehouse_account,
            self.expense_account, self.cost_center)

    def validate_expense_account(self):
        """Require an expense account; opening entries need a balance-sheet one."""
        if not cint(erpnext.is_perpetual_inventory_enabled(self.company)):
            return

        if not self.expense_account:
            frappe.throw(_("Please enter Expense Account"))
        elif self.purpose == "Opening Stock" or not frappe.db.sql("""select name from `tabStock Ledger Entry` limit 1"""):
            if frappe.db.get_value("Account", self.expense_account, "report_type") == "Profit and Loss":
                frappe.throw(_("Difference Account must be a Asset/Liability type account, since this Stock Reconciliation is an Opening Entry"), OpeningEntryAccountError)

    def set_total_qty_and_amount(self):
        """Compute per-row amounts and the qty/amount differences."""
        for d in self.get("items"):
            d.amount = flt(d.qty, d.precision("qty")) * flt(d.valuation_rate, d.precision("valuation_rate"))
            d.current_amount = (flt(d.current_qty,
                d.precision("current_qty")) * flt(d.current_valuation_rate, d.precision("current_valuation_rate")))

            d.quantity_difference = flt(d.qty) - flt(d.current_qty)
            d.amount_difference = flt(d.amount) - flt(d.current_amount)

    def get_items_for(self, warehouse):
        """Replace the items table with all eligible items of *warehouse*."""
        self.items = []
        for item in get_items(warehouse, self.posting_date, self.posting_time, self.company):
            self.append("items", item)

    def submit(self):
        """Submit directly, or enqueue as a background job for large documents."""
        if len(self.items) > 100:
            msgprint(_("The task has been enqueued as a background job. In case there is any issue on processing in background, the system will add a comment about the error on this Stock Reconciliation and revert to the Draft stage"))
            self.queue_action('submit')
        else:
            self._submit()
@frappe.whitelist()
def get_items(warehouse, posting_date, posting_time, company):
    """Return plain stock items (no variants/serial/batch) under *warehouse*.

    Items are collected both from Bin rows inside the warehouse subtree and
    from Item Defaults pointing at warehouses in that subtree, then each is
    returned with its current balance qty and valuation rate.
    """
    lft, rgt = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt"])
    items = frappe.db.sql("""
        select i.name, i.item_name, bin.warehouse
        from tabBin bin, tabItem i
        where i.name=bin.item_code and i.disabled=0 and i.is_stock_item = 1
        and i.has_variants = 0 and i.has_serial_no = 0 and i.has_batch_no = 0
        and exists(select name from `tabWarehouse` where lft >= %s and rgt <= %s and name=bin.warehouse)
    """, (lft, rgt))

    items += frappe.db.sql("""
        select i.name, i.item_name, id.default_warehouse
        from tabItem i, `tabItem Default` id
        where i.name = id.parent
            and exists(select name from `tabWarehouse` where lft >= %s and rgt <= %s and name=id.default_warehouse)
            and i.is_stock_item = 1 and i.has_serial_no = 0 and i.has_batch_no = 0
            and i.has_variants = 0 and i.disabled = 0 and id.company=%s
        group by i.name
    """, (lft, rgt, company))

    res = []
    # set() removes duplicates between the Bin and Item Default queries.
    for d in set(items):
        stock_bal = get_stock_balance(d[0], d[2], posting_date, posting_time,
            with_valuation_rate=True)

        if frappe.db.get_value("Item", d[0], "disabled") == 0:
            res.append({
                "item_code": d[0],
                "warehouse": d[2],
                "qty": stock_bal[0],
                "item_name": d[1],
                "valuation_rate": stock_bal[1],
                "current_qty": stock_bal[0],
                "current_valuation_rate": stock_bal[1]
            })

    return res
@frappe.whitelist()
def get_stock_balance_for(item_code, warehouse,
    posting_date, posting_time, batch_no=None, with_valuation_rate=True):
    """Return the current qty/rate (and serial nos, if serialized) for an item.

    For batched items the qty is taken from the batch balance instead of the
    warehouse balance. Requires write permission on Stock Reconciliation.
    """
    frappe.has_permission("Stock Reconciliation", "write", throw=True)

    item_dict = frappe.db.get_value("Item", item_code,
        ["has_serial_no", "has_batch_no"], as_dict=1)

    serial_nos = ""
    with_serial_no = True if item_dict.get("has_serial_no") else False
    data = get_stock_balance(item_code, warehouse, posting_date, posting_time,
        with_valuation_rate=with_valuation_rate, with_serial_no=with_serial_no)

    # get_stock_balance returns a 3-tuple only when serial nos are requested.
    if with_serial_no:
        qty, rate, serial_nos = data
    else:
        qty, rate = data

    if item_dict.get("has_batch_no"):
        qty = get_batch_qty(batch_no, warehouse, posting_date=posting_date, posting_time=posting_time) or 0

    return {
        'qty': qty,
        'rate': rate,
        'serial_nos': serial_nos
    }
@frappe.whitelist()
def get_difference_account(purpose, company):
    """Return the account to post reconciliation differences against.

    Stock Reconciliation uses the company's stock adjustment account; any
    other purpose (e.g. Opening Stock) falls back to a non-group Temporary
    account of the company.
    """
    if purpose == 'Stock Reconciliation':
        account = get_company_default(company, "stock_adjustment_account")
    else:
        account = frappe.db.get_value('Account', {'is_group': 0,
            'company': company, 'account_type': 'Temporary'}, 'name')

    return account
central_service/minitopo/mpMininetBuilder.py | netmsys/mpquic-rl | 1 | 12762552 | <filename>central_service/minitopo/mpMininetBuilder.py
from mininet.topo import Topo
from mininet.net import Mininet
from mininet.link import TCLink
from mininet.cli import CLI
from subprocess import Popen, PIPE
class MpMininetBuilder(Topo):
    """Mininet topology wrapper: builds, starts, queries and stops a network."""

    def __init__(self):
        Topo.__init__(self)
        # The Mininet instance; created lazily by startNetwork().
        self.net = None

    def commandTo(self, who, cmd):
        """Run *cmd* inside the network namespace of node *who*."""
        return who.cmd(cmd)

    def notNSCommand(self, cmd):
        """Run *cmd* on the host (outside any namespace) via a shell.

        Returns stdout (bytes), or the string "Error" if anything was
        written to stderr.
        """
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        if stderr:
            return "Error"
        return stdout

    def startNetwork(self):
        """Instantiate Mininet from this topology (with TCLink) and start it."""
        self.net = Mininet(topo=self, link=TCLink)
        self.net.start()

    def getCLI(self):
        """Drop into the interactive Mininet CLI, if the network is running."""
        if self.net is None:
            print("Can not get the CLI")
        else:
            CLI(self.net)

    def getHost(self, who):
        """Return the node named *who*; raises if the network is not started."""
        if self.net is None:
            print("Network not available....")
            raise Exception("Network not ready");
        else:
            return self.net.getNodeByName(who)

    def stopNetwork(self):
        """Stop the network if it was started."""
        if self.net is None:
            print("Could not stop network... Nothing to stop)")
        else:
            self.net.stop()
| 2.59375 | 3 |
Python/SQLite/anoka_backstuff.py | programmer-666/Codes | 0 | 12762553 | import sqlite3
class sqdb3:
    """Thin wrapper around a sqlite3 connection for the player-info database.

    Failures detected by the wrapper are accumulated as numeric error codes
    on the instance (e.g. 24 = "operation attempted while disconnected") and
    can be retrieved with ReturnErrorCodes().
    """

    def __init__(self):
        # Bug fix: __ERROR_CODES and __CONNECTION_FLAG used to be mutable
        # CLASS attributes, so every instance shared (and mutated in place)
        # the same error list. They are now per-instance state.
        self.__ERROR_CODES = []
        self.__CONNECTION_FLAG = False
        self.ConnectDB()

    def ConnectDB(self, dbname="player_infos.db"):
        """Open a connection to *dbname* (the file is created if missing)."""
        self.__CONNECTION_FLAG = True
        self.__connection = sqlite3.connect(dbname)

    def DisconnectDB(self):
        """Close the connection and mark the wrapper as disconnected."""
        self.__CONNECTION_FLAG = False
        self.__connection.close()

    def GetMessages(self):
        """Print all rows of the MESSAGES table.

        Records error code 24 when called while disconnected. Note: raises
        sqlite3.OperationalError if the MESSAGES table does not exist.
        """
        if self.__CONNECTION_FLAG:
            self.__cursor = self.__connection.cursor()
            raw_Messages = self.__cursor.execute("SELECT * FROM MESSAGES").fetchall()
            print(raw_Messages)
        else:
            self.__ERROR_CODES.append(24)

    def ReturnErrorCodes(self):
        """Return the accumulated error codes, or None if there are none.

        (The implicit-None-on-empty behavior is kept for backward
        compatibility with existing callers.)
        """
        if len(self.__ERROR_CODES) > 0:
            return self.__ERROR_CODES
# Ad-hoc smoke test: dump the MESSAGES table once and report any
# accumulated error codes before disconnecting.
x = sqdb3()
x.GetMessages()
print(x.ReturnErrorCodes())
x.DisconnectDB()
py/base/Env.py | jeremybennett/force-riscv | 0 | 12762554 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR
# FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import PyInterface
from base.Sequence import Sequence
from base.GenThreadExecutor import GenThreadExecutorFactory
import Log
import RandomUtils
## GlobalInitSequence class
# Base class of arch level global init sequence class.
class GlobalInitSequence(Sequence):
    """Base class of arch-level global init sequences.

    generate() runs the four setup hooks in order; architecture-specific
    subclasses override the hooks they need (the defaults are no-ops).
    """

    def __init__(self, gen_thread, name):
        super().__init__(gen_thread, name)

    def generate(self, **kargs):
        # Fixed setup order; subclasses customize via the hooks below.
        self.setupResetRegion()
        self.allocateHandlerSetMemory()
        self.setupMemoryFillPattern()
        self.setupThreadGroup()

    def setupResetRegion(self):
        # Hook: reserve/configure the reset memory region.
        pass

    def allocateHandlerSetMemory(self):
        # Hook: allocate memory for exception handler sets.
        pass

    def setupMemoryFillPattern(self):
        # Hook: choose the pattern used to initialize memory.
        pass

    def setupThreadGroup(self):
        # Hook: configure thread grouping.
        pass
## Env class
# Top level class in a test template
class Env(object):
    """Top level class in a test template.

    Owns one front-end GenThread per (chip, core, thread) tuple, elects a
    random "main" generator for before/after-test sequences, and delegates
    parallel generation to a GenThreadExecutor.
    """

    def __init__(self, interface):
        self.interface = interface
        self.numberChips = self.interface.numberOfChips()
        self.numberCores = self.interface.numberOfCores()
        self.numberThreads = self.interface.numberOfThreads()
        self.genThreads = list()
        self.mGenMain = None                # main generator, set by assignMainGen()
        self.beforeSequences = list()       # run on main gen before the test body
        self.afterSequences = list()        # run on main gen after the test body
        self.defaultGenClass = None         # set by the arch layer before setup()
        self.defaultSeqClass = None
        self.defaultInitSeqClass = None
        self.genThreadInitFunc = None
        self.executor = GenThreadExecutorFactory.createGenThreadExecutor(self.numberChips * self.numberCores * self.numberThreads, interface)

    ## Configure generator memory
    def configureMemory(self, memfile_module):
        import importlib
        mem_module = importlib.import_module(memfile_module)
        mem_module.configure_memory(self.interface)

    def configureChoicesModifier(self, modfile_module):
        # Dynamically load a module that tweaks generation choices on the main gen.
        import importlib
        choices_module = importlib.import_module(modfile_module)
        choices_module.configure_choices(self.mGenMain)

    ## Setup generator threads
    def setup(self):
        for i_chip in range(self.numberChips):
            for i_core in range(self.numberCores):
                for i_thread in range(self.numberThreads):
                    gen_thread_id = self.createBackEndGenThread(i_thread, i_core, i_chip) # create back end generator thread
                    new_gen_thread = self.createGenThread(gen_thread_id, i_thread, i_core, i_chip) # create front end generator thread
                    self.genThreads.append(new_gen_thread)
                    self.setupGenThread(new_gen_thread)

        self.assignMainGen()
        self.addThreadSplitterSequence()

    ## Assign a main generator for the before and after main test processing.
    def assignMainGen(self):
        num_gen = len(self.genThreads)
        if num_gen == 0:
            self.interface.error("[assignMainGen] number of threads = 0")

        # Pick the main generator at random among all threads.
        gen_index = RandomUtils.random32(0, num_gen - 1)
        self.mGenMain = self.genThreads[gen_index]
        self.executor.setMainThreadId(self.mGenMain.genThreadID)
        Log.notice("Main generator is 0x%x" % self.mGenMain.genThreadID)

    ## Create back end generator thread
    def createBackEndGenThread(self, i_thread, i_core, i_chip):
        ret_thread_id = self.interface.createGeneratorThread(i_thread, i_core, i_chip)
        return ret_thread_id

    ## Create front end generator thread.
    def createGenThread(self, gen_thread_id, i_thread, i_core, i_chip):
        return self.defaultGenClass(gen_thread_id, self.interface)

    ## Setting up newly created generator thread.
    def setupGenThread(self, gen_thread):
        main_seq = self.defaultSeqClass(gen_thread, self.defaultSeqClass.__name__)
        gen_thread.addSequence(main_seq)
        gen_thread.setGenThreadInitFunc(self.genThreadInitFunc)

    ## Start all the generator threads
    def generate(self):
        for seq in self.beforeSequences:
            seq.genThread = self.mGenMain
            seq.run()

        # TODO(Noah): Remove this logic if and when a more abstracted mechanism to assign the exceptions manager can be
        # determined. All threads share the same exception handler sets, so we need to propagate their locations to each
        # thread. We do this by creating copies of the exceptions manager.
        self.mGenMain.setup()
        for gen_thread in self.genThreads:
            if gen_thread is not self.mGenMain:
                gen_thread.exceptionHandlerManager = self.mGenMain.exceptionHandlerManager.createShallowCopy(gen_thread)
                gen_thread.addressTableManager = self.mGenMain.addressTableManager.createShallowCopy(gen_thread)

        self.executor.executeGenThreads(self.genThreads)

        for seq in self.afterSequences:
            seq.genThread = self.mGenMain
            seq.run()

        self.mGenMain.genSequence("Summary")

    ## set Sequence class like bnt, eret on a thread
    def setSequenceClass(self, thread_id, seq_type, sequence):
        # seq_type: 0 = BNT sequence, 1 = ERET preamble sequence.
        thread_obj = self.getThreadObject(thread_id)
        if thread_obj is not None:
            if seq_type == 0:
                thread_obj.setBntSequence(sequence)
            elif seq_type == 1:
                thread_obj.setEretPreambleSequence(sequence)
            else:
                self.interface.error("invalid sequence type: %d" % seq_type)
        else:
            self.interface.error("invalid thread id: %d" % thread_id)

    ## run Sequence on a thread
    ## TBD: to optimize thread list for better performance
    def runSequence(self, thread_id, seq_type, primary, param_dict):
        thread_obj = self.getThreadObject(thread_id)
        if thread_obj is not None:
            if seq_type == 0:
                thread_obj.runBntSequence(primary, param_dict)
            elif seq_type == 1:
                thread_obj.runEretPreambleSequence(param_dict)
            else:
                self.interface.error("invalid sequence type: %d" % seq_type)
        else:
            self.interface.error("invalid thread id: %d" % thread_id)

    def getThreadObject(self, thread_id):
        # Linear scan; returns None when the id is unknown.
        for thread in self.genThreads:
            if thread.genThreadID == thread_id:
                return thread
        return None

    ## Add a sequence to be run before generating the main test.
    def addInitialSequence(self, init_class):
        if init_class is not None:
            self.beforeSequences.append(init_class(None, init_class.__name__))
        else:
            self.beforeSequences.append(self.defaultInitSeqClass(None, self.defaultInitSeqClass.__name__))

    def addThreadSplitterSequence(self):
        # Must be provided by the architecture-specific subclass.
        raise NotImplementedError
| 2.03125 | 2 |
vlm/data.py | woojeongjin/vokenization | 173 | 12762555 | <gh_stars>100-1000
import copy
import os
import random
import h5py
import torch
from torch.utils.data import DataLoader, Dataset
import tqdm
class CoLDataset(Dataset):
    """Voken language-modeling dataset backed by HDF5 token/voken dumps.

    Tokens (and, when voken_dir is given, parallel voken ids) are read lazily
    from HDF5 and served as fixed-size blocks of ``block_size`` tokens.
    """

    # Label value ignored by the loss (matches PyTorch cross-entropy default).
    IGNORE_ID = -100
    # Strategy for resolving sentence-level vokens; see maybe_do_sent_level().
    sent_strategy = 'first'

    def __init__(self, file_path, tokenizer_name, tokenizer, block_size=512,
                 split_sent=False, voken_dir=None, suffix=None, verbose=False,
                 voken_ablation=None):
        # Open token's hdf5
        token_path = file_path + '.' + tokenizer_name + '.hdf5'
        assert os.path.isfile(token_path)
        if verbose:
            print("-------- Load Data -------")
            print("Load tokens from", token_path)
        self.token_hdf5 = h5py.File(token_path, 'r')
        self.tokenizer = tokenizer
        self.tokens = self.token_hdf5['tokens']
        self.verbose = verbose
        self.voken_ablation = voken_ablation
        self._iter_cnt = 0

        # Open voken's hdf5 and load voken ids
        if voken_dir is not None:
            assert suffix is not None, 'Please provide suffix of the voken, e.g., vg_nococo.5000.'
            self.sent_level = 'sent' in voken_dir
            dset_fname = os.path.split(file_path)[-1]
            voken_path = os.path.join(voken_dir, f"{dset_fname}.{suffix}.hdf5")
            voken_ids_path = os.path.join(voken_dir, f"{dset_fname}.{suffix}.ids")
            if verbose:
                print("Load vokens from", voken_path)
            self.voken_hdf5 = h5py.File(voken_path, 'r')
            self.vokens = self.voken_hdf5['vokens']
            assert len(self.vokens) == len(self.tokens)
            self._voken_ids = list(
                map(lambda x: x.strip(),
                    open(voken_ids_path).readlines())
            )
            if verbose:
                print("\t with voken size", self.voken_size)
                print("\t top 5 voken ids are:", self._voken_ids[:5])
        else:
            self.vokens = None

        # Split for every block_size tokens
        # The last block without full length will be dropped.
        num_tokens = len(self.tokens)
        self.starts = list(range(0, num_tokens, block_size))
        self.batches = list(zip(self.starts[:-1], self.starts[1:]))

        manual_filtered = False
        if "en.train.raw" in file_path and tokenizer_name == "bert-base-uncased":
            self.batches = manual_filter(self.batches)
            if verbose:
                print("Data: Mannually filter the range for counties.")
            manual_filtered = True

        # batch_info
        if verbose:
            print("Split sent with block size", block_size)
            print(f"Total batches: {len(self.batches)}")
            print(f"Total tokens: {len(self.tokens)}")
            if voken_dir is not None:
                print(f"Total vokens: {len(self.vokens)}")
            if voken_ablation is not None:
                print("The model will process voken ablation strategy:", voken_ablation)
            print()

        block_check(self.batches, block_size, fixed_size=True, manual_filtered=manual_filtered)

        if self.voken_ablation == 'token':
            # Token-ablation study: vokens become the token vocabulary itself.
            self._voken_ids = list(range(30522))

    @property
    def voken_size(self):
        return len(self._voken_ids)

    @property
    def voken_ids(self):
        # Defensive copy so callers cannot mutate the internal list.
        return copy.copy(self._voken_ids)

    def assert_equal_vokens(self, dataset):
        """Assert that *dataset* uses exactly the same voken vocabulary."""
        assert self.voken_size == dataset.voken_size
        for vid, vid1 in zip(self.voken_ids, dataset.voken_ids):
            assert vid == vid1

    def __len__(self):
        # NOTE(review): batches is already a list of (start, end) pairs, so the
        # extra "- 1" drops the last usable batch — confirm intent.
        return len(self.batches) - 1

    def __getitem__(self, item):
        token_start, token_end = self.batches[item]
        if self._iter_cnt < 5 and self.verbose:
            print(f"Data Loader: data iteration {self._iter_cnt}, with range {token_start} to {token_end}.")
        self._iter_cnt += 1

        tokens = list(self.tokens[token_start: token_end])
        token_tensor = torch.tensor(
            self.tokenizer.build_inputs_with_special_tokens(tokens),
            dtype=torch.long)
        if self.vokens is not None:
            vokens = list(self.vokens[token_start: token_end])
            vokens = self.maybe_do_sent_level(vokens)
            vokens = self.maybe_do_ablation_study(vokens, tokens)
            # Special tokens added around the block get the ignore label.
            voken_tensor = torch.tensor(
                [self.IGNORE_ID] + vokens + [self.IGNORE_ID],
                dtype=torch.long
            )
            return token_tensor, voken_tensor
        else:
            return token_tensor

    def maybe_do_sent_level(self, vokens):
        """Resolve sentence-level voken encoding (negatives mark non-first tokens)."""
        if not self.sent_level:
            return vokens
        else:
            if self.sent_strategy == 'all':
                # Decode every token's voken: negative ids are offset-encoded.
                vokens = [
                    (-voken-1 if voken < 0 else voken)
                    for voken in vokens
                ]
            elif self.sent_strategy == 'first':
                # Keep only the first token of each sentence; ignore the rest.
                vokens = [
                    (self.IGNORE_ID if voken < 0 else voken)
                    for voken in vokens
                ]
            return vokens

    def maybe_do_ablation_study(self, vokens, tokens):
        """Apply the configured voken-ablation transform, if any."""
        if self.voken_ablation is None:
            return vokens
        else:
            if self._iter_cnt < 5 and self.verbose:
                print("Before voken ablation: ", vokens)
            if self.voken_ablation == 'random':
                vokens = [random.randint(0, self.voken_size - 1)
                          for _ in range(len(vokens))]
            elif self.voken_ablation == 'shuffle':
                random.shuffle(vokens)
            elif self.voken_ablation == 'reverse':
                vokens = vokens[::-1]
            elif self.voken_ablation == 'token':
                vokens = tokens
            if self._iter_cnt < 5 and self.verbose:
                print("After voken ablation: ", vokens)
            return vokens

    def get_item_info(self, item):
        # NOTE(review): batches holds (start, end) PAIRS, so both of these are
        # tuples, not scalar offsets — looks like a leftover from when
        # self.batches was a list of start indices; verify callers.
        token_start = self.batches[item]
        token_end = self.batches[item + 1]
        return token_start, token_end

    def __del__(self):
        # Close the HDF5 handles when the dataset is garbage-collected.
        self.token_hdf5.close()
        if self.vokens is not None:
            self.voken_hdf5.close()
# Token span of the training dump that is manually excluded from batching
# (presumably corrupted/undesired data — TODO confirm); consumed by
# manual_filter() / block_check() below.
FORBIDDEN_RANGE = (
    119314944,  # Start of iter 3700
    187053048   # End of iter 5800
)
def intersect(x, y):
    """Return True iff the half-open ranges x = [x1, x2) and y = [y1, y2) overlap."""
    x1, x2 = x
    y1, y2 = y
    if x2 <= y1 or x1 >= y2:
        # Case 1: [ x )[ y )  -- x ends at/before y starts
        # Case 2: [ y )[ x )  -- x starts at/after y ends
        # Bug fix: the second condition previously tested `x2 >= y2`, which
        # wrongly reported "no overlap" whenever x straddled y's end or
        # contained y entirely.
        return False
    return True
def manual_filter(batches):
    """Drop every (start, end) batch that overlaps FORBIDDEN_RANGE."""
    kept = [span for span in batches if not intersect(span, FORBIDDEN_RANGE)]
    return kept
def block_check(batches, block_size, fixed_size=False, manual_filtered=False):
    """
    Sanity-check a list of (start, end) token ranges.

    The ranges must be monotonically increasing, mutually exclusive, and no
    longer than ``block_size`` (exactly ``block_size`` when ``fixed_size`` is
    set). When ``manual_filtered`` is set, each range must also avoid
    FORBIDDEN_RANGE. Raises AssertionError on the first violation.
    """
    prev_end = 0
    for begin, finish in batches:
        assert prev_end <= begin
        span = finish - begin
        if fixed_size:
            assert span == block_size, 'len([%d, %d)) != %d' % (begin, finish, block_size)
        else:
            assert span <= block_size, 'len([%d, %d)) > %d' % (begin, finish, block_size)
        if manual_filtered:
            assert not intersect((begin, finish), FORBIDDEN_RANGE)
        prev_end = finish
def get_voken_feats(dataset: CoLDataset, feat_dir: str):
    """
    Load pre-extracted visual features regarding img_ids of vokens.

    Each voken id has the form "<image_set>/<image_id>"; the per-set id list
    (``<set>.ids``) and feature matrix (``<set>.hdf5``, dataset ``keys``) are
    loaded once per set and cached in ``set2id2feat``. Returns the feature
    vectors in the same order as ``dataset.voken_ids``.
    """
    set2id2feat = {}
    voken_feats = []
    for voken_id in dataset.voken_ids:
        voken_img_set, voken_img_id = voken_id.split('/')
        if voken_img_set not in set2id2feat:
            # First time we see this image set: build its id -> feature map.
            img_ids = list(map(
                lambda x: x.rstrip(),
                open(os.path.join(feat_dir, f"{voken_img_set}.ids"))
            ))
            img_feats = h5py.File(
                os.path.join(feat_dir, f"{voken_img_set}.hdf5"), 'r'
            )['keys'][:]
            id2feat = {}
            assert len(img_ids) == len(img_feats)
            for img_id, img_feat in zip(img_ids, img_feats):
                id2feat[img_id] = img_feat
            set2id2feat[voken_img_set] = id2feat
        voken_feats.append(set2id2feat[voken_img_set][voken_img_id])
    return voken_feats
| 2.09375 | 2 |
api/test_.py | Iuty/py.fastai.winservice | 0 | 12762556 | from flask_restful import Resource
from flask import request
from prx.TrainProxy import TrainProxy
from prx.TestProxy import TestProxy
class TestApi(Resource):
    """REST endpoint dispatching picture/directory test commands to TestProxy.

    POST form fields:
        cmd         -- "testPicture" or "testDirectory"
        projectname -- name of the project to test against
        tag         -- model tag to use
        path        -- picture (or directory) path to test
    """

    @staticmethod
    def _required_params():
        """Fetch the common form params; return (params, error).

        *error* is None when projectname, tag and path are all present,
        otherwise a {'success': False, 'error': ...} payload naming the
        first missing parameter.
        """
        params = {}
        for key in ('projectname', 'tag', 'path'):
            value = request.form.get(key)
            if not value:
                # bug fix: error message previously read "nesserary"
                return None, {'success': False, 'error': "%s is necessary" % key}
            params[key] = value
        return params, None

    @staticmethod
    def testPicture():
        """Run a single-picture test; returns the proxy result or an error dict."""
        params, error = TestApi._required_params()
        if error:
            return error
        return TestProxy.testPicture(params['projectname'], params['tag'], params['path'])

    @staticmethod
    def testDirectory():
        """Run a whole-directory test; returns the proxy result or an error dict."""
        params, error = TestApi._required_params()
        if error:
            return error
        return TestProxy.testDirectory(params['projectname'], params['tag'], params['path'])

    def post(self):
        """Dispatch on the 'cmd' form field; unknown commands return None."""
        _cmd = request.form.get('cmd')
        if _cmd == "testPicture":
            return TestApi.testPicture()
        if _cmd == "testDirectory":
            return TestApi.testDirectory()
| 2.515625 | 3 |
optimise_trajectories.py | DaphneAntotsiou/task-oriented-hand-retargeting | 3 | 12762557 | __author__ = 'DafniAntotsiou'
import os
from pso import pso, particle2actuator
from functions import *
import mujoco_py as mp
from math import ceil
from mjviewerext import MjViewerExt
import glob
import argparse
from replay_trajectories import play
def argsparser():
    """Assemble and parse the command-line arguments for the retargeting run."""
    arg_parser = argparse.ArgumentParser("Implementation of Task Oriented Hand Motion Retargeting")
    arg_parser.add_argument('--model_path', help='path to model xml', type=str,
                            default="model/MPL/MPL_Sphere_6.xml")
    arg_parser.add_argument('--traj_path', help='path to the trajectory file or directory',
                            default='trajectories')
    arg_parser.add_argument('--out_dir', help='directory to save the output results',
                            default='trajectories/result')
    arg_parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    boolean_flag(arg_parser, 'play', default=False,
                 help='playback the original and optimised trajectories')
    boolean_flag(arg_parser, 'rot_scene', default=True,
                 help='set if scene was rotated during HPE acquisition')
    return arg_parser.parse_args()
def boolean_flag(parser, name, default=False, help=None):
    """Register a paired boolean flag on *parser* (from OpenAI's baselines).

    ``--<name>`` stores True and ``--no-<name>`` stores False into the
    destination obtained by replacing '-' with '_' in *name*.

    Parameters
    ----------
    parser: argparse.Parser
        parser to add the flag to
    name: str
        flag name; dashes are mapped to underscores for the dest
    default: bool or None
        value used when neither switch is given
    help: str
        help string shown for the enabling switch
    """
    target = name.replace('-', '_')
    parser.add_argument('--' + name, action='store_true', default=default,
                        dest=target, help=help)
    parser.add_argument('--no-' + name, action='store_false', dest=target)
def optimise_actions(model_path, traj_path, rot_scene=False, fps=60, render=False, name=None, replay=False):
    """Optimise a recorded hand trajectory with PSO and save the result.

    model_path -- MuJoCo model XML path; traj_path -- .npz with 'obs', 'acs'
    and 'hpe' arrays; rot_scene -- whether the HPE scene was rotated during
    acquisition; name -- output .npz basename (defaults to 'pso_optimise');
    replay -- play the optimised vs original trajectory back when done.
    """
    per_hpe = False  # apply pso only on new hpe frame regardless of simulation fps
    data = read_npz(traj_path)
    # Hyper-parameter grids (single values here; kept as lists for sweeps).
    iterations = [100]
    swarms = [100]
    c_tasks = [0.8]
    c_angles = [0.5]
    for it in iterations:
        for swarmsize in swarms:
            for c_task in c_tasks:
                for c_a in c_angles:
                    trajectory = {'obs': [], 'acs': [], 'hpe': []}
                    assert 'hpe' in data and 'obs' in data and 'acs' in data
                    if 'hpe' in data and 'obs' in data and 'acs' in data:
                        model = mp.load_model_from_path(model_path)
                        # Substeps so one sim.step() spans one 1/fps frame.
                        nsubstep = int(ceil(1/(fps * model.opt.timestep)))
                        sim = mp.MjSim(model, nsubsteps=nsubstep)
                        sim.reset()
                        if render:
                            viewer = MjViewerExt(sim)
                        # initialise environment
                        idvA, default_q = get_model_info(model)
                        default_mat = array([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
                        default_q2 = rotmat2quat(default_mat)
                        default_q = default_q2 * default_q
                        init_pos = array([0, -0.8, 0.1])
                        # Initial positions of the 5 finger root actuators.
                        m_in = np.zeros(shape=(5, 3))
                        for i in range(5):
                            for j in range(3):
                                m_in[i, j] = idvA[3 + i * 4].get_pos()[j]
                        for i in range(len(data['acs'])):
                            # set actions
                            mocap_pos = data['acs'][i][0:3]
                            mocap_rot = data['acs'][i][3:7]
                            ctrl = data['acs'][i][7:]
                            sim.data.ctrl[:] = ctrl[:]
                            sim.data.mocap_pos[:] = mocap_pos[:]
                            sim.data.mocap_quat[:] = mocap_rot[:]
                            if i == 0:
                                # first frame
                                # set state
                                qpos = data['obs'][i][:len(sim.data.qpos)]
                                qvel = data['obs'][i][len(sim.data.qpos):]
                                sim.data.qpos[:] = qpos[:]
                                sim.data.qvel[:] = qvel[:]
                                # copy state to actuators
                                for j in range(len(idvA)):
                                    idvA[j].get_val_from_sim(sim)
                            if i == 0 or not np.array_equal(data['hpe'][i], data['hpe'][i-1]) or not per_hpe:
                                # first or new frame - retarget and apply pso
                                idvA = obs2actions(data['hpe'][i], idvA=idvA, init_pos=init_pos,
                                                   default_q=default_q, default_mat=default_mat, m_in=m_in, ad_hoc=False,
                                                   rot_scene=rot_scene)
                                curr_state = (sim.data.qpos, sim.data.qvel)
                                obj_name = "Object"
                                obj_state = get_joint_state(obj_name, sim.data)
                                if obj_state is not None:
                                    objects = {obj_name: obj_state}
                                else:
                                    objects = None
                                pair_dist = get_active_contacts_dist(data=sim.data,
                                                                     contact_pairs=get_pair_contacts(model=model))
                                if pair_dist and len(pair_dist) > 2:
                                    # there are at least 2 fingers close to the object - enable contact
                                    pso_params = {'contact': True, 'swarmsize': swarmsize, 'maxiter': it,
                                                  'minfunc': 1e-4, 'minstep': 1e-4, 'hybrid_prc': 10}
                                else:
                                    # no object close enough to grab
                                    pso_params = {'contact': False, 'swarmsize': 2, 'maxiter': 3,
                                                  'minfunc': 1e-1, 'minstep': 1e-1, 'hybrid_prc': 0.5}
                                if pso_params['contact']:  # apply pso only for contact
                                    for j in range(len(idvA) - 1):
                                        idvA[j].set_value(idvA[j].get_value(), safe=True)
                                    # Optimise only the first 23 actuators.
                                    sub_params = idvA[0:23]
                                    actions, error = pso(params=sub_params, obs=data['hpe'][i], model=model, norm=True, fps=10,
                                                         visualise=False,
                                                         default_mat=default_mat, hybrid_prc=pso_params['hybrid_prc'],
                                                         contact=pso_params['contact'], swarmsize=pso_params['swarmsize'],
                                                         initial_act=idvA, omega=0.1, phip=0.3, phig=0.7,
                                                         minstep=pso_params['minstep'], maxiter=pso_params['maxiter'],
                                                         minfunc=pso_params['minfunc'], hybrid_space=True, objects=objects
                                                         , initial_state=curr_state, rot_scene=rot_scene,
                                                         c_task=c_task, c_angle=c_a)
                                    sub_params = particle2actuator(actions, sub_params)
                            for j in range(len(idvA)):
                                idvA[j].assign(sim)
                            # record frame
                            trajectory['obs'].append(np.concatenate((np.asarray(sim.data.qpos),
                                                                     np.asarray(sim.data.qvel)), axis=0))
                            mocap = np.concatenate((sim.data.mocap_pos.flatten(), sim.data.mocap_quat.flatten()), axis=0)
                            trajectory['acs'].append(np.concatenate((np.asarray(mocap), np.asarray(sim.data.ctrl)), axis=0))
                            trajectory['hpe'].append(np.array(data['hpe'][i]))
                            sim.step()
                            if render:
                                viewer.render()
    # NOTE(review): only the trajectory of the last hyper-parameter combination
    # is saved (single-element grids make this equivalent) — confirm intended.
    if name is None:
        name = 'pso_optimise'
    np.savez(name, **trajectory)
    if replay:
        play(model_path, data=trajectory, fps=fps, loop=False, second_data=data)
def main(args):
    """Validate paths, collect trajectory files and optimise each of them."""
    if not os.path.isfile(args.model_path):
        print("model path does not exist. Terminating...")
        exit(1)
    args.model_path = os.path.abspath(args.model_path)
    if os.path.isdir(args.traj_path):
        # Process every .npz trajectory in the directory, in sorted order.
        trajectory_files = sorted(glob.glob(os.path.join(args.traj_path, "*.npz")))
    elif os.path.isfile(args.traj_path):
        trajectory_files = [args.traj_path]
    else:
        print("trajectory path does not exist. Terminating...")
        exit(1)
    os.makedirs(args.out_dir, exist_ok=True)
    for traj_file in trajectory_files:
        # Re-seed per file so each optimisation is reproducible in isolation.
        np.random.seed(args.seed)
        out_name = os.path.join(args.out_dir, os.path.basename(traj_file)[:-4] + '_pso')
        print("now working on " + traj_file)
        optimise_actions(args.model_path, traj_file, rot_scene=args.rot_scene, fps=60,
                         render=False, name=out_name, replay=args.play)
if __name__ == "__main__":
args = argsparser()
main(args)
| 2.53125 | 3 |
dataset.py | Aria-K-Alethia/Semi-supervised-VAE | 0 | 12762558 | <gh_stars>0
'''
Copyright (c) 2020 <EMAIL>
Description:
mnist dataset
Licence:
MIT
THE USER OF THIS CODE AGREES TO ASSUME ALL LIABILITY FOR THE USE OF THIS CODE.
Any use of this code should display all the info above.
'''
import numpy as np
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from functools import reduce
from operator import __or__
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from utils import onehot
def get_mnist(location="./", batch_size=64, labels_per_class=100):
    """Build MNIST loaders for semi-supervised training.

    Returns (labelled, unlabelled, validation) DataLoaders: *labelled* samples
    `labels_per_class` examples per digit from the train split, *unlabelled*
    samples the whole train split, *validation* the test split.
    NOTE(review): the Bernoulli binarisation runs inside the transform, so
    pixel samples differ on every access — confirm this is intended.
    """
    n_labels = 10
    cuda = torch.cuda.is_available()
    # Flatten each image to 784-d and sample binary pixels from grey values.
    flatten_bernoulli = lambda x: transforms.ToTensor()(x).view(-1).bernoulli()
    mnist_train = MNIST(location, train=True, download=True,
                        transform=flatten_bernoulli)
    mnist_valid = MNIST(location, train=False, download=True,
                        transform=flatten_bernoulli)
    def get_sampler(labels, n=None):
        # Only choose digits in n_labels
        (indices,) = np.where(reduce(__or__, [labels == i for i in np.arange(n_labels)]))
        # Ensure uniform distribution of labels
        np.random.shuffle(indices)
        # Take at most n indices of each digit (all of them when n is None).
        indices = np.hstack([list(filter(lambda idx: labels[idx] == i, indices))[:n] for i in range(n_labels)])
        indices = torch.from_numpy(indices)
        sampler = SubsetRandomSampler(indices)
        return sampler
    # Dataloaders for MNIST
    worker = 1
    # NOTE(review): train_labels/test_labels are deprecated aliases of
    # `targets` in newer torchvision — confirm the pinned version.
    labelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, num_workers=worker, pin_memory=cuda,
                                           sampler=get_sampler(mnist_train.train_labels.numpy(), labels_per_class))
    unlabelled = torch.utils.data.DataLoader(mnist_train, batch_size=batch_size, num_workers=worker, pin_memory=cuda,
                                             sampler=get_sampler(mnist_train.train_labels.numpy()))
    validation = torch.utils.data.DataLoader(mnist_valid, batch_size=batch_size, num_workers=worker, pin_memory=cuda,
                                             sampler=get_sampler(mnist_valid.test_labels.numpy()))
    return labelled, unlabelled, validation
| 2.375 | 2 |
run.py | grasskin/SimCLR | 0 | 12762559 | <reponame>grasskin/SimCLR
import argparse
import torch
import torch.backends.cudnn as cudnn
from torchvision import models
from data_aug.contrastive_learning_dataset import ContrastiveLearningDataset, coco_collate_fn
from models.resnet_simclr import ResNetBertSimCLR, ResNetSimCLR
import faulthandler; faulthandler.enable()
from simclr import BertSimCLR, SimCLR
from torchvision.datasets import CIFAR10
from torchvision import transforms
import torch.nn as nn
model_names = sorted(name for name in models.__dict__
if name.islower() and not name.startswith("__")
and callable(models.__dict__[name]))
parser = argparse.ArgumentParser(description='PyTorch SimCLR')
parser.add_argument('-data', metavar='DIR', default='./datasets',
help='path to dataset')
parser.add_argument('-d', '--dataset-name', default='stl10',
help='dataset name', choices=['stl10', 'cifar10', 'mscoco', 'mscocobaseline'])
parser.add_argument('-g', '--gpus', default=1, type=int,
help='number of gpus per node')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet50',
choices=model_names,
help='model architecture: ' +
' | '.join(model_names) +
' (default: resnet50)')
parser.add_argument('-j', '--workers', default=12, type=int, metavar='N',
help='number of data loading workers (default: 32)')
parser.add_argument('--epochs', default=210, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('-n', '--nodes', default=1,
type=int, metavar='N')
parser.add_argument('-b', '--batch-size', default=256, type=int,
metavar='N',
help='mini-batch size (default: 256), this is the total '
'batch size of all GPUs on the current node when '
'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.0003, type=float,
metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)',
dest='weight_decay')
parser.add_argument('--seed', default=None, type=int,
help='seed for initializing training. ')
parser.add_argument('--disable-cuda', action='store_true',
help='Disable CUDA')
parser.add_argument('--fp16-precision', action='store_true',
help='Whether or not to use 16-bit precision GPU training.')
parser.add_argument('--out_dim', default=128, type=int,
help='feature dimension (default: 128)')
parser.add_argument('--log-every-n-steps', default=100, type=int,
help='Log every n steps')
parser.add_argument('--temperature', default=0.07, type=float,
help='softmax temperature (default: 0.07)')
parser.add_argument('--n-views', default=2, type=int, metavar='N',
help='Number of views for contrastive learning training.')
parser.add_argument('--gpu-index', default=0, type=int, help='Gpu index.')
parser.add_argument('-C', default=1, type=int, help='Amount of multimodal loss.')
parser.add_argument('--eval', default=False, type=bool, help='Run linear classifier evaluation.')
parser.add_argument('--saved-path', default='', help='Path to saved checkpoint.')
parser.add_argument('--baseline', default=False, type=bool, help='Run baseline SimCLR training')
def main():
    """Entry point: parse args, then either train SimCLR contrastively or
    evaluate a saved checkpoint with a linear classifier on CIFAR10.

    Mode matrix: --eval selects linear-probe evaluation; --baseline selects
    the image-only SimCLR instead of the image+caption Bert variant.
    """
    args = parser.parse_args()
    assert args.n_views == 2, "Only two view training is supported. Please use --n-views 2."
    # check if gpu training is available
    if not args.disable_cuda and torch.cuda.is_available():
        args.device = torch.device('cuda')
        cudnn.deterministic = True
        cudnn.benchmark = True
        torch.multiprocessing.set_start_method('spawn')
    else:
        args.device = torch.device('cpu')
        args.gpu_index = -1
    if args.eval:
        # Load pretrained model and cifar10
        cifar_transforms = transforms.Compose([transforms.Resize(96), transforms.ToTensor()])
        dataset = CIFAR10(root='datasets/cifar10', download=True, transform=cifar_transforms)
        train_dataset = CIFAR10(root='datasets/cifar10', train=True, transform=cifar_transforms)
        valid_dataset = CIFAR10(root='datasets/cifar10', train=False, transform=cifar_transforms)
        train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,
                                                   shuffle=True, num_workers=args.workers)
        valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size,
                                                   shuffle=False, num_workers=args.workers)
        if not args.baseline:
            model = ResNetBertSimCLR(base_model=args.arch, out_dim=args.out_dim)
            checkpoint = torch.load(args.saved_path)
            model.load_state_dict(checkpoint['state_dict'])
        else:
            model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
            checkpoint = torch.load(args.saved_path)
            model.load_state_dict(checkpoint['state_dict'])
        # Linear probe on top of the 128-d projection output.
        classifier_model = torch.nn.Sequential(torch.nn.Linear(128, 10))
        classifier_optimizer = torch.optim.Adam(classifier_model.parameters(), args.lr, weight_decay=args.weight_decay)
        optimizer = None
        scheduler = None
    else:
        # Load BertSimCLR and coco dataset
        dataset = ContrastiveLearningDataset(args.data)
        train_dataset = dataset.get_dataset(args.dataset_name, args.n_views)
        valid_dataset = dataset.get_dataset(args.dataset_name+'valid', args.n_views)
        if not args.baseline:
            # Multimodal variant needs the caption-aware collate function.
            train_loader = torch.utils.data.DataLoader(
                train_dataset, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True, collate_fn=coco_collate_fn)
            valid_loader = torch.utils.data.DataLoader(
                valid_dataset, batch_size=args.batch_size, shuffle=True,
                num_workers=args.workers, pin_memory=True, drop_last=True, collate_fn=coco_collate_fn)
            model = ResNetBertSimCLR(base_model=args.arch, out_dim=args.out_dim)
        else:
            train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
                                                       num_workers=args.workers, pin_memory=True, drop_last=True)
            valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=args.batch_size, shuffle=True,
                                                       num_workers=args.workers, pin_memory=True, drop_last=True)
            model = ResNetSimCLR(base_model=args.arch, out_dim=args.out_dim)
        optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
        scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=len(train_loader), eta_min=0,
                                                               last_epoch=-1)
        # emergency fix
        # NOTE(review): hard-coded absolute checkpoint path below unconditionally
        # overwrites the freshly initialised model/optimizer and will crash on any
        # machine without this file — should come from a CLI flag. Confirm.
        model.to(args.device)
        checkpoint = torch.load('/home/pliang/gabriel/BertSimCLR/runs/Sep09_19-15-40_quad-p40-0-1/checkpoint_0048.pth.tar')#, map_location="cuda:0")
        model_state = checkpoint['state_dict']#.to(args.device)
        opt_state = checkpoint['optimizer']#.to(args.device)
        model.load_state_dict(model_state)
        optimizer.load_state_dict(opt_state)
        model.to(args.device)
        classifier_model = None
        classifier_optimizer = None
    #print(model.visual_backbone)
    data_loaders = {"train": train_loader, "val": valid_loader}
    # It’s a no-op if the 'gpu_index' argument is a negative integer or None.
    with torch.cuda.device(args.gpu_index):
        if not args.baseline:
            simclr = BertSimCLR(model=model, optimizer=optimizer, scheduler=scheduler, classifier_model=classifier_model, classifier_optimizer=classifier_optimizer, args=args)
            if args.eval:
                simclr.train_linear_classifier(args.epochs, data_loaders)
            else:
                simclr.train(data_loaders)
        else:
            simclr = SimCLR(model=model, optimizer=optimizer, scheduler=scheduler, classifier_model=classifier_model, classifier_optimizer=classifier_optimizer, args=args)
            if args.eval:
                simclr.train_linear_classifier(args.epochs, data_loaders)
            else:
                simclr.train(data_loaders)
if __name__ == "__main__":
main()
| 2.109375 | 2 |
tests/test_query.py | pooya/disco | 786 | 12762560 | <filename>tests/test_query.py<gh_stars>100-1000
from disco.test import TestCase, TestPipe
from disco.compat import bytes_to_str, str_to_bytes
from disco.worker.pipeline.worker import Stage
from disco.worker.task_io import task_input_stream
import csv
from functools import partial
import hashlib
PREFIX='/tmp/'
def read(interface, state, label, inp):
    """First pipeline stage: parse each referenced CSV and shard its rows.

    Each input URL encodes "<fileName>?<joinColumn>"; the first CSV row holds
    the table name, every following row is emitted under a label derived from
    the MD5 hash of its join-column value (mod 160), so matching rows of both
    tables land on the same label for the join stage.
    """
    from disco import util
    for e in inp:
        scheme, netloc, _ = util.urlsplit(e)
        fileName, joinColumn = str(netloc).split('?')
        File = open(PREFIX + fileName, 'r')
        col = int(joinColumn)
        reader = csv.reader(File)
        firstRow = True
        for row in reader:
            if firstRow:
                # First row carries only the table name.
                tableName = row[0]
                firstRow = False
            else:
                # Key is "<table>?<col>" so join_done can recover the column.
                fullName = tableName + '?' + str(col)
                # Same join value -> same hash bucket on both tables.
                Hash = int(hashlib.md5(str_to_bytes(row[col])).hexdigest(), 16) % 160
                interface.output(Hash).add(fullName, row)
def join_init(interface, params):
    """Create the empty per-label state dict for the join stage."""
    return dict()
def join(interface, state, label, inp):
    """Accumulate rows from *inp* into *state*, keyed by table name."""
    for key, row in inp:
        state.setdefault(key, []).append(row)
def join_done(interface, state):
    """Emit the inner join of the two buffered tables.

    *state* maps "<table>?<col>" to its rows; when exactly two tables are
    present, every pair of rows agreeing on their join columns is output as
    (join_value, remaining columns of both rows). Otherwise nothing is emitted.
    """
    if len(state) != 2:
        return
    keys = list(state.keys())
    name_a, name_b = keys[0], keys[1]
    col_a = int(name_a.split('?')[1])
    col_b = int(name_b.split('?')[1])
    for row_a in state[name_a]:
        for row_b in state[name_b]:
            if row_a[col_a] != row_b[col_b]:
                continue
            # Drop the join column from each row before concatenating.
            rest_a = row_a[:col_a] + row_a[col_a + 1:]
            rest_b = row_b[:col_b] + row_b[col_b + 1:]
            interface.output(0).add(row_a[col_a], rest_a + rest_b)
def combine_init(interface, params, init):
    """Build the initial combine state by invoking the *init* factory."""
    state = init()
    return state
def combine(interface, state, label, inp, func):
    """Fold every (key, value) record from *inp* into *state* via *func*."""
    for record_key, record_value in inp:
        func(state, record_key, record_value)
def combine_done(interface, state):
    """Flush the aggregated (key, value) pairs to output label 0."""
    for key, value in state.items():
        interface.output(0).add(key, value)
def _getPipeline():
    """Assemble the 4-stage join pipeline: read/shard, per-label join,
    per-node count combine, and a final global sum combine."""
    select_stage = [("split", Stage('read', process=read))]
    join_stage = [("group_label", Stage('join', init=join_init, process=join, done=join_done))]
    def combine_row(state, k, v, func):
        # Accumulate func(v) per key: func is 1 for counting, identity for summing.
        if k not in state:
            state[k] = 0
        state[k] = state[k] + func(v)
    node_combine_stage = [("group_node_label",
                           Stage('node_combine', init=partial(combine_init, init=lambda: {}),
                                 process=partial(combine, func=partial(combine_row, func=lambda v: 1)),
                                 done=combine_done))]
    combine_all_stage = [("group_label",
                          Stage('combine_all', init=partial(combine_init, init=lambda: {}),
                                process=partial(combine, func=partial(combine_row, func=lambda v: v)),
                                done=combine_done))]
    return select_stage + join_stage + node_combine_stage + combine_all_stage
class PipeJob(TestPipe):
    """Disco pipe job wired to the join pipeline built by _getPipeline()."""
    pipeline = _getPipeline()
class JoinTestCase(TestCase):
    """End-to-end test: join cities.csv (col 0) with packages.csv (col 3)."""
    #input contains the file name and the join column
    input = ['raw://cities.csv?0', 'raw://packages.csv?3']

    def SetUpFiles(self):
        """Write both CSV fixtures under PREFIX; the first row is the table name."""
        F1 = open(PREFIX + 'cities.csv', 'w')
        F1.write("cities\nEdmonton,-45\nCalgary,-35\nMontreal,-25\nToronto,-15\n")
        F1.close()
        F2 = open(PREFIX + 'packages.csv', 'w')
        F2.write("packages\n0,2013-10-2,2013-11-3,Edmonton,Calgary\n" +
                 "1,2013-11-3,2013-12-3,Calgary,Toronto\n" +
                 "2,2013-10-4,2013-10-6,Edmonton,Montreal\n")
        F2.close()

    def serve(self, path):
        # Inputs are raw:// URLs; serving just echoes the path back.
        return path

    def test_per_node(self):
        """Cities appearing as a package origin are counted once per occurrence."""
        self.SetUpFiles()
        self.job = PipeJob().run(input=self.test_server.urls(self.input))
        self.assertEqual(sorted(self.results(self.job)), [('Calgary', 1), ('Edmonton', 2)])
| 2.390625 | 2 |
pbtranscript/tasks/TPickles.py | ylipacbio/pbtranscript | 0 | 12762561 | <filename>pbtranscript/tasks/TPickles.py
#!/usr/bin/env python
"""
Class ChunkTask defines info used for chunk tasks ICE, ice_partial
and ice_polish.
Class ChunkTasksPickle reads and writes ChunkTask objects from/to
input/output pickle files.
"""
import cPickle
import os.path as op
from pbcore.io import ContigSet
from pbtranscript.ice.IceFiles import IceFiles
def n_reads_in_contigset(contigset_file):
    """Return number of reads in a contigset"""
    cs = ContigSet(contigset_file)
    # Requires an existing index; numRecords comes from it.
    cs.assertIndexed()
    return int(cs.numRecords)
def n_reads_in_contigsets(contigset_files):
    """Return the read count of every contigset file, as a list of ints."""
    return list(map(n_reads_in_contigset, contigset_files))
class ChunkTask(object):
    """
    An instance of class represents a chunk task.

    Attributes:
        cluster_bin_index -- index of the cluster bin this task belongs to
        flnc_file -- full-length non-chimeric reads (contigset) of the bin
        cluster_out_dir -- output directory of the bin
        n_flnc_reads -- cached read count of flnc_file (0 when it is missing)
    """
    def __init__(self, cluster_bin_index, flnc_file, cluster_out_dir):
        self.cluster_bin_index = cluster_bin_index
        self.flnc_file = flnc_file
        self.cluster_out_dir = cluster_out_dir
        # Count reads only when the flnc file already exists on disk.
        self.n_flnc_reads = 0
        if op.exists(self.flnc_file):
            self.n_flnc_reads = n_reads_in_contigset(self.flnc_file)
            #raise IOError("Could not find flnc_file %s" % self.flnc_file)

    @property
    def consensus_isoforms_file(self):
        """Return output consensus isoform file, cluster_out/output/final.consensus.fasta"""
        return IceFiles(root_dir=self.cluster_out_dir, prog_name="", no_log_f=True).final_consensus_fa

    @property
    def flnc_pickle(self):
        """Return output flnc pickle file, cluster_out/output/final.pickle"""
        return IceFiles(root_dir=self.cluster_out_dir, prog_name="", no_log_f=True).final_pickle_fn

    def __str__(self):
        strs = ["{cls} obj:".format(cls=self.__class__.__name__),
                "  cluster bin index {i}, ".format(i=self.cluster_bin_index),
                "  flnc file {f}, ".format(f=self.flnc_file),
                "  cluster out dir {d},".format(d=self.cluster_out_dir)]
        if hasattr(self, 'n_flnc_reads'):
            strs.append("  number of flnc reads {n}".format(n=self.n_flnc_reads))
        return "\n".join(strs)

    def __repr__(self):
        return self.__str__()

    def __eq__(self, other):
        # NOTE(review): __eq__ without a matching __hash__; instances are put
        # in a set() in ChunkTasksPickle — confirm identity hashing is intended.
        return self.cluster_bin_index == other.cluster_bin_index and \
               self.flnc_file == other.flnc_file and \
               self.cluster_out_dir == other.cluster_out_dir
class ClusterChunkTask(ChunkTask):
    """
    Each instance of class represents a ChunkTask of ICE clustering.
    """
    def __init__(self, cluster_bin_index, flnc_file, cluster_out_dir):
        # Same fields as ChunkTask; subclass exists for its nfl_pickle output.
        super(ClusterChunkTask, self).__init__(cluster_bin_index=cluster_bin_index,
                                               flnc_file=flnc_file,
                                               cluster_out_dir=cluster_out_dir)

    @property
    def nfl_pickle(self):
        """Return output nfl pickle file, cluster_out/output/nfl.all.partial_uc.pickle
        """
        return IceFiles(prog_name="", root_dir=self.cluster_out_dir, no_log_f=True).nfl_all_pickle_fn
class PartialChunkTask(ChunkTask):
    """
    Each instance of class represents an ice_partial chunk task.
    """
    def __init__(self, cluster_bin_index, flnc_file, cluster_out_dir,
                 nfl_file, nfl_index, n_nfl_chunks):
        # nfl_file -- the non-full-length chunk this task processes;
        # nfl_index -- index of that chunk; n_nfl_chunks -- total nfl chunks.
        super(PartialChunkTask, self).__init__(cluster_bin_index=cluster_bin_index,
                                               flnc_file=flnc_file,
                                               cluster_out_dir=cluster_out_dir)
        self.nfl_file = nfl_file
        self.nfl_index = int(nfl_index)
        self.n_nfl_chunks = int(n_nfl_chunks)

    def __str__(self):
        return "\n".join([super(PartialChunkTask, self).__str__(),
                          "  nfl file {i}/{n}: {f}, ".format(i=self.nfl_index,
                                                             n=self.n_nfl_chunks,
                                                             f=self.nfl_file)])

    def __eq__(self, other):
        return super(PartialChunkTask, self).__eq__(other) and \
               self.nfl_index == other.nfl_index and \
               self.n_nfl_chunks == other.n_nfl_chunks and \
               self.nfl_file == other.nfl_file

    @property
    def nfl_pickle(self):
        """Return output nfl pickle of the i-th chunk."""
        return IceFiles(prog_name="", root_dir=self.cluster_out_dir, no_log_f=True).nfl_pickle_i(self.nfl_index)
class PolishChunkTask(ChunkTask):
    """Class represents an ice_poish (quiver|arrow) chunk task."""
    def __init__(self, cluster_bin_index, flnc_file, cluster_out_dir,
                 polish_index, n_polish_chunks):
        """
        ice_polish (quiver|arrow) chunk (index, num_chunks) of a particular bin.
        Parameters:
          polish_index -- index of this chunked task in all ice_polish tasks
          n_polish_chunks -- total number of ice_polish chunks
        """
        super(PolishChunkTask, self).__init__(cluster_bin_index=cluster_bin_index,
                                              flnc_file=flnc_file,
                                              cluster_out_dir=cluster_out_dir)
        self.polish_index = int(polish_index)
        self.n_polish_chunks = int(n_polish_chunks)
        # polish_index must be a valid 0-based chunk index.
        assert self.polish_index >= 0
        assert self.polish_index < self.n_polish_chunks

    @property
    def nfl_pickle(self):
        """Return output nfl pickle file, cluster_out/output/nfl.all.partial_uc.pickle
        """
        return IceFiles(prog_name="", root_dir=self.cluster_out_dir, no_log_f=True).nfl_all_pickle_fn

    def __str__(self):
        desc = [super(PolishChunkTask, self).__str__(),
                "ice_polish chunk {x}/{n} for cluster bin {y}".
                format(x=self.polish_index, n=self.n_polish_chunks,
                       y=self.cluster_bin_index)]
        return "\n".join(desc)

    def __eq__(self, other):
        return super(PolishChunkTask, self).__eq__(other) and \
               self.polish_index == other.polish_index and \
               self.n_polish_chunks == other.n_polish_chunks
class ChunkTasksPickle(object):
    """Read and write a list of ChunkTask objects to/from a pickle file.

    Behaves like a list of ChunkTask objects, with helpers for sorting,
    grouping by workload and spawning per-group pickles.
    Fix: removed a leftover Python-2 debug ``print t`` loop in
    sort_and_group_tasks (it broke Python 3 parsing and spammed stdout).
    """
    def __init__(self, chunk_tasks=None):
        """chunk_tasks -- optional list of ChunkTask objects (defaults to [])."""
        if chunk_tasks is None:
            chunk_tasks = []
        assert isinstance(chunk_tasks, list)
        assert all([isinstance(task, ChunkTask) for task in chunk_tasks])
        self.chunk_tasks = chunk_tasks

    def write(self, out_pickle_file):
        """Write chunk tasks to output pickle."""
        with open(out_pickle_file, 'wb') as f:
            cPickle.dump(self.chunk_tasks, f)

    def sorted_no_redundant_cluster_bins(self):
        """Return a list of unique (cluster_bin_index, cluster_out_dir) tuples
        sorted by cluster_bin_index"""
        return sorted(list(set([(task.cluster_bin_index, task.cluster_out_dir)
                                for task in self.chunk_tasks])),
                      key=lambda x: x[0])

    @staticmethod
    def read(in_pickle_file):
        """Read a ChunkTasksPickle object from a pickle file."""
        with open(in_pickle_file, 'rb') as f:
            a = cPickle.load(f)
        return ChunkTasksPickle(a)

    def append(self, chunk_task):
        """Append this chunk_task to self.chunk_tasks."""
        self.chunk_tasks.append(chunk_task)

    def sorted_by_attr(self, attr, reverse=False):
        """Sort chunk_tasks in place by attribute attr."""
        assert all([hasattr(task, attr) for task in self.chunk_tasks])
        self.chunk_tasks = sorted(self.chunk_tasks,
                                  key=lambda x: getattr(x, attr),
                                  reverse=reverse)

    @property
    def n_flnc_reads_in_bins(self):
        """Return number of flnc reads in each ChunkTask object."""
        return n_reads_in_contigsets([task.flnc_file for task in self.chunk_tasks])

    def sort_and_group_tasks(self, max_nchunks):
        """
        Scatter chunks according to # of flnc reads in each chunk and max_nchunks,
        return groups where groups[i] contains indices of tasks in the i-th group.
        First sort and then group chunk_tasks into no greater than {max_nchunks}
        groups so that the total number of flnc reads in each group is roughly
        the same.
        """
        # sort tasks by weight (n of flnc reads in task) reversely
        self.sorted_by_attr(attr='n_flnc_reads', reverse=True)

        # Create groups where each group contains a list of task indices.
        groups = [[] for dummy_i in range(max_nchunks)]

        # Simple grouping: first spread heavy tasks as much as possible,
        # then assign lighter ones round-robin.
        for i in range(0, min(max_nchunks, len(self))):
            groups[i].append(i)

        bin_index = min(max_nchunks, len(self))
        while bin_index < len(self):
            groups[bin_index % max_nchunks].append(bin_index)
            bin_index += 1

        # Remove empty groups
        groups = [g for g in groups if len(g) > 0]
        return groups

    def spawn_pickles(self, out_pickle_fns):
        """Create n pickles each containing exactly one ChunkTask obj in list"""
        if len(out_pickle_fns) != len(self):
            raise ValueError("Num of spawn pickle %s does not match %s ChunkTask objs"
                             % (len(out_pickle_fns), len(self)))
        for task, out_pickle_fn in zip(self.chunk_tasks, out_pickle_fns):
            ChunkTasksPickle([task]).write(out_pickle_fn)

    def spawn_pickles_by_groups(self, groups, out_pickle_fns):
        """self.chunk_tasks are grouped, create len(groups) pickles according to groups,
        where groups[i] contains all chunk task in the i-th group.
        """
        assert isinstance(groups, list)
        assert all([isinstance(g, list) for g in groups])
        # Every task index may appear in at most one group.
        all_items = [i for sublist in groups for i in sublist]
        assert len(set(all_items)) == len(all_items)
        if len(out_pickle_fns) != len(groups):
            raise ValueError("Could not create %s spawn pickles from %d groups: %s"
                             % (len(out_pickle_fns), len(groups), groups))
        for group, out_pickle_fn in zip(groups, out_pickle_fns):
            ChunkTasksPickle([self.chunk_tasks[b] for b in group]).write(out_pickle_fn)

    def __str__(self):
        return "{cls} obj containing {n} chunk tasks:\n".\
            format(cls=self.__class__.__name__, n=len(self)) + \
            "\n".join([str(task) for task in self.chunk_tasks]) + "\n"

    def __repr__(self):
        return self.__str__()

    def __len__(self):
        return len(self.chunk_tasks)

    def __getitem__(self, index):
        return self.chunk_tasks[index]

    def __iter__(self):
        return self.chunk_tasks.__iter__()
| 2.65625 | 3 |
bitrise_reports/models.py | dotanuki-labs/bitrise-reports | 22 | 12762562 | <gh_stars>10-100
# models.py
from dataclasses import dataclass
from enum import Enum
from typing import Dict
from datetime import datetime
class MachineSize(Enum):
    """Build machine size tiers; values are the identifiers used by Bitrise."""
    g1small = "standard"
    g1medium = "elite"
    g1large = "elite-xl"
    g2small = "g2.4core"
    g2medium = "g2.8core"
    g2large = "g2.12core"
class BuildStack(Enum):
    """Operating-system stack a build runs on; values are the Bitrise names."""
    linux = "linux"
    osx = "macos"
class ExecutionStatus(Enum):
    """Terminal outcome of a build execution."""
    success = 1
    error = 2
    aborted = 3
@dataclass(frozen=True)
class BuildMachine:
    """A build machine: identifier plus size tier and OS stack."""
    id: str
    size: MachineSize
    stack: BuildStack
@dataclass(frozen=True)
class BitriseProject:
    """A Bitrise project, identified by id and URL slug."""
    id: str
    slug: str
@dataclass(frozen=True)
class BitriseWorkflow:
    """A named Bitrise workflow."""
    id: str
@dataclass(frozen=True)
class BuildMinutes:
    """Minutes spent queued vs building, plus their total."""
    queued: int
    building: int
    total: int
@dataclass(frozen=True)
class BitriseBuild:
    """One build execution of a project, with its machine, timing and outcome."""
    project: BitriseProject
    machine: BuildMachine
    # NOTE(review): annotated str although BitriseWorkflow exists — confirm.
    workflow: str
    minutes: BuildMinutes
    status: ExecutionStatus
    head_branch: str = None  # None when no branch information is available
@dataclass(frozen=True)
class CrunchedNumbers:
    """Aggregated build statistics for a group of builds."""
    count: int
    queued: int
    building: int
    total: int
    # The remaining fields are optional; None when the breakdown is unavailable.
    successes: int = None
    failures: int = None
    abortions: int = None
    credits: int = None
@dataclass(frozen=True)
class BitriseBreakdown:
    """A named report section mapping keys to their crunched numbers."""
    name: str
    details: Dict
@dataclass(frozen=True)
class EvaluationCriteria:
    """User-supplied filter: which Bitrise app and time window to analyse."""
    bitrise_app: str
    starting_at: datetime
    ending_at: datetime
| 2.4375 | 2 |
0x06-python-classes/0-square.py | Dev-Loup/holbertonschool-higher_level_programming | 0 | 12762563 | <filename>0x06-python-classes/0-square.py
#!/usr/bin/python3
"""Square class creation
"""
class Square:
    """Empty placeholder for the Square class; no attributes or methods yet."""
| 1.6875 | 2 |
moon/vehicles/rover.py | robotika/osgar | 12 | 12762564 | <reponame>robotika/osgar
"""
Moon Rover Driver
"""
# source: (limited access)
# https://gitlab.com/scheducation/srcp2-competitors/-/wikis/Documentation/API/Simulation_API
# Motor Drive Command Topics
# /name/fl_wheel_controller/command
# /name/fr_wheel_controller/command
# /name/bl_wheel_controller/command
# /name/br_wheel_controller/command
# Steering Arm Control Topics
# /name/fr_steering_arm_controller/command
# /name/fl_steering_arm_controller/command
# /name/bl_steering_arm_controller/command
# /name/br_steering_arm_controller/command
# Info
# /name/joint_states sensor_msgs/JointStates
# /name/skid_cmd_vel geometry_msgs/Twist
# /name/get_true_pose
# Sensors
# /name/laser/scan sensor_msgs/LaserScan
# /name/camera/<side>/image_raw sensor_msgs/Image
# /name/imu sensor_msgs/Imu
# /name/joint_states sensor_msgs/JointStates
# This is a message that holds data to describe the state of a set of torque controlled joints.
#
# The state of each joint (revolute or prismatic) is defined by:
# * the position of the joint (rad or m),
# * the velocity of the joint (rad/s or m/s) and
# * the effort that is applied in the joint (Nm or N).
#
# Each joint is uniquely identified by its name
# The header specifies the time at which the joint states were recorded. All the joint states
# in one message have to be recorded at the same time.
# sensor_joint
# bl_arm_joint
# bl_steering_arm_joint
# bl_wheel_joint
# br_arm_joint
# br_steering_arm_joint
# br_wheel_joint
# fl_arm_joint
# fl_steering_arm_joint
# fl_wheel_joint
# fr_arm_joint
# fr_steering_arm_joint
# fr_wheel_joint
# Sensor Joint Controller
# /name/sensor_controller/command
import math
from datetime import timedelta
from osgar.lib.mathex import normalizeAnglePIPI
from moon.moonnode import MoonNode, WHEEL_RADIUS, WHEEL_SEPARATION_WIDTH, WHEEL_SEPARATION_LENGTH
from moon.motorpid import MotorPID
from moon.odometry import Odometry
# Wheel order used throughout: front-left, front-right, back-left, back-right.
WHEEL_NAMES = ['fl', 'fr', 'bl', 'br']
# Steering-arm angle (rad, ~45 deg) applied to the wheels when turning in place.
CRAB_ROLL_ANGLE = 0.78
class Rover(MoonNode):
    """Low-level driver for the simulated lunar rover.

    Consumes desired-motion inputs (speed, circle radius, camera heading)
    and joint-state feedback, and publishes:
      - 'cmd'            raw rover command: 4 steering angles + 4 motor efforts
      - 'odo_pose'       integrated odometry pose [mm, mm, deg * 100]
      - 'desired_speeds' currently commanded linear/angular speed + steering
    """

    def __init__(self, config, bus):
        super().__init__(config, bus)
        bus.register('cmd', 'odo_pose', 'desired_speeds')
        # General driving parameters:
        # - drive_radius: radius of the circle to drive around; float("inf")
        #   when going straight, 0 when turning in place; positive turns left,
        #   negative turns right (meters, per on_desired_movement docs).
        # - drive_camera_angle: direction of the camera vs the tangent to the
        #   circle, in degrees * 100; 0 when looking straight ahead, positive
        #   when looking sideways to the left, negative to the right.
        # - drive_speed: -1000..1000 (0 = stop, 1000 = maximum feasible speed
        #   given the type of motion).  When turning in place, positive speed
        #   turns counterclockwise, negative clockwise.
        self.drive_radius = float("inf")
        self.drive_camera_angle = 0
        self.drive_speed = 0
        self.joint_name = None  # list of joint names (bytes); updated via Node.update()
        self.debug_arr = []  # [t, fl, fr, bl, br] wheel-speed samples for draw()
        self.verbose = False
        self.prev_position = None  # last joint-position array, used to verify steering
        self.odom = Odometry()
        self.roll = 0.0
        self.pitch = 0.0
        self.yaw = 0.0
        self.yaw_offset = None  # captured on the first on_rot() so yaw starts at zero
        self.in_driving_recovery = False
        self.steering_wait_start = None  # sim_time when waiting for steering began
        self.steering_wait_repeat = None  # sim_time of last timed-out wait (repeat guard)
        self.steering_angle = 0.0  # mean angle of the two front steering arms
        self.motor_pid = [MotorPID(p=40.0) for __ in WHEEL_NAMES]  # TODO tune PID params

    def on_driving_recovery(self, data):
        """Remember whether a driving-recovery maneuver is in progress."""
        self.in_driving_recovery = data

    def on_desired_speed(self, data):
        """Handle legacy (linear, angular) speed commands.

        linear and angular are mutually exclusive: non-zero angular turns in
        place, non-zero linear goes straight, both zero stops the rover.
        """
        # self.desired_linear_speed, self.desired_angular_speed = data[0]/1000.0, math.radians(data[1]/100.0)
        linear, angular = data
        if linear == 0 and angular == 0:
            self.drive_radius = self.drive_speed = 0
        elif angular != 0:
            self.drive_radius = 0  # turn in place
            self.drive_speed = 1000 * angular / (100 * 60)  # max angular speed is 60 deg/sec, value provided in 100 multiple
        else:  # linear is non-zero
            self.drive_radius = float("inf")  # going straight
            self.drive_speed = linear
        self.drive_camera_angle = 0  # only 0, positive (looking left 90 degrees when going forward) and negative (right) are supported

    def on_desired_movement(self, data):
        """Handle full movement commands: (radius, camera_angle, speed).

        The rover will go forward in a circle given:
          - circle radius (m); use float("inf") to go straight;
          - angle between the circle center and the camera direction, in
            degrees * 100 (positive if center is left of the camera); NOTE:
            currently only the sign matters (look left/right 90 degrees);
          - drive_speed: linear speed in 1000 * m/s.
        """
        self.drive_radius, self.drive_camera_angle, self.drive_speed = data

    def on_rot(self, data):
        """Update roll/pitch/yaw from an orientation message (degrees * 100)."""
        rot = data
        (temp_yaw, self.pitch, self.roll) = [normalizeAnglePIPI(math.radians(x/100)) for x in rot]
        if self.yaw_offset is None:
            self.yaw_offset = -temp_yaw  # zero out the initial heading
        self.yaw = temp_yaw + self.yaw_offset
        #print ("yaw: %f, pitch: %f, roll: %f" % (self.yaw, self.pitch, self.roll))

    def on_joint_position(self, data):
        """Integrate odometry from joint positions and publish 'odo_pose'.

        TODO: this only works for going straight, possibly with turning;
        it does not work at all for other moves such as going sideways.
        """
        assert self.joint_name is not None
        left_wheel_angle = b'fl_steering_arm_joint'
        right_wheel_angle = b'fr_steering_arm_joint'
        self.steering_angle = (data[self.joint_name.index(left_wheel_angle)] + data[self.joint_name.index(right_wheel_angle)]) / 2.0
        self.odom.update_joint_position(self.joint_name, data)
        x, y, heading = self.odom.pose2d
        self.bus.publish('odo_pose', [round(x * 1000),
                                      round(y * 1000),
                                      round(math.degrees(heading) * 100)])
        self.prev_position = data  # TODO remove dependency

    def on_joint_velocity(self, data):
        """Convert wheel joint velocities to surface speeds and feed the PIDs."""
        assert self.joint_name is not None
        speed = []
        for i, wheel in enumerate(WHEEL_NAMES):  # cycle through fl, fr, bl, br
            s = WHEEL_RADIUS * data[self.joint_name.index(bytes(wheel, 'ascii') + b'_wheel_joint')]
            speed.append(s)
            self.motor_pid[i].update(s)
        if self.verbose:
            self.debug_arr.append([self.time.total_seconds(),] + speed)

    def on_joint_effort(self, data):
        """Compute steering + effort, run efforts through the PIDs and publish 'cmd'."""
        assert self.joint_name is not None
        steering, effort = self.get_steering_and_effort()
        ##### integrate PID start #####
        for i, e in enumerate(effort):
            self.motor_pid[i].set_desired_speed(e/40)  # TODO review values 0, 40, 60, 120
        effort = []
        for m in self.motor_pid:
            effort.append(m.get_effort())
        ###### integrate PID end ######
        cmd = b'cmd_rover %f %f %f %f %f %f %f %f' % tuple(steering + effort)
        self.bus.publish('cmd', cmd)

    def get_steering_and_effort(self):
        """Return ([fl, fr, rl, rr] steering angles, [fl, fr, rl, rr] efforts)
        for the currently requested motion, and publish 'desired_speeds'.
        """
        movement_type = 'none'
        steering = [0.0,] * 4
        if self.drive_speed == 0:
            effort = [0,] * 4
        elif self.drive_radius == 0:
            # turning in place if radius is 0 but speed is non-zero
            e = 40 * self.drive_speed / 1000.0
            movement_type = 'angular'
            effort = [-e, e, -e, e]
            steering = [-CRAB_ROLL_ANGLE,CRAB_ROLL_ANGLE,CRAB_ROLL_ANGLE,-CRAB_ROLL_ANGLE]
        else:
            movement_type = 'linear'
            # TODO: if large change of 'steering' values, allow time to apply before turning on 'effort'
            fl = fr = rl = rr = 0.0
            e = 80 * self.drive_speed / 1000.0
            effort = [e, e, e, e]
            if not math.isinf(self.drive_radius):
                # per-wheel angles derived from wheelbase geometry and turn radius
                sign = 1 if self.drive_radius > 0 else -1
                signed_width = -math.copysign(WHEEL_SEPARATION_WIDTH/2.0, self.drive_radius)
                fl = sign * WHEEL_SEPARATION_LENGTH / (abs(self.drive_radius) + signed_width)  # + if outer
                fr = sign * WHEEL_SEPARATION_LENGTH / (abs(self.drive_radius) - signed_width)
                rl = sign * -WHEEL_SEPARATION_LENGTH / (abs(self.drive_radius) + signed_width)
                rr = sign * -WHEEL_SEPARATION_LENGTH / (abs(self.drive_radius) - signed_width)
                if self.drive_camera_angle == 0:
                    pass
                elif self.drive_camera_angle == 9000:
                    # camera looks 90 degrees to the left: rotate wheel roles by one position
                    if self.drive_radius > 0:
                        temp = rr
                        rr = -math.pi/2 + fr
                        fr = -math.pi/2 + fl
                        fl = math.pi/2 + rl
                        rl = math.pi/2 + temp
                        effort = [-e, e, -e, e]
                    else:
                        temp = rr
                        rr = math.pi/2 + fr
                        fr = math.pi/2 + fl
                        fl = -math.pi/2 + rl
                        rl = -math.pi/2 + temp
                        effort = [e, -e, e, -e]
                elif self.drive_camera_angle == -9000:
                    # camera looks 90 degrees to the right: rotate wheel roles the other way
                    if self.drive_radius > 0:
                        temp = rr
                        rr = math.pi/2 + rl
                        rl = -math.pi/2 + fl
                        fl = -math.pi/2 + fr
                        fr = math.pi/2 + temp
                        effort = [-e, e, -e, e]
                    else:
                        temp = rr
                        rr = -math.pi/2 + rl
                        rl = math.pi/2 + fl
                        fl = math.pi/2 + fr
                        fr = -math.pi/2 + temp
                        effort = [e, -e, e, -e]
                else:
                    assert False, "Unsupported angle: " + str(self.drive_camera_angle)
            else:  # if driving straight but camera at an angle, point all wheels in the same direction for crab movement
                angle = math.radians(self.drive_camera_angle / 100.0)
                rr = fr = fl = rl = angle
            steering = [fl, fr, rl, rr]

        WAIT_TO_STEER_MS = 8000
        # stay put while joint angles are catching up
        if (
                self.drive_camera_angle != 0 and
                self.prev_position is not None and
                not self.in_driving_recovery
        ):
            if (
                    abs(self.prev_position[self.joint_name.index(b'bl_steering_arm_joint')] - steering[2]) > 0.2 or
                    abs(self.prev_position[self.joint_name.index(b'br_steering_arm_joint')] - steering[3]) > 0.2 or
                    abs(self.prev_position[self.joint_name.index(b'fl_steering_arm_joint')] - steering[0]) > 0.2 or
                    abs(self.prev_position[self.joint_name.index(b'fr_steering_arm_joint')] - steering[1]) > 0.2
            ):
                if (
                        (self.steering_wait_start is None or self.sim_time - self.steering_wait_start <= timedelta(milliseconds=WAIT_TO_STEER_MS)) and
                        (self.steering_wait_repeat is None or self.sim_time - self.steering_wait_repeat > timedelta(milliseconds=1000))
                ):
                    if self.steering_wait_start is None:
                        self.steering_wait_start = self.sim_time
                    # brake while steering angles are changing so that the robot doesn't roll away while wheels turning meanwhile
                    # use braking force 20 Nm/rad which should prevent sliding but can be overcome by motor effort
                    effort = [0.0,]*4
                else:
                    pass  # angles are not reached but attempt timed out, do not change 'effort'
            else:  # angles are reached
                if self.steering_wait_start is not None:
                    self.steering_wait_start = None  # angles were reached successfully, can wait any time again
                    self.steering_wait_repeat = None  # angles were reached successfully, can wait any time again
        # if attempt to steer without effort timed out, brakes off, start instant repeat prevention timer
        if self.steering_wait_start is not None and self.sim_time - self.steering_wait_start > timedelta(milliseconds=WAIT_TO_STEER_MS):
            self.steering_wait_start = None
            self.steering_wait_repeat = self.sim_time
        effort_sum = sum([abs(x) for x in effort])
        self.bus.publish('desired_speeds', [
            self.drive_speed / 1000.0 if effort_sum > 0 and movement_type == 'linear' else 0.0,
            math.copysign(math.radians(30), self.drive_speed) if effort_sum > 0 and movement_type == 'angular' else 0.0,
            self.steering_angle
        ])
        return steering, effort

    def draw(self):
        """Plot recorded wheel speeds over time (for debugging; needs matplotlib)."""
        import matplotlib.pyplot as plt
        arr = self.debug_arr
        t = [a[0] for a in arr]
        values = [a[1:] for a in arr]
        line = plt.plot(t, values, '-', linewidth=2)
        plt.xlabel('time (s)')
        plt.legend(WHEEL_NAMES)
        plt.show()
# vim: expandtab sw=4 ts=4
| 2.5 | 2 |
src/pattern_pascal_triangle1/main.py | pranshuag9/my-hackerblocks-codes | 0 | 12762565 | '''
@author: <NAME>
@problem: https://hack.codingblocks.com/app/practice/1/285/problem
Algorithm to Generate(arr, n):
for row:=0 to n step by 1,
for col:=0 to row + 1 step by 1,
Set arr[row][col] = 1 if column is 0 or equals row
Set arr[row][col] = (Sum of Diagonally Previous element and Upper previous element) if row > 1 and col > 0
Algorithm to Print(n):
for row:=0 to n step by 1
for col:=0 to (n - row) step by 1, Print(" ")
for col:=0 to (row + 1) step by 1,
for i:=0 to (4 - number of digits of n) step by 1, Print(" ")
Print(arr[row][col])
Print(newline)
'''
def generate_pascal_triangle(n):
    """Return an n x n matrix whose lower triangle holds Pascal's triangle.

    Cells above the diagonal stay 0.  Works for any n >= 0 (the original
    version had a stray `arr[0][1] = 1` that was never read and raised
    IndexError for n == 1).
    """
    arr = [[0 for _ in range(0, n)] for _ in range(0, n)]
    for row in range(0, n):
        for col in range(0, row + 1):
            if col == 0 or col == row:
                # edges of the triangle are always 1
                arr[row][col] = 1
            else:
                # interior cell: sum of the two cells above it
                arr[row][col] = arr[row - 1][col - 1] + arr[row - 1][col]
    return arr
def pascal_triangle1(n):
    """Print the first n rows of Pascal's triangle, right-aligned in
    4-character columns with a leading indent of (n - row) spaces.
    """
    if n in (0, 1):
        print(1)
        return
    triangle = generate_pascal_triangle(n)
    for row in range(n):
        # leading indent shrinks as rows get wider
        print(" " * (n - row), end='')
        for col in range(row + 1):
            value = triangle[row][col]
            # pad each entry to a 4-character cell
            print(" " * (4 - len(str(value))), end='')
            print(value, end='')
        print()
if __name__ == "__main__":
    # Read the number of rows from stdin and render the triangle.
    n = int(input().strip())
    pascal_triangle1(n)
foundation/www/service_providers.py | prafful1234/foundation | 59 | 12762566 | import frappe
no_cache = 1
def _get_providers(membership_type):
    """Return website-visible Service Providers whose member has the given
    membership type, optionally filtered by the requested country.

    Returns an empty list when no member of that type exists, so the caller
    can fall back to a placeholder card.  Building the filters fresh here
    fixes a bug where the previous tier's ``member`` filter leaked into the
    next tier's query (and where an empty tier matched ALL providers).
    """
    filters = dict()
    filters['show_in_website'] = 1
    if frappe.form_dict.country:
        filters['country'] = frappe.form_dict.country
    members = [d.name for d in frappe.get_all('Member', dict(membership_type=membership_type))]
    if not members:
        return []
    filters['member'] = ('in', members)
    return frappe.get_all('Service Provider',
        'title, introduction, `image`, route, website_url, country', filters)


def get_context(context):
    """Build the template context for the Service Providers listing page.

    Providers are grouped by membership tier (Gold, Silver, Individual);
    each tier falls back to a placeholder card when it has no providers.
    """
    context.form_dict = frappe.form_dict
    context.title = 'Service Providers'

    if frappe.form_dict.country:
        # country-filtered view gets a breadcrumb back to the full listing
        context.parents = [dict(label='All Service Providers',
            route='service-providers', title='All Service Providers')]

    context.gold_members = _get_providers('Gold')
    if context.gold_members:
        context.has_gold_member = 1
    else:
        context.gold_members = [dict(
            title='Your Company',
            introduction='Become a Gold Member today and get your company featured here',
            image='/assets/foundation/img/gold.png',
            route='/members',
            placeholder=True
        )]

    context.silver_members = _get_providers('Silver')
    if context.silver_members:
        context.has_silver_member = 1
    else:
        context.silver_members = [dict(
            title='Your Company',
            introduction='Become a silver Member today and get your company featured here',
            image='/assets/foundation/img/silver.png',
            route='/members',
            placeholder=True
        )]

    context.individual_members = _get_providers('Individual')
    if context.individual_members:
        context.has_individual_member = 1
    else:
        context.individual_members = [dict(
            title='Your Company',
            # fixed typo: "invidual" -> "individual"
            introduction='Become an individual member to list here',
            route='/members'
        )]
| 2.046875 | 2 |
scale_client/sensors/community_seismic_network/virtual_csn_server/import_fixer.py | prav33nv/scale_client | 3 | 12762567 | <reponame>prav33nv/scale_client
# coding=utf-8
"""
Enables protocol buffer support despite conflicting root package.
Notes
-----
Taken from App Engine Python Google Group. View the `original thread`_.
.. _original thread: https://groups.google.com/forum/
?fromgroups=#!topic/google-appengine/25EO1Arklfw
"""
import os
import sys
BASE_PACKAGE = 'google'
def FixImports(*packages):
    """Make sub-packages of BASE_PACKAGE importable from a local override dir.

    For BASE_PACKAGE and every dotted package given (relative to it), import
    the package and splice the sibling directory of this file into the
    package's __path__ so locally bundled modules are found despite the
    conflicting runtime-provided root package.
    """
    base_dir = os.path.dirname(__file__)

    def _import_and_extend(dotted_name):
        """Import a fully qualified package and add the local override path."""
        __import__(dotted_name, globals(), locals())
        module = sys.modules[dotted_name]
        override_path = os.path.join(base_dir, dotted_name.replace('.', '/'))
        # If the override path is already present we have fixed imports before.
        if override_path not in module.__path__:
            # Insert after the runtime path, but before anything else.
            module.__path__.insert(1, override_path)

    _import_and_extend(BASE_PACKAGE)
    for package in packages:
        # Each parent package must be imported before its children.
        prefix = BASE_PACKAGE
        for part in package.split('.'):
            prefix = '%s.%s' % (prefix, part)
            _import_and_extend(prefix)
| 2.15625 | 2 |
demisto_sdk/commands/create_artifacts/tests/content_artifacts_creator_test.py | sturmianseq/demisto-sdk | 42 | 12762568 | from contextlib import contextmanager
from filecmp import cmp, dircmp
from pathlib import Path
from shutil import copyfile, copytree, rmtree
import pytest
from demisto_sdk.commands.common.constants import PACKS_DIR, TEST_PLAYBOOKS_DIR
from demisto_sdk.commands.common.logger import logging_setup
from demisto_sdk.commands.common.tools import src_root
from TestSuite.test_tools import ChangeCWD
# Fixture locations used throughout this test module.
TEST_DATA = src_root() / 'tests' / 'test_files'
TEST_CONTENT_REPO = TEST_DATA / 'content_slim'  # slimmed-down regular content repo
TEST_PRIVATE_CONTENT_REPO = TEST_DATA / 'private_content_slim'  # created/removed by the private_repo fixture
UNIT_TEST_DATA = (src_root() / 'commands' / 'create_artifacts' / 'tests' / 'data')
COMMON_SERVER = UNIT_TEST_DATA / 'common_server'
ARTIFACTS_EXPECTED_RESULTS = TEST_DATA / 'artifacts'  # golden output trees
def same_folders(src1, src2):
    """Return True if the two directory trees contain the same entry names,
    recursively (contents of files are not compared, matching dircmp's
    default name-based comparison).

    Bug fix: the recursive result was previously discarded, so differences
    inside subdirectories never made the function return False.
    """
    dcmp = dircmp(src1, src2)
    if dcmp.left_only or dcmp.right_only:
        return False
    for sub_dcmp in dcmp.subdirs.values():
        if not same_folders(sub_dcmp.left, sub_dcmp.right):
            return False
    return True
@contextmanager
def destroy_by_ext(suffix: str):
    """Temporarily corrupt one repo file chosen by extension (json / yaml).

    On enter, the chosen file's content is replaced with invalid data; on
    exit, the original content is restored even if the body raised.
    """
    if suffix == 'json':
        target = TEST_CONTENT_REPO / "Packs" / "Sample01" / "Classifiers" / "classifier-sample_new.json"
    else:
        target = TEST_CONTENT_REPO / "Packs" / "Sample01" / "TestPlaybooks" / "playbook-sample_test1.yml"
    original_text = target.read_text()
    target.write_text("{123dfdsf,}\nfdsfdsf")
    try:
        yield
    finally:
        target.write_text(original_text)
@contextmanager
def duplicate_file():
    """Temporarily create a duplicate file name in the content repository.

    A copy of Sample01's playbook is placed in Sample02 on enter and removed
    again on exit, producing a duplicate-name conflict in between.
    """
    source = TEST_CONTENT_REPO / PACKS_DIR / "Sample01" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
    duplicate = TEST_CONTENT_REPO / PACKS_DIR / "Sample02" / TEST_PLAYBOOKS_DIR / "playbook-sample_test1.yml"
    try:
        copyfile(source, duplicate)
        yield
    finally:
        duplicate.unlink()
@contextmanager
def temp_dir():
    """Yield a temporary working directory under the unit-test data dir,
    removed (with its contents) on exit."""
    workdir = UNIT_TEST_DATA / 'temp'
    try:
        workdir.mkdir(parents=True, exist_ok=True)
        yield workdir
    finally:
        rmtree(workdir)
@pytest.fixture()
def mock_git(mocker):
    """Mock the Content singleton's git Repo so the working tree points at
    the slim test content repository instead of the real checkout."""
    from demisto_sdk.commands.common.content import Content
    # Mock git working directory
    mocker.patch.object(Content, 'git')
    Content.git().working_tree_dir = TEST_CONTENT_REPO
    yield
@pytest.fixture()
def private_repo():
    """Build a temporary 'private' content repo for one test.

    The private repo is a copy of the regular test content with the
    TestPlaybooks directory removed; the copy is deleted afterwards
    regardless of the test outcome.
    """
    try:
        copytree(TEST_CONTENT_REPO, TEST_PRIVATE_CONTENT_REPO)
        rmtree(TEST_PRIVATE_CONTENT_REPO / TEST_PLAYBOOKS_DIR)
        yield TEST_PRIVATE_CONTENT_REPO
    finally:
        rmtree(TEST_PRIVATE_CONTENT_REPO)
def test_modify_common_server_constants():
    """Modify global variables in CommonServerPython.py.

    When: CommonServerPython.py contains:
        - Global variable - CONTENT_RELEASE_VERSION = '0.0.0'
        - Global variable - CONTENT_BRANCH_NAME = ''
    Given: Parameters:
        - Content version x.x.x
        - Active branch - xxxx
    Then: CommonServerPython.py changes:
        - Global variable - CONTENT_RELEASE_VERSION = 'x.x.x'
        - Global variable - CONTENT_BRANCH_NAME = 'xxxx'
    Notes:
        - After test clean up changes.
    """
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
        modify_common_server_constants
    path_before = COMMON_SERVER / 'CommonServerPython.py'
    path_excepted = COMMON_SERVER / 'CommonServerPython_modified.py'
    old_data = path_before.read_text()  # keep original content to restore after the check
    modify_common_server_constants(path_before, '6.0.0', 'test')
    assert cmp(path_before, path_excepted)
    path_before.write_text(old_data)  # restore the fixture file for other tests
def test_dump_pack(mock_git):
    """dump_pack should write Sample01's artifacts identical to the expected
    golden tree under ARTIFACTS_EXPECTED_RESULTS."""
    import demisto_sdk.commands.create_artifacts.content_artifacts_creator as cca
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
        ArtifactsManager, Pack, create_dirs, dump_pack)
    cca.logger = logging_setup(0)

    with temp_dir() as temp:
        config = ArtifactsManager(artifacts_path=temp,
                                  content_version='6.0.0',
                                  zip=False,
                                  suffix='',
                                  cpus=1,
                                  packs=False)
        create_dirs(artifact_manager=config)
        dump_pack(artifact_manager=config, pack=Pack(TEST_CONTENT_REPO / PACKS_DIR / 'Sample01'))

        assert same_folders(src1=temp / 'content_packs' / 'Sample01',
                            src2=ARTIFACTS_EXPECTED_RESULTS / 'content' / 'content_packs' / 'Sample01')
def test_create_content_artifacts(mock_git):
    """Full artifact creation over the mocked content repo succeeds (exit
    code 0) and the output matches the expected golden tree."""
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
        ArtifactsManager

    with temp_dir() as temp:
        config = ArtifactsManager(artifacts_path=temp,
                                  content_version='6.0.0',
                                  zip=False,
                                  suffix='',
                                  cpus=1,
                                  packs=False)
        exit_code = config.create_content_artifacts()

        assert exit_code == 0
        assert same_folders(temp, ARTIFACTS_EXPECTED_RESULTS / 'content')
def test_create_private_content_artifacts(private_repo):
    """Artifact creation over a private repo (no TestPlaybooks) succeeds and
    matches the private golden tree."""
    from demisto_sdk.commands.common.content import Content
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
        ArtifactsManager

    with temp_dir() as temp:
        config = ArtifactsManager(artifacts_path=temp,
                                  content_version='6.0.0',
                                  zip=False,
                                  suffix='',
                                  cpus=1,
                                  packs=False)
        # point the manager at the temporary private repo instead of the default
        config.content = Content(private_repo)
        config.packs = config.content.packs
        exit_code = config.create_content_artifacts()

        assert same_folders(temp, ARTIFACTS_EXPECTED_RESULTS / 'private')
        assert exit_code == 0
@pytest.mark.parametrize(argnames="suffix", argvalues=["yml", "json"])
def test_malformed_file_failure(suffix: str, mock_git):
    """Artifact creation must fail (exit code 1) when a yml/json file in the
    content repo is malformed (corrupted via destroy_by_ext)."""
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
        ArtifactsManager

    with temp_dir() as temp:
        config = ArtifactsManager(artifacts_path=temp,
                                  content_version='6.0.0',
                                  zip=False,
                                  suffix='',
                                  cpus=1,
                                  packs=False)
        with destroy_by_ext(suffix):
            exit_code = config.create_content_artifacts()

    assert exit_code == 1
def test_duplicate_file_failure(mock_git):
    """Artifact creation must fail (exit code 1) when two packs contain a
    file with the same name (created via the duplicate_file context)."""
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import \
        ArtifactsManager

    with temp_dir() as temp:
        config = ArtifactsManager(artifacts_path=temp,
                                  content_version='6.0.0',
                                  zip=False,
                                  suffix='',
                                  cpus=1,
                                  packs=False)
        with duplicate_file():
            exit_code = config.create_content_artifacts()

    assert exit_code == 1
@pytest.mark.parametrize('key, tool', [('some_key', False), ('', True)])
def test_sign_packs_failure(repo, capsys, key, tool):
    """
    When:
        - Signing a pack.
    Given:
        - Pack object.
        - Signature key without the signing tool, or vice-versa.
    Then:
        - Verify that exceptions are written to the logger.
    """
    import demisto_sdk.commands.create_artifacts.content_artifacts_creator as cca
    from demisto_sdk.commands.create_artifacts.content_artifacts_creator import (
        ArtifactsManager, sign_packs)
    cca.logger = logging_setup(2)

    with ChangeCWD(repo.path):
        with temp_dir() as temp:
            artifact_manager = ArtifactsManager(artifacts_path=temp,
                                                content_version='6.0.0',
                                                zip=False,
                                                suffix='',
                                                cpus=1,
                                                packs=True,
                                                signature_key=key)

            # Provide only one of (key, tool): write a dummy signing tool so
            # the mismatch -- and not a missing file -- triggers the failure.
            if tool:
                with open('./tool', 'w') as tool_file:
                    tool_file.write('some tool')

                artifact_manager.signDirectory = Path(temp / 'tool')

            sign_packs(artifact_manager)

    captured = capsys.readouterr()
    assert 'Failed to sign packs. In order to do so, you need to provide both signature_key and ' \
           'sign_directory arguments.' in captured.out
| 2.15625 | 2 |
tests/models/test_color666_pauli.py | dua-arpit/qecsim | 35 | 12762569 | <filename>tests/models/test_color666_pauli.py
import pytest
from qecsim import paulitools as pt
from qecsim.models.color import Color666Code
@pytest.mark.parametrize('size', [
    3, 5, 7, 9
])
def test_color666_pauli_properties(size):
    """A new Pauli exposes its code and has usable repr/str."""
    lattice = Color666Code(size)
    pauli = lattice.new_pauli()
    assert pauli.code == lattice
    assert isinstance(repr(pauli), str)
    assert isinstance(str(pauli), str)
@pytest.mark.parametrize('pauli', [
    Color666Code(5).new_pauli(),
    Color666Code(5).new_pauli().plaquette('X', (2, 0)).plaquette('Z', (5, 3)),
    Color666Code(5).new_pauli().logical_x().plaquette('X', (6, 5)).plaquette('Y', (4, 1)),
    Color666Code(5).new_pauli().logical_z().plaquette('Z', (1, 1)).plaquette('Z', (6, 2)),
    Color666Code(5).new_pauli().logical_x().plaquette('X', (5, 3)).plaquette('X', (4, 4)),
    Color666Code(5).new_pauli().logical_z().plaquette('Z', (4, 1)).plaquette('Z', (3, 2)),
])
def test_color666_pauli_new_to_bsf(pauli):
    """Round-tripping a Pauli through binary symplectic form preserves it."""
    assert pauli.code.new_pauli(pauli.to_bsf()) == pauli, (
        'Conversion to_bsf+from_bsf does not result in equality.')
@pytest.mark.parametrize('pauli', [
    Color666Code(5).new_pauli(),
    Color666Code(5).new_pauli().plaquette('X', (2, 0)).plaquette('Z', (5, 3)),
    Color666Code(5).new_pauli().logical_x().plaquette('X', (6, 5)).plaquette('Y', (4, 1)),
    Color666Code(5).new_pauli().logical_z().plaquette('Z', (1, 1)).plaquette('Z', (6, 2)),
    Color666Code(5).new_pauli().logical_x().plaquette('X', (5, 3)).plaquette('X', (4, 4)),
    Color666Code(5).new_pauli().logical_z().plaquette('Z', (4, 1)).plaquette('Z', (3, 2)),
])
def test_color666_pauli_copy(pauli):
    """copy() yields an equal but independent Pauli (mutating the copy does
    not affect the original)."""
    copy_pauli = pauli.copy()
    assert copy_pauli == pauli, 'Copy Pauli does not equal original Pauli'
    assert copy_pauli.logical_x() != pauli, 'Modified copy Pauli still equals original Pauli'
@pytest.mark.parametrize('pauli, index, expected', [
    (Color666Code(5).new_pauli(), (0, 0), 'I'),
    (Color666Code(5).new_pauli().site('X', (2, 2)), (2, 2), 'X'),
    (Color666Code(5).new_pauli().site('Y', (5, 5)), (5, 5), 'Y'),
    (Color666Code(5).new_pauli().site('Z', (4, 3)), (4, 3), 'Z'),
    (Color666Code(5).new_pauli().site('X', (2, 2)), (1, 0), 'I'),
    (Color666Code(5).new_pauli().site('Y', (5, 5)), (6, 4), 'I'),
    (Color666Code(5).new_pauli().site('Z', (4, 3)), (3, 1), 'I'),
])
def test_color666_pauli_operator(pauli, index, expected):
    """operator(index) reports the single-site operator applied at that site
    ('I' where nothing was applied)."""
    assert pauli.operator(index) == expected
@pytest.mark.parametrize('size, index', [
    (5, (2, 0)),  # not a site index
    (5, (3, 2)),  # not a site index
    (5, (-1, -1)),  # out of bounds
    (5, (2, -1)),  # out of bounds
    (5, (7, 0)),  # out of bounds
    (5, (6, 7)),  # out of bounds
    (5, (0, -1)),  # out of bounds and not a site index
])
def test_color666_pauli_operator_invalid_index(size, index):
    """operator() raises IndexError for non-site or out-of-bounds indices."""
    pauli = Color666Code(size).new_pauli()
    with pytest.raises(IndexError):
        pauli.operator(index)
@pytest.mark.parametrize('pauli, op_counts, message', [
    (Color666Code(5).new_pauli().site('I', (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2)),
     {'I': 18, 'X': 1, 'Y': 0, 'Z': 0}, 'site failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2)),
     {'I': 18, 'X': 0, 'Y': 1, 'Z': 0}, 'site failed.'),
    (Color666Code(5).new_pauli().site('Z', (2, 2)),
     {'I': 18, 'X': 0, 'Y': 0, 'Z': 1}, 'site failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2)).site('X', (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site self-inverse failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2)).site('Y', (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site self-inverse failed.'),
    (Color666Code(5).new_pauli().site('Z', (2, 2)).site('Z', (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site self-inverse failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2)).site('Z', (2, 2)),
     {'I': 18, 'X': 0, 'Y': 1, 'Z': 0}, 'site X+Z=Y failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2)).site('Y', (2, 2)),
     {'I': 18, 'X': 0, 'Y': 0, 'Z': 1}, 'site X+Y=Z failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2)).site('Z', (2, 2)),
     {'I': 18, 'X': 1, 'Y': 0, 'Z': 0}, 'site Y+Z=X failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2)).site('X', (4, 2)),
     {'I': 17, 'X': 2, 'Y': 0, 'Z': 0}, 'multi-site failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2)).site('Y', (4, 2)),
     {'I': 17, 'X': 0, 'Y': 2, 'Z': 0}, 'multi-site failed.'),
    (Color666Code(5).new_pauli().site('Z', (2, 2)).site('Z', (4, 2)),
     {'I': 17, 'X': 0, 'Y': 0, 'Z': 2}, 'multi-site failed.'),
    (Color666Code(5).new_pauli().site('X', (0, -2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site outside lattice failed.'),
    (Color666Code(5).new_pauli().site('X', (0, 1)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site outside lattice failed.'),
    (Color666Code(5).new_pauli().site('X', (7, 0)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site outside lattice failed.'),
    (Color666Code(5).new_pauli().site('X', (3, 4)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'site outside lattice failed.'),
])
def test_color666_pauli_site(pauli, op_counts, message):
    """site() applies the operator at one site; repeated applications compose
    as Pauli products, and out-of-lattice sites are silently ignored.  The
    expected op_counts are over the 19 sites of the size-5 code."""
    pauli = pt.bsf_to_pauli(pauli.to_bsf())
    for op, count in op_counts.items():
        assert pauli.count(op) == count, message
@pytest.mark.parametrize('pauli, op_counts, message', [
    (Color666Code(5).new_pauli().site('I', (2, 2), (4, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'sites failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2), (4, 2)),
     {'I': 17, 'X': 2, 'Y': 0, 'Z': 0}, 'sites failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2), (4, 2)),
     {'I': 17, 'X': 0, 'Y': 2, 'Z': 0}, 'sites failed.'),
    (Color666Code(5).new_pauli().site('Z', (2, 2), (4, 2)),
     {'I': 17, 'X': 0, 'Y': 0, 'Z': 2}, 'sites failed.'),
    (Color666Code(5).new_pauli().site('X', (2, 2), (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'sites self-inverse failed.'),
    (Color666Code(5).new_pauli().site('Y', (2, 2), (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'sites self-inverse failed.'),
    (Color666Code(5).new_pauli().site('Z', (2, 2), (2, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'sites self-inverse failed.'),
])
def test_color666_pauli_sites(pauli, op_counts, message):
    """site() accepts multiple indices in one call; repeating the same index
    within a call cancels (self-inverse)."""
    pauli = pt.bsf_to_pauli(pauli.to_bsf())
    for op, count in op_counts.items():
        assert pauli.count(op) == count, message
@pytest.mark.parametrize('size, operator, index', [
    (5, 'Z', (1, 1)),  # not a site index
    (5, 'X', (5, 3)),  # not a site index
])
def test_color666_pauli_site_invalid_index(size, operator, index):
    """site() raises IndexError when given a plaquette (non-site) index."""
    pauli = Color666Code(size).new_pauli()
    with pytest.raises(IndexError):
        pauli.site(operator, index)
@pytest.mark.parametrize('pauli, op_counts, message', [
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)),
     {'I': 13, 'X': 6, 'Y': 0, 'Z': 0}, 'X plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)).plaquette('X', (3, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'X plaquette self-inverse failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)).plaquette('X', (5, 3)),
     {'I': 11, 'X': 8, 'Y': 0, 'Z': 0}, 'X adjacent plaquettes failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (2, 0)),
     {'I': 15, 'X': 4, 'Y': 0, 'Z': 0}, 'X boundary plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (4, -2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'X outside lattice plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('Z', (3, 2)),
     {'I': 13, 'X': 0, 'Y': 0, 'Z': 6}, 'Z plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('Z', (3, 2)).plaquette('Z', (3, 2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'Z plaquette self-inverse failed.'),
    (Color666Code(5).new_pauli().plaquette('Z', (3, 2)).plaquette('Z', (5, 3)),
     {'I': 11, 'X': 0, 'Y': 0, 'Z': 8}, 'Z adjacent plaquettes failed.'),
    (Color666Code(5).new_pauli().plaquette('Z', (2, 0)),
     {'I': 15, 'X': 0, 'Y': 0, 'Z': 4}, 'Z boundary plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('Z', (4, -2)),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'Z outside lattice plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)).plaquette('Z', (3, 2)),
     {'I': 13, 'X': 0, 'Y': 6, 'Z': 0}, 'X+Z plaquette failed.'),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)).plaquette('Z', (5, 3)),
     {'I': 9, 'X': 4, 'Y': 2, 'Z': 4}, 'X+Z adjacent plaquettes failed.'),
])
def test_color666_pauli_plaquette(pauli, op_counts, message):
    """plaquette() applies the operator to every site of a plaquette (6 in
    the bulk, 4 on the boundary); plaquettes compose as Pauli products and
    out-of-lattice plaquettes are silently ignored."""
    pauli = pt.bsf_to_pauli(pauli.to_bsf())
    for op, count in op_counts.items():
        assert pauli.count(op) == count, message
@pytest.mark.parametrize('size, operator, index', [
    (5, 'X', (0, 0)),  # not a plaquette index
    (5, 'Z', (5, 2)),  # not a plaquette index
])
def test_color666_pauli_invalid_plaquette(size, operator, index):
    """plaquette() raises IndexError when given a site (non-plaquette) index."""
    pauli = Color666Code(size).new_pauli()
    with pytest.raises(IndexError):
        pauli.plaquette(operator, index)
@pytest.mark.parametrize('pauli, op_counts, message', [
    (Color666Code(5).new_pauli().logical_x(),
     {'I': 14, 'X': 5, 'Y': 0, 'Z': 0}, 'logical_x failed.'),
    (Color666Code(5).new_pauli().logical_x().logical_x(),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'logical_x self-inverse failed.'),
    (Color666Code(5).new_pauli().logical_z(),
     {'I': 14, 'X': 0, 'Y': 0, 'Z': 5}, 'logical_z failed.'),
    (Color666Code(5).new_pauli().logical_z().logical_z(),
     {'I': 19, 'X': 0, 'Y': 0, 'Z': 0}, 'logical_z self-inverse failed.'),
    (Color666Code(5).new_pauli().logical_x().logical_z(),
     {'I': 14, 'X': 0, 'Y': 5, 'Z': 0}, 'logical_x_z failed.'),
])
def test_color666_pauli_logical(pauli, op_counts, message):
    """logical_x/logical_z apply a 5-site logical operator on the size-5
    code; each is self-inverse, and X composed with Z gives Y on the
    shared support."""
    pauli = pt.bsf_to_pauli(pauli.to_bsf())
    for op, count in op_counts.items():
        assert pauli.count(op) == count, message
@pytest.mark.parametrize('pauli_1, pauli_2', [
    (Color666Code(5).new_pauli(), Color666Code(5).new_pauli()),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)), Color666Code(5).new_pauli().plaquette('X', (3, 2))),
    (Color666Code(5).new_pauli().logical_x(), Color666Code(5).new_pauli().logical_x()),
    (Color666Code(5).new_pauli().logical_z(), Color666Code(5).new_pauli().logical_z()),
])
def test_color666_pauli_eq(pauli_1, pauli_2):
    """Paulis built identically compare equal (and not unequal)."""
    assert pauli_1 == pauli_2
    assert not pauli_1 != pauli_2
@pytest.mark.parametrize('pauli_1, pauli_2', [
    (Color666Code(5).new_pauli(), Color666Code(5).new_pauli().plaquette('X', (3, 2))),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)), Color666Code(5).new_pauli().plaquette('Z', (3, 2))),
    (Color666Code(5).new_pauli().plaquette('X', (3, 2)), Color666Code(5).new_pauli().plaquette('X', (5, 3))),
    (Color666Code(5).new_pauli().logical_x(), Color666Code(5).new_pauli().logical_z()),
    (Color666Code(3).new_pauli(), Color666Code(5).new_pauli()),
    (Color666Code(3).new_pauli(), None),
])
def test_color666_pauli_ne(pauli_1, pauli_2):
    """Paulis differing in operators, code size, or type compare unequal."""
    assert pauli_1 != pauli_2
    assert not pauli_1 == pauli_2
yahoo_weather.py | PingRen32/Inkyphat_Assistant | 1 | 12762570 | from weather import Weather, Unit
import inkyphat
from PIL import Image, ImageDraw, ImageFont
# Fetch current conditions for West Lafayette through the `weather`
# package (Weather/Unit are imported at the top of this file).
weather = Weather(unit=Unit.CELSIUS)
location = weather.lookup_by_location('west lafayette')
condition = location.condition
# condition.date is unpacked into exactly 7 space-separated fields:
# weekday, day-of-month, month, year, clock time, AM/PM, timezone.
day, date, month, year, time, am_pm, zone = condition.date.split(" ")
inkyphat.set_colour("yellow")
inkyphat.set_border(inkyphat.BLACK)
# Background image plus a per-condition weather icon keyed by Yahoo's
# numeric condition code.
img = Image.open("resources/ShibaInu_resources/cute.png")
draw = ImageDraw.Draw(img)
icon_image = Image.open("resources/Weather/"+str(condition.code)+".png")
date_font = ImageFont.truetype(inkyphat.fonts.FredokaOne, 14)
font = ImageFont.truetype(inkyphat.fonts.FredokaOne, 12)
# Date / time header lines.
draw.text((18, 12), day + " " + date + " " + month + " " + year, inkyphat.BLACK, font=date_font)
draw.text((18, 32), time + " " + am_pm + " " + zone, inkyphat.BLACK, font=date_font)
# Condition description and temperature; chr(176) is the degree sign.
draw.text((50, 52), condition.text, inkyphat.BLACK, font=font)
draw.text((50, 72), condition.temp + " " + chr(176) + "C", inkyphat.BLACK, font=font)
img.paste(icon_image, (18, 52))
# Display the weather data on Inky pHAT
inkyphat.set_image(img,colswap=[2,0,1])
inkyphat.show()
context_propagation_python/module/__init__.py | AminoApps/context-propagation-python | 0 | 12762571 | <gh_stars>0
import logging
logger = logging.getLogger("context_propagation_python.module")
def auto_register():
    """Best-effort registration of the requests instrumentation.

    Failure to register (e.g. `requests` is not installed) is deliberately
    non-fatal: the error is logged at DEBUG level and execution continues.
    """
    try:
        from context_propagation_python.module.requests import register
        register()
    except Exception as e:
        # Lazy %-style logger args: the message is only formatted when
        # DEBUG logging is actually enabled (the original eagerly built
        # the string with `%` even when the record would be dropped).
        logger.debug("Register requests failed! %s", e)
| 2.4375 | 2 |
jactorch/transforms/coor/functional.py | dapatil211/Jacinle | 114 | 12762572 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
# File : functional.py
# Author : <NAME>
# Email : <EMAIL>
# Date : 03/03/2018
#
# This file is part of Jacinle.
# Distributed under terms of the MIT license.
import math
from PIL import Image
import numpy as np
import torchvision.transforms.functional as TF
import jactorch.transforms.image.functional as jac_tf
from jacinle.utils.argument import get_2dshape
def normalize_coor(img, coor):
    """Return (img, coor') with pixel coordinates scaled into [0, 1]:
    x divided by the image width, y by the image height.  The input
    array is not modified."""
    normalized = coor.copy()
    normalized[:, 0] /= img.width
    normalized[:, 1] /= img.height
    return img, normalized
def denormalize_coor(img, coor):
    """Inverse of normalize_coor: map [0, 1]-normalized coordinates back
    to pixel coordinates.  The input array is not modified."""
    denormalized = coor.copy()
    denormalized[:, 0] *= img.width
    denormalized[:, 1] *= img.height
    return img, denormalized
def crop(img, coor, i, j, h, w):
    """Crop img to the box at row i, column j with height h and width w,
    remapping the [0, 1]-normalized coordinates into the crop frame."""
    remapped = coor.copy()
    remapped[:, 0] = (remapped[:, 0] - j / img.width) * (img.width / w)
    remapped[:, 1] = (remapped[:, 1] - i / img.height) * (img.height / h)
    return TF.crop(img, i, j, h, w), remapped
def center_crop(img, coor, output_size):
    """Center-crop img to output_size (int or (h, w)), keeping the
    normalized coordinates in sync via crop()."""
    th, tw = get_2dshape(output_size)
    w, h = img.size
    top = int(round((h - th) / 2.))
    left = int(round((w - tw) / 2.))
    return crop(img, coor, top, left, th, tw)
def pad(img, coor, padding, mode='constant', fill=0):
    """Pad img and remap the normalized coordinates accordingly.

    `padding` may be a single int (all four sides), a (left, top) pair,
    or a (left, top, right, bottom) 4-tuple, in pixels.
    """
    if isinstance(padding, int):
        padding = (padding,) * 4
    elif len(padding) == 2:
        padding = (padding[0], padding[1], padding[0], padding[1])
    else:
        assert len(padding) == 4
    padded = jac_tf.pad(img, padding, mode=mode, fill=fill)
    remapped = coor.copy()
    remapped[:, 0] = (remapped[:, 0] + padding[0] / img.width) * (img.width / padded.width)
    remapped[:, 1] = (remapped[:, 1] + padding[1] / img.height) * (img.height / padded.height)
    return padded, remapped
def hflip(img, coor):
    """Mirror img horizontally; normalized x coordinates map to 1 - x."""
    flipped = coor.copy()
    flipped[:, 0] = 1 - flipped[:, 0]
    return TF.hflip(img), flipped
def vflip(img, coor):
    """Mirror img vertically; normalized y coordinates map to 1 - y."""
    flipped = coor.copy()
    flipped[:, 1] = 1 - flipped[:, 1]
    return TF.vflip(img), flipped
def resize(img, coor, size, interpolation=Image.BILINEAR):
    """Resize img to `size`.  Normalized coordinates are scale-invariant,
    so `coor` passes through unchanged."""
    # Assuming coordinates are 0/1-normalized.
    return TF.resize(img, size, interpolation=interpolation), coor
def resized_crop(img, coor, i, j, h, w, size, interpolation=Image.BILINEAR):
    """Crop img to (i, j, h, w) and then resize it to `size`, keeping the
    normalized coordinates in sync at both steps."""
    cropped, remapped = crop(img, coor, i, j, h, w)
    return resize(cropped, remapped, size, interpolation)
def refresh_valid(img, coor, force=False):
    """Recompute the validity flag of (x, y, valid)-typed coordinates.

    A point is valid only when its flag is 1 and it lies inside the image
    bounds; invalid points are replaced by (0, 0, 0).  Two-column (x, y)
    input is returned unchanged unless `force` is True, in which case a
    valid=1 column is appended first.  Returns (img, float32 array).
    """
    if coor.shape[1] == 2:
        if force:
            # BUGFIX: np.ones_like(coor[:, 0]) is 1-D and cannot be
            # concatenated with the (N, 2) array along axis=1; slicing
            # with [:, :1] keeps the column dimension.
            coor = np.concatenate([coor, np.ones_like(coor[:, :1])], axis=1)
        else:
            return img, coor
    assert coor.shape[1] == 3, 'Support only (x, y, valid) or (x, y) typed coordinates.'
    out = []
    for x, y, v in coor:
        valid = (v == 1) and (x >= 0) and (x < img.width) and (y >= 0) and (y < img.height)
        if valid:
            out.append((x, y, v))
        else:
            out.append((0., 0., 0.))
    return img, np.array(out, dtype='float32')
def rotate(img, coor, angle, resample, crop_, expand, center=None, translate=None):
    """Rotate img by `angle` degrees and apply the matching affine
    transform to the normalized coordinates.

    `expand` grows the canvas to fit the rotated image; `crop_` requests
    an extra crop computed by get_rotation_matrix().  `translate` is not
    supported and must be None.
    """
    assert translate is None
    img_new = TF.rotate(img, angle, resample=resample, expand=expand, center=center)
    matrix, extra_crop = get_rotation_matrix(img, angle, crop_, expand, center, translate)
    # Coordinates are transformed in pixel space, then re-normalized
    # against the (possibly expanded) output image.
    _, coor = denormalize_coor(img, coor)
    for i in range(coor.shape[0]):
        coor[i, :2] = apply_affine_transform(*coor[i, :2], matrix)
    _, coor = normalize_coor(img_new, coor)
    if extra_crop is not None:
        # extra_crop is (i, j, h, w) as expected by crop().
        img_new, coor = crop(img_new, coor, *extra_crop)
    return img_new, coor
def pad_multiple_of(img, coor, multiple, mode='constant', fill=0):
    """Pad img on the right/bottom so that both dimensions become
    multiples of `multiple`; no-op when they already are."""
    height, width = img.height, img.width
    target_h = height - height % multiple + multiple * int(height % multiple != 0)
    target_w = width - width % multiple + multiple * int(width % multiple != 0)
    if height != target_h or width != target_w:
        return pad(img, coor, (0, 0, target_w - width, target_h - height), mode=mode, fill=fill)
    return img, coor
def get_rotation_matrix(image, angle, crop, expand, center, translate):
    """Build the 2x3 affine matrix for a rotation of `angle` degrees
    about `center` (image center by default), optionally followed by a
    translation.

    Returns (matrix, extra_crop) where matrix is the flat 6-element
    affine [a, b, c, d, e, f] and extra_crop is either None or an
    (i, j, h, w) crop box derived from the rotated corner extents.
    NOTE(review): the `crop` parameter shadows the module-level crop()
    helper inside this function.
    """
    w, h = image.size
    if translate is None:
        translate = (0, 0)
    if center is None:
        center = (w / 2.0, h / 2.0)
    angle = math.radians(angle % 360)
    # Rounding to 15 decimals suppresses float noise in sin/cos at exact
    # multiples of 90 degrees.
    matrix = [
        round(math.cos(angle), 15), round(math.sin(angle), 15), 0.0,
        round(-math.sin(angle), 15), round(math.cos(angle), 15), 0.0
    ]
    # Rotate about `center`: shift center to origin, rotate, shift back
    # (plus the requested translation).
    matrix[2], matrix[5] = apply_affine_transform(-center[0], -center[1], matrix)
    matrix[2] += center[0] + translate[0]
    matrix[5] += center[1] + translate[1]
    # print('debug', angle, translate, center, matrix, apply_affine_transform(0.5, 0.5, matrix))
    if crop or expand:
        # Transformed x/y extents of the four image corners, sorted
        # ascending; used below to size the crop/expanded canvas.
        xx = []
        yy = []
        for x, y in ((0, 0), (w, 0), (w, h), (0, h)):
            x, y = apply_affine_transform(x, y, matrix)
            xx.append(x)
            yy.append(y)
        xx.sort()
        yy.sort()
    extra_crop = None
    if crop:
        assert not expand, 'Cannot use both expand and crop.'
        nw = int(math.ceil(xx[2]) - math.floor(xx[1]))
        nh = int(math.ceil(yy[2]) - math.floor(yy[1]))
        # CAUTION! extra_crop is of format (dy, dx, h, w)
        extra_crop = ((h - nh) // 2, (w - nw) // 2, nh, nw)
    if expand:
        nw = int(math.ceil(xx[3]) - math.floor(xx[0]))
        nh = int(math.ceil(yy[3]) - math.floor(yy[0]))
        matrix[2] += (nw - w) / 2.
        matrix[5] += (nh - h) / 2.
    return matrix, extra_crop
def apply_affine_transform(x, y, matrix):
    """Apply a flat 2x3 affine matrix (a, b, c, d, e, f) to the point
    (x, y), returning (a*x + b*y + c, d*x + e*y + f)."""
    a, b, c, d, e, f = matrix
    return a * x + b * y + c, d * x + e * y + f
| 2.28125 | 2 |
engine/Project.py | ddhoogduin/webapp-gen-directus-manager | 0 | 12762573 | from engine.Database import db
from prettytable import PrettyTable
from utils import GeneralHelper
class Project:
    """A project with a sanitized reference name, an optional display
    name and an optional database handle."""

    def __init__(self, ref, name=None, database=None):
        self.ref_name = GeneralHelper.prepare_string(ref)
        # BUGFIX: the original unconditionally re-assigned ``self.name = name``
        # right after preparing it, discarding prepare_name()'s result; it
        # also never set self.name at all when `name` was falsy, leaving the
        # attribute undefined.
        if name:
            self.name = GeneralHelper.prepare_name(name)
        else:
            self.name = name
        self.database = database
services/engine/model.py | chrkaatz/BitVision | 1,070 | 12762574 | <reponame>chrkaatz/BitVision
#########
# GLOBALS
#########
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
######
# MAIN
######
class Model(object):
    """Logistic-regression trend classifier with standardized features.

    Fits a StandardScaler and an L1-penalized LogisticRegression on the
    given training data; the "Trend" column is the target, every other
    column is a feature.  `hyperopt` is accepted but unused.
    """

    def __init__(self, training_data, hyperopt=False):
        features = training_data.drop("Trend", axis=1)
        self.scaler = StandardScaler()
        self.scaler.fit(features)
        self.model = LogisticRegression(penalty="l1", tol=.001, C=1000, max_iter=150)
        self.model.fit(self.scaler.transform(features), training_data["Trend"])

    ## Public Methods ##

    def predict(self, vector):
        """Predict the trend label for a single 1-D feature vector."""
        return self.model.predict(self.scaler.transform(vector.reshape(1, -1)))
| 3.03125 | 3 |
yaql/yaqlization.py | pgajdos/yaql | 0 | 12762575 | # Copyright (c) 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
YAQLIZATION_ATTR = '__yaqlization__'
def yaqlize(class_or_object=None, yaqlize_attributes=True,
            yaqlize_methods=True, yaqlize_indexer=True,
            auto_yaqlize_result=False, whitelist=None, blacklist=None,
            attribute_remapping=None, blacklist_remapped_attributes=True):
    """Mark a class or object as yaqlizable.

    Usable both as a decorator factory (``@yaqlize(...)``) and as a
    direct call (``yaqlize(obj)``).  Settings are only attached when the
    target has not been yaqlized already.
    """
    def decorate(target):
        if not hasattr(target, YAQLIZATION_ATTR):
            settings = build_yaqlization_settings(
                yaqlize_attributes=yaqlize_attributes,
                yaqlize_methods=yaqlize_methods,
                yaqlize_indexer=yaqlize_indexer,
                auto_yaqlize_result=auto_yaqlize_result,
                whitelist=whitelist,
                blacklist=blacklist,
                attribute_remapping=attribute_remapping,
            )
            setattr(target, YAQLIZATION_ATTR, settings)
        return target

    return decorate if class_or_object is None else decorate(class_or_object)
def get_yaqlization_settings(class_or_object):
    """Return the attached yaqlization settings dict, or None when the
    class or object was never yaqlized."""
    return getattr(class_or_object, YAQLIZATION_ATTR, None)
def is_yaqlized(class_or_object):
    """Check whether yaqlize() has been applied to the class or object."""
    return hasattr(class_or_object, YAQLIZATION_ATTR)
def build_yaqlization_settings(
        yaqlize_attributes=True, yaqlize_methods=True, yaqlize_indexer=True,
        auto_yaqlize_result=False, whitelist=None, blacklist=None,
        attribute_remapping=None, blacklist_remapped_attributes=True):
    """Build the settings dictionary stored under YAQLIZATION_ATTR.

    When `blacklist_remapped_attributes` is set, every remapped target
    name is added to the blacklist so the original name cannot be used.
    """
    whitelist = set(whitelist or [])
    blacklist = set(blacklist or [])
    attribute_remapping = attribute_remapping or {}
    if blacklist_remapped_attributes:
        # A remapping value is either a plain name or a (name, ...) tuple.
        for value in six.itervalues(attribute_remapping):
            name = value if isinstance(value, six.string_types) else value[0]
            blacklist.add(name)
    return {
        'yaqlizeAttributes': yaqlize_attributes,
        'yaqlizeMethods': yaqlize_methods,
        'yaqlizeIndexer': yaqlize_indexer,
        'autoYaqlizeResult': auto_yaqlize_result,
        'whitelist': whitelist,
        'blacklist': blacklist,
        'attributeRemapping': attribute_remapping
    }
| 2.046875 | 2 |
tools.py | AdamPI314/Catalytic-Cycle | 0 | 12762576 | <gh_stars>0
"""
tools, for example routine helping making figures
"""
import os
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab as plt
from matplotlib.lines import Line2D
import numpy as np
from naming import get_suffix
import interpolation
def get_colors_markers_linestyles():
    """Return (colors, markers, linestyles) sequences for plotting many
    curves with distinguishable styles."""
    single_char_markers = []
    for marker in Line2D.markers:
        try:
            if len(marker) == 1 and marker != ' ':
                single_char_markers.append(marker)
        except TypeError:
            # Some marker keys are not strings (len() fails); skip them.
            pass
    single_char_markers = single_char_markers + [
        r'$\lambda$',
        r'$\bowtie$',
        r'$\circlearrowleft$',
        r'$\clubsuit$',
        r'$\checkmark$']
    # Rotate the first two markers to the end of the list.
    markers = single_char_markers[2::]
    markers.append(single_char_markers[0])
    markers.append(single_char_markers[1])
    colors = ('b', 'g', 'k', 'c', 'm', 'y', 'r')
    linestyles = Line2D.lineStyles.keys()
    return colors, markers, linestyles
def make_figure_template(data_dir):
    """Plot each row of fname_y.csv against fname_x.csv and save the
    resulting figure as test.jpg inside `data_dir`."""
    colors, markers, _ = get_colors_markers_linestyles()
    # x axis file name
    f_n_x = "fname_x.csv"
    # y axis file name
    f_n_y = "fname_y.csv"
    # figure name
    fig_name = "test.jpg"
    data_x = np.loadtxt(os.path.join(data_dir, f_n_x),
                        dtype=float, delimiter=",")
    data_y = np.loadtxt(os.path.join(data_dir, f_n_y),
                        dtype=float, delimiter=",")
    # specify label for lines
    labels = ["line" + str(i + 1) for i in range(len(data_y))]
    # Down-sample to roughly 25 markers per line.  BUGFIX: the original
    # used "if delta_n is 0", an identity comparison against an int that
    # is unreliable and raises SyntaxWarning on modern CPython.
    delta_n = max(1, int(len(data_x) / 25))
    fig, a_x = plt.subplots(1, 1, sharex=True, sharey=False)
    for idx, _ in enumerate(data_y):
        a_x.plot(data_x[::delta_n], data_y[idx, ::delta_n],
                 color=colors[idx], marker=markers[idx], label=labels[idx])
    leg = a_x.legend(loc=0, fancybox=True, prop={'size': 10.0})
    leg.get_frame().set_alpha(0.7)
    a_x.set_xlim([data_x[0], data_x[-1]])
    a_x.grid()
    a_x.set_xlabel("1000/T(K$^{-1}$)")
    a_x.set_ylabel("k(cm$^{3}$ molecule$^{-1}$s$^{-1}$)")
    a_x.set_title("O$_2$ + npropyl")
    fig.tight_layout()
    fig.savefig(os.path.join(data_dir, fig_name), dpi=500)
    plt.close()
def pathway_time_2_array_index(data_dir, init_spe=None, atom_followed="C",
                               end_t=1.0, species_path=False, time=1.0):
    """Convert a pathway time into the index of the closest entry of the
    pathway_time_candidate file.

    Returns (array_index, time_at_that_index).
    """
    suffix = get_suffix(data_dir, init_spe=init_spe,
                        atom_followed=atom_followed, end_t=end_t)
    # Species paths are stored under a "species_"-prefixed filename.
    # FIX: truthiness test instead of the "is True" identity anti-pattern.
    prefix = "species_" if species_path else ""
    f_n_path_time = os.path.join(
        data_dir, "output", prefix + "pathway_time_candidate" + suffix + ".csv")
    p_time = np.genfromtxt(f_n_path_time, dtype=float, delimiter=',')
    # In case the file holds a two-dimensional table, use the first row.
    if len(np.shape(p_time)) == 2:
        p_time = p_time[0, :]
    # Interpolate the fractional index of `time` along the time axis.
    y_idx = [float(i) for i in range(len(p_time))]
    array_idx = int(round(interpolation.interp1d(p_time, y_idx, time)))
    # Clamp to the valid index range [0, len(p_time) - 1].
    array_idx = min(max(array_idx, 0), len(p_time) - 1)
    print("time idx:\t", array_idx, "time:\t", p_time[array_idx])
    return array_idx, p_time[array_idx]
| 2.8125 | 3 |
sdk/python/pulumi_databricks/azure_adls_gen2_mount.py | pulumi/pulumi-databricks | 0 | 12762577 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['AzureAdlsGen2MountArgs', 'AzureAdlsGen2Mount']
# NOTE: generated by the Pulumi Terraform bridge (tfgen); edits here are
# lost when the SDK is regenerated.
@pulumi.input_type
class AzureAdlsGen2MountArgs:
    """Input properties for constructing an AzureAdlsGen2Mount resource.

    Required: client_id, client_secret_key, client_secret_scope,
    container_name, initialize_file_system, mount_name,
    storage_account_name, tenant_id.
    Optional: cluster_id, directory.
    """
    def __init__(__self__, *,
                 client_id: pulumi.Input[str],
                 client_secret_key: pulumi.Input[str],
                 client_secret_scope: pulumi.Input[str],
                 container_name: pulumi.Input[str],
                 initialize_file_system: pulumi.Input[bool],
                 mount_name: pulumi.Input[str],
                 storage_account_name: pulumi.Input[str],
                 tenant_id: pulumi.Input[str],
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 directory: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a AzureAdlsGen2Mount resource.
        """
        pulumi.set(__self__, "client_id", client_id)
        pulumi.set(__self__, "client_secret_key", client_secret_key)
        pulumi.set(__self__, "client_secret_scope", client_secret_scope)
        pulumi.set(__self__, "container_name", container_name)
        pulumi.set(__self__, "initialize_file_system", initialize_file_system)
        pulumi.set(__self__, "mount_name", mount_name)
        pulumi.set(__self__, "storage_account_name", storage_account_name)
        pulumi.set(__self__, "tenant_id", tenant_id)
        # Optional inputs are only recorded when provided.
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if directory is not None:
            pulumi.set(__self__, "directory", directory)
    # Generated getter/setter pairs; each simply delegates to
    # pulumi.get / pulumi.set keyed by the property name.
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientSecretKey")
    def client_secret_key(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_secret_key")
    @client_secret_key.setter
    def client_secret_key(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret_key", value)
    @property
    @pulumi.getter(name="clientSecretScope")
    def client_secret_scope(self) -> pulumi.Input[str]:
        return pulumi.get(self, "client_secret_scope")
    @client_secret_scope.setter
    def client_secret_scope(self, value: pulumi.Input[str]):
        pulumi.set(self, "client_secret_scope", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter(name="initializeFileSystem")
    def initialize_file_system(self) -> pulumi.Input[bool]:
        return pulumi.get(self, "initialize_file_system")
    @initialize_file_system.setter
    def initialize_file_system(self, value: pulumi.Input[bool]):
        pulumi.set(self, "initialize_file_system", value)
    @property
    @pulumi.getter(name="mountName")
    def mount_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "mount_name")
    @mount_name.setter
    def mount_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "mount_name", value)
    @property
    @pulumi.getter(name="storageAccountName")
    def storage_account_name(self) -> pulumi.Input[str]:
        return pulumi.get(self, "storage_account_name")
    @storage_account_name.setter
    def storage_account_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "storage_account_name", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Input[str]:
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "tenant_id", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter
    def directory(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "directory")
    @directory.setter
    def directory(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory", value)
# NOTE: generated by the Pulumi Terraform bridge (tfgen); edits here are
# lost when the SDK is regenerated.
@pulumi.input_type
class _AzureAdlsGen2MountState:
    """State properties for looking up / filtering AzureAdlsGen2Mount
    resources; every property is optional, and `source` is an output-only
    field not present on the Args class."""
    def __init__(__self__, *,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret_key: Optional[pulumi.Input[str]] = None,
                 client_secret_scope: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 directory: Optional[pulumi.Input[str]] = None,
                 initialize_file_system: Optional[pulumi.Input[bool]] = None,
                 mount_name: Optional[pulumi.Input[str]] = None,
                 source: Optional[pulumi.Input[str]] = None,
                 storage_account_name: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering AzureAdlsGen2Mount resources.
        """
        # Each state field is only recorded when a value was supplied.
        if client_id is not None:
            pulumi.set(__self__, "client_id", client_id)
        if client_secret_key is not None:
            pulumi.set(__self__, "client_secret_key", client_secret_key)
        if client_secret_scope is not None:
            pulumi.set(__self__, "client_secret_scope", client_secret_scope)
        if cluster_id is not None:
            pulumi.set(__self__, "cluster_id", cluster_id)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if directory is not None:
            pulumi.set(__self__, "directory", directory)
        if initialize_file_system is not None:
            pulumi.set(__self__, "initialize_file_system", initialize_file_system)
        if mount_name is not None:
            pulumi.set(__self__, "mount_name", mount_name)
        if source is not None:
            pulumi.set(__self__, "source", source)
        if storage_account_name is not None:
            pulumi.set(__self__, "storage_account_name", storage_account_name)
        if tenant_id is not None:
            pulumi.set(__self__, "tenant_id", tenant_id)
    # Generated getter/setter pairs; each simply delegates to
    # pulumi.get / pulumi.set keyed by the property name.
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_id")
    @client_id.setter
    def client_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_id", value)
    @property
    @pulumi.getter(name="clientSecretKey")
    def client_secret_key(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_secret_key")
    @client_secret_key.setter
    def client_secret_key(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret_key", value)
    @property
    @pulumi.getter(name="clientSecretScope")
    def client_secret_scope(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "client_secret_scope")
    @client_secret_scope.setter
    def client_secret_scope(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "client_secret_scope", value)
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "cluster_id")
    @cluster_id.setter
    def cluster_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_id", value)
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "container_name")
    @container_name.setter
    def container_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "container_name", value)
    @property
    @pulumi.getter
    def directory(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "directory")
    @directory.setter
    def directory(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "directory", value)
    @property
    @pulumi.getter(name="initializeFileSystem")
    def initialize_file_system(self) -> Optional[pulumi.Input[bool]]:
        return pulumi.get(self, "initialize_file_system")
    @initialize_file_system.setter
    def initialize_file_system(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "initialize_file_system", value)
    @property
    @pulumi.getter(name="mountName")
    def mount_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "mount_name")
    @mount_name.setter
    def mount_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "mount_name", value)
    @property
    @pulumi.getter
    def source(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "source")
    @source.setter
    def source(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source", value)
    @property
    @pulumi.getter(name="storageAccountName")
    def storage_account_name(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "storage_account_name")
    @storage_account_name.setter
    def storage_account_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "storage_account_name", value)
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[pulumi.Input[str]]:
        return pulumi.get(self, "tenant_id")
    @tenant_id.setter
    def tenant_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tenant_id", value)
class AzureAdlsGen2Mount(pulumi.CustomResource):
    """Pulumi custom resource for a Databricks ADLS Gen2 mount.

    NOTE: generated by the Pulumi Terraform bridge (tfgen); edits here
    are lost when the SDK is regenerated.
    """
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret_key: Optional[pulumi.Input[str]] = None,
                 client_secret_scope: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 directory: Optional[pulumi.Input[str]] = None,
                 initialize_file_system: Optional[pulumi.Input[bool]] = None,
                 mount_name: Optional[pulumi.Input[str]] = None,
                 storage_account_name: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Create a AzureAdlsGen2Mount resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: AzureAdlsGen2MountArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a AzureAdlsGen2Mount resource with the given unique name, props, and options.
        :param str resource_name: The name of the resource.
        :param AzureAdlsGen2MountArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Dispatch between the two overloads above: either an Args object
        # or individual keyword properties.
        resource_args, opts = _utilities.get_resource_args_opts(AzureAdlsGen2MountArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 client_id: Optional[pulumi.Input[str]] = None,
                 client_secret_key: Optional[pulumi.Input[str]] = None,
                 client_secret_scope: Optional[pulumi.Input[str]] = None,
                 cluster_id: Optional[pulumi.Input[str]] = None,
                 container_name: Optional[pulumi.Input[str]] = None,
                 directory: Optional[pulumi.Input[str]] = None,
                 initialize_file_system: Optional[pulumi.Input[bool]] = None,
                 mount_name: Optional[pulumi.Input[str]] = None,
                 storage_account_name: Optional[pulumi.Input[str]] = None,
                 tenant_id: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id set means an existing resource is being adopted; the
        # required-property validation below only applies to creation.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = AzureAdlsGen2MountArgs.__new__(AzureAdlsGen2MountArgs)
            if client_id is None and not opts.urn:
                raise TypeError("Missing required property 'client_id'")
            __props__.__dict__["client_id"] = client_id
            if client_secret_key is None and not opts.urn:
                raise TypeError("Missing required property 'client_secret_key'")
            __props__.__dict__["client_secret_key"] = client_secret_key
            if client_secret_scope is None and not opts.urn:
                raise TypeError("Missing required property 'client_secret_scope'")
            __props__.__dict__["client_secret_scope"] = client_secret_scope
            __props__.__dict__["cluster_id"] = cluster_id
            if container_name is None and not opts.urn:
                raise TypeError("Missing required property 'container_name'")
            __props__.__dict__["container_name"] = container_name
            __props__.__dict__["directory"] = directory
            if initialize_file_system is None and not opts.urn:
                raise TypeError("Missing required property 'initialize_file_system'")
            __props__.__dict__["initialize_file_system"] = initialize_file_system
            if mount_name is None and not opts.urn:
                raise TypeError("Missing required property 'mount_name'")
            __props__.__dict__["mount_name"] = mount_name
            if storage_account_name is None and not opts.urn:
                raise TypeError("Missing required property 'storage_account_name'")
            __props__.__dict__["storage_account_name"] = storage_account_name
            if tenant_id is None and not opts.urn:
                raise TypeError("Missing required property 'tenant_id'")
            __props__.__dict__["tenant_id"] = tenant_id
            # `source` is an output-only property, unknown until provisioning.
            __props__.__dict__["source"] = None
        super(AzureAdlsGen2Mount, __self__).__init__(
            'databricks:index/azureAdlsGen2Mount:AzureAdlsGen2Mount',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            client_id: Optional[pulumi.Input[str]] = None,
            client_secret_key: Optional[pulumi.Input[str]] = None,
            client_secret_scope: Optional[pulumi.Input[str]] = None,
            cluster_id: Optional[pulumi.Input[str]] = None,
            container_name: Optional[pulumi.Input[str]] = None,
            directory: Optional[pulumi.Input[str]] = None,
            initialize_file_system: Optional[pulumi.Input[bool]] = None,
            mount_name: Optional[pulumi.Input[str]] = None,
            source: Optional[pulumi.Input[str]] = None,
            storage_account_name: Optional[pulumi.Input[str]] = None,
            tenant_id: Optional[pulumi.Input[str]] = None) -> 'AzureAdlsGen2Mount':
        """
        Get an existing AzureAdlsGen2Mount resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _AzureAdlsGen2MountState.__new__(_AzureAdlsGen2MountState)
        __props__.__dict__["client_id"] = client_id
        __props__.__dict__["client_secret_key"] = client_secret_key
        __props__.__dict__["client_secret_scope"] = client_secret_scope
        __props__.__dict__["cluster_id"] = cluster_id
        __props__.__dict__["container_name"] = container_name
        __props__.__dict__["directory"] = directory
        __props__.__dict__["initialize_file_system"] = initialize_file_system
        __props__.__dict__["mount_name"] = mount_name
        __props__.__dict__["source"] = source
        __props__.__dict__["storage_account_name"] = storage_account_name
        __props__.__dict__["tenant_id"] = tenant_id
        return AzureAdlsGen2Mount(resource_name, opts=opts, __props__=__props__)
    # Output properties; each delegates to pulumi.get keyed by name.
    @property
    @pulumi.getter(name="clientId")
    def client_id(self) -> pulumi.Output[str]:
        return pulumi.get(self, "client_id")
    @property
    @pulumi.getter(name="clientSecretKey")
    def client_secret_key(self) -> pulumi.Output[str]:
        return pulumi.get(self, "client_secret_key")
    @property
    @pulumi.getter(name="clientSecretScope")
    def client_secret_scope(self) -> pulumi.Output[str]:
        return pulumi.get(self, "client_secret_scope")
    @property
    @pulumi.getter(name="clusterId")
    def cluster_id(self) -> pulumi.Output[Optional[str]]:
        return pulumi.get(self, "cluster_id")
    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "container_name")
    @property
    @pulumi.getter
    def directory(self) -> pulumi.Output[str]:
        return pulumi.get(self, "directory")
    @property
    @pulumi.getter(name="initializeFileSystem")
    def initialize_file_system(self) -> pulumi.Output[bool]:
        return pulumi.get(self, "initialize_file_system")
    @property
    @pulumi.getter(name="mountName")
    def mount_name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "mount_name")
    @property
    @pulumi.getter
    def source(self) -> pulumi.Output[str]:
        return pulumi.get(self, "source")
    @property
    @pulumi.getter(name="storageAccountName")
    def storage_account_name(self) -> pulumi.Output[str]:
        return pulumi.get(self, "storage_account_name")
    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> pulumi.Output[str]:
        return pulumi.get(self, "tenant_id")
| 1.898438 | 2 |
za/post/test2.py | hth945/pytest | 0 | 12762578 | #%%
import requests
# url = "http://1172.16.31.10"
url = "http://192.168.1.10"
# data = bytes("connection\r\n")
data = "connection\r\n"
# POST a plain-text "connection" command to the device and echo the reply.
res = requests.post(url=url,
                    data=data,
                    headers={'Content-Type': 'text/plain'})
print(res.text)
# %%
# Second notebook cell: POST an ADD_RECORD test-station report
# ('&'-joined key=value pairs) to another host and print the response.
import requests
res = requests.post(url="http://192.168.1.5/",data="""C=ADD_RECORD&product=X1521&sn=PPPYWWDSSSSEEEERX+FFGGCUUCLPPPVHSSS&station_name=D-INSPECTION&station_id=SPWX_W03-2FT-01_1_D-INSPECTION&start_time=2019-06-28 08:11:28&stop_time=2019-06-28 08:11:39&result=PASS&reason=&stage=1&mac_address=88:51:FB:42:A1:35&value1=0.052&value2=0.052&value3=0.052&value4=0.052&value5=0.052&value6=0.052&value7=0.052&value8=0.052
""")
print(res.text)
IndigoPy/Math/plot.py | IndigoMad/IndigoPy | 0 | 12762579 | <gh_stars>0
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as mp3d
def functionviewer(afunction, brange):
    """Plot the scalar function `afunction` over [brange[0], brange[1]]
    sampled at 100 evenly spaced points, and show the figure."""
    step = (brange[1] - brange[0]) / 100
    xs = np.arange(brange[0], brange[1], step)
    ys = np.zeros(len(xs))
    for index in range(len(xs)):
        ys[index] = afunction(xs[index])
    plt.plot(xs, ys)
    plt.show()
def pointviewer(pointlists):
    """Scatter-plot up to 10 lists of 3-D points in one figure, using a
    distinct colour per list, and show the figure."""
    colorlist = ['r', 'b', 'k', 'g', 'y', 'c', 'm', 'gold', 'dodgerblue', 'orange']
    fig = plt.figure()
    ax = mp3d.Axes3D(fig)
    for list_index, points in enumerate(pointlists):
        for point in points:
            ax.scatter(point[0], point[1], point[2], c=colorlist[list_index])
    plt.show()
gizer/opdelete.py | racker/gizer | 0 | 12762580 | """opdelete module
Implementation of "delete" operation for "realtime" etl process for
transferring data from MongoDB nested collections to PostgreSQL flat data
with using pregenerated schema and tailing records (events) in oplog.rs
collection.
How to use:
del = op_delete_stmts(dbreq, schema, path, str_id, database_name,
schema_name)
parameters:
dbreq - connection to PostgreSQL,
schema - schema of nested data represented as json object,
    path - path to the object to be deleted
str_id - string representation of root ObjectID
    database_name - database name for destination database (PostgreSQL)
    schema_name - schema name for destination database (PostgreSQL)
return value:
    as delete is a complicated operation, it usually comes as a combination
    of two kinds of single operations, UPDATE and DELETE, for PostgreSQL,
    and the returned value has the following view
{
'upd': {
'UPDATE database_name.schema_name."table_name" SET idx=(%s) WHERE
idx=(%s), parent_id_iod=(%s);': [1, 2, 'abc'],
'UPDATE database_name.schema_name."table_name" SET idx=(%s) WHERE
idx=(%s), parent_id_iod=(%s);': [2, 3, 'abc']
},
'del': {
'DELETE FROM database_name.schema_name."table_name" WHERE
idx=(%s), parent_id_iod=(%s);': [1, 'abc']
}
"""
__author__ = '<NAME>'
__email__ = "<EMAIL>"
from gizer.util import get_idx_column_name_from_list, SELECT_TMPLT, \
UPDATE_TMPLT, DELETE_TMPLT, get_indexes_dictionary_idx, \
get_root_table_from_path, get_ids_list, get_table_name_from_list, \
get_tables_structure, get_table_name_schema, get_last_idx_from_path, \
get_part_schema
import psycopg2
def op_delete_stmts(dbreq, schema, path, str_id, database_info):
    """Build the UPDATE/DELETE statement sets for a MongoDB delete event.

    Thin public entry point that forwards all arguments to gen_statements.
    """
    statements = gen_statements(dbreq, schema, path, str_id, database_info)
    return statements
def get_max_id_in_array(dbreq, table, condition_list, database_info):
    """Return the largest array index still stored for *table*.

    Builds a SELECT from every 'target' condition except 'idx' itself and
    runs it over the fully-qualified table name; a NULL result (no rows)
    is reported as 0.
    """
    filtered = {key: value
                for key, value in condition_list['target'].items()
                if key != 'idx'}
    where = get_where_templates({'target': filtered, 'child': {}})['target']
    qualified_table = '.'.join(
        filter(None, [database_info.database_name,
                      database_info.schema_name, table]))
    sql_query = SELECT_TMPLT.format(table=qualified_table,
                                    conditions=where['template'])
    cursor = dbreq.cursor()
    cursor.execute(sql_query, tuple(where['values']))
    max_idx = cursor.fetchone()[0]
    return 0 if max_idx is None else max_idx
def get_conditions_list(schema, path, str_id):
    """Return WHERE-condition dicts for the target table and its child tables.

    schema -- nested collection schema (json-like dict)
    path -- dot-separated path to the object being deleted
    str_id -- string form of the root document's ObjectID
    Returns {'target': {column: value}, 'child': {column: value}}.
    """
    spath = path.split('.')
    parental_tables_idx_list = get_indexes_dictionary_idx(path)
    target_table = get_table_name_from_list(spath)
    target_table_idxname_for_child = get_idx_column_name_from_list(spath)
    params_target = {}
    params_child = {}
    root_table = get_root_table_from_path(path)
    for parent_table_idx in parental_tables_idx_list:
        if parent_table_idx != root_table:
            if target_table_idxname_for_child != parent_table_idx:
                params_target[parent_table_idx + '_idx'] = \
                    parental_tables_idx_list[parent_table_idx]
                params_child[parent_table_idx + '_idx'] = \
                    parental_tables_idx_list[parent_table_idx]
            else:
                # the target table refers to its own index simply as 'idx'
                params_target['idx'] = parental_tables_idx_list[
                    parent_table_idx]
                params_child[parent_table_idx + '_idx'] = \
                    parental_tables_idx_list[parent_table_idx]
    ids = get_ids_list(schema)
    # next(iter(...)) replaces ids.iterkeys().next(), which was Python 2
    # only and crashes under Python 3; behavior (first key) is identical.
    root_id = next(iter(ids))
    if root_table == target_table:
        params_target[root_id] = str(str_id)
    else:
        params_target[root_table + '_' + root_id] = str(str_id)
        params_child[root_table + '_' + root_id] = str(str_id)
    return {'target': params_target, 'child': params_child}
def get_where_templates(conditions_list):
    """Build SQL WHERE templates plus matching ordered value lists.

    conditions_list -- {'target': {col: value}, 'child': {col: value}}
    Returns, for each of 'target' and 'child', a dict with
    'template' (conditions sorted by column name, joined by ' and ',
    '%s' placeholders) and 'values' (values in the same column order,
    so they line up with the placeholders).
    """
    def condition_template(key):
        """Render one '(col=(%s))' placeholder condition."""
        # The original branched on idx-columns vs. other columns but
        # produced the identical template either way; a single return
        # removes the dead branch.
        return '({0}=(%s))'.format(key)

    where_list = {'target': {}, 'child': {}}
    for side in ('target', 'child'):
        # Sort the keys once and derive both template and values from the
        # same ordering.  The original sorted the rendered templates as
        # strings but the values by key name, which could misalign
        # placeholders and values for column names where the two orders
        # differ (e.g. names that are prefixes of each other plus digits).
        ordered_keys = sorted(conditions_list[side])
        where_list[side]['template'] = ' and '.join(
            condition_template(key) for key in ordered_keys)
        where_list[side]['values'] = [conditions_list[side][key]
                                      for key in ordered_keys]
    return where_list
def gen_statements(dbreq, schema, path, str_id, database_info):
    """generates all SQL statements with parameteres related for oplog event
    related to delete operation"""
    # Full table mapping for the collection named by the first path segment.
    tables_mappings = get_tables_structure(schema, path.split('.')[0], {}, {},
                                           1, '')
    # getting partial table mappings
    if len(path.split('.')) <= 1:
        schema_partial = schema
    else:
        schema_partial = get_part_schema(schema, path.split('.')[1:])
        if schema_partial == None:
            schema_partial = {}
    tables_mappings_partial = get_tables_structure(schema_partial,
                                                   path.split('.')[0], {}, {},
                                                   1, '')
    conditions_list = get_conditions_list(schema, path, str_id)
    where_clauses = get_where_templates(conditions_list)
    target_table = get_table_name_from_list(path.split('.'))
    # Nothing to delete when the path does not map to a known table.
    if not target_table in tables_mappings.keys():
        return {'del': {}, 'upd': {}}
    tables_list = []
    tables_mappings_partial_fixed = {}
    # fixing prefixes for partial table mappings
    for table_partial in tables_mappings_partial:
        tables_mappings_partial_fixed[table_partial.replace(
            path.split('.')[0][:-1], target_table[:-1],)] = \
            tables_mappings_partial[table_partial]
    # Collect "child" tables: those sharing the target's prefix, excluding
    # the target itself, that also exist in the full mapping.
    for table in tables_mappings_partial_fixed.keys():
        if str.startswith(str(table), target_table[:-1], 0,
                          len(table)) and not table == target_table and \
                        table in tables_mappings.keys():
            tables_list.append(table)
    # One DELETE for the target table plus one per child table.
    del_statements = {}
    del_statements[DELETE_TMPLT.format(
        table=get_table_name_schema([database_info.database_name,
                                     database_info.schema_name, target_table]),
        conditions=where_clauses['target']['template'])] = \
        where_clauses['target']['values']
    for table in tables_list:
        del_statements[DELETE_TMPLT.format(
            table=get_table_name_schema([database_info.database_name,
                                         database_info.schema_name, table]),
            conditions=where_clauses['child']['template'])] = \
            where_clauses['child']['values']
    update_statements = {}
    idx = get_last_idx_from_path(path)
    # Paths that do not end in an array index need no renumbering UPDATEs.
    if idx == None:
        return {'del': del_statements, 'upd': update_statements}
    max_idx = get_max_id_in_array(dbreq, target_table, conditions_list,
                                  database_info)
    # NOTE(review): with this early return, the loop below only runs when
    # idx > max_idx, in which case range(idx + 1, max_idx + 1) is empty, so
    # no UPDATE is ever emitted — the condition looks inverted; confirm
    # against the intended array-renumbering behavior before changing it.
    if idx <= max_idx:
        return {'del': del_statements, 'upd': update_statements}
    # Shift every element after the deleted index down by one position.
    for ind in range(int(idx) + 1, int(max_idx) + 1):
        spath = path.split('.')
        del spath[-1]
        spath.append(str(ind - 1))
        path_to_update = '.'.join(spath)
        udpate_where = get_where_templates(
            get_conditions_list(schema, path_to_update, str_id))
        update_statements[UPDATE_TMPLT.format(table=get_table_name_schema(
            [database_info.database_name, database_info.schema_name,
             target_table]),
            statements='idx=' + str(ind - 1),
            conditions=udpate_where['target'][
                'template'])] = \
            udpate_where['target'][
                'values']
        for table in tables_list:
            update_statements[UPDATE_TMPLT.format(table=get_table_name_schema(
                [database_info.database_name, database_info.schema_name,
                 table]),
                statements=get_idx_column_name_from_list(path.split('.')) +
                           '_idx=' + str(ind - 1),
                conditions=
                udpate_where['child'][
                    'template'])] = \
                udpate_where['child'][
                    'values']
    return {'del': del_statements, 'upd': update_statements}
| 2.203125 | 2 |
codeforces/math数学/1100/136B三进制xor.py | yofn/pyacm | 0 | 12762581 | <gh_stars>0
#!/usr/bin/env python3
# https://codeforces.com/problemset/problem/1332/A
def f(l):
    """Digit-wise ternary difference: for each base-3 digit, (c - a) mod 3.

    l -- pair [a, c] of non-negative integers.
    Returns the integer whose base-3 digits are (c_i - a_i) mod 3, i.e. the
    b with a "+" b == c under digit-wise mod-3 addition (ternary un-xor).
    """
    a, c = l
    result = 0
    place = 1
    while a > 0 or c > 0:
        digit = (c % 3 - a % 3) % 3
        result += place * digit
        place *= 3
        a //= 3
        c //= 3
    return result
# Read the two integers "a c" from stdin and print the ternary un-xor result.
l = list(map(int,input().split()))
print(f(l))
| 3.5 | 4 |
kivy/uix/behaviors/emacs.py | RiiotLabs/kivy | 0 | 12762582 | # -*- encoding: utf-8 -*-
'''
Emacs Behavior
==============
The :class:`~kivy.uix.behaviors.emacs.EmacsBehavior`
`mixin <https://en.wikipedia.org/wiki/Mixin>`_ allows you to add
`Emacs <https://www.gnu.org/software/emacs/>`_ keyboard shortcuts for basic
movement and editing to the :class:`~kivy.uix.textinput.TextInput` widget.
The shortcuts currently available are listed below:
Emacs shortcuts
---------------
=============== ========================================================
Shortcut Description
--------------- --------------------------------------------------------
Control + a Move cursor to the beginning of the line
Control + e Move cursor to the end of the line
Control + f Move cursor one character to the right
Control + b Move cursor one character to the left
Alt + f Move cursor to the end of the word to the right
Alt + b Move cursor to the start of the word to the left
Alt + Backspace Delete text left of the cursor to the beginning of word
Alt + d Delete text right of the cursor to the end of the word
Alt + w Copy selection
Control + w Cut selection
Control + y Paste selection
=============== ========================================================
.. warning::
If you have the :mod:`~kivy.modules.inspector` module enabled, the
shortcut for opening the inspector (Control + e) conflicts with the
Emacs shortcut to move to the end of the line (it will still move the
cursor to the end of the line, but the inspector will open as well).
'''
from kivy.properties import StringProperty
__all__ = ('EmacsBehavior', )
class EmacsBehavior(object):
    '''
    A `mixin <https://en.wikipedia.org/wiki/Mixin>`_ that enables Emacs-style
    keyboard shortcuts for the :class:`~kivy.uix.textinput.TextInput` widget.
    Please see the :mod:`Emacs behaviors module <kivy.uix.behaviors.emacs>`
    documentation for more information.
    .. versionadded:: 1.9.1
    '''

    key_bindings = StringProperty('emacs')
    '''String name which determines the type of key bindings to use with the
    :class:`~kivy.uix.textinput.TextInput`. This allows Emacs key bindings to
    be enabled/disabled programmatically for widgets that inherit from
    :class:`EmacsBehavior`. If the value is not ``'emacs'``, Emacs bindings
    will be disabled. Use ``'default'`` for switching to the default key
    bindings of TextInput.
    :attr:`key_bindings` is a :class:`~kivy.properties.StringProperty`
    and defaults to ``'emacs'``.
    .. versionadded:: 1.9.2
    '''

    def __init__(self, **kwargs):
        super(EmacsBehavior, self).__init__(**kwargs)
        # Modifier -> {character: handler} lookup table consulted by
        # keyboard_on_key_down; keys are single characters (chr(keycode)).
        self.bindings = {
            'ctrl': {
                'a': lambda: self.do_cursor_movement('cursor_home'),
                'e': lambda: self.do_cursor_movement('cursor_end'),
                'f': lambda: self.do_cursor_movement('cursor_right'),
                'b': lambda: self.do_cursor_movement('cursor_left'),
                'w': lambda: self._cut(self.selection_text),
                'y': self.paste,
            },
            'alt': {
                'w': self.copy,
                'f': lambda: self.do_cursor_movement('cursor_right',
                                                     control=True),
                'b': lambda: self.do_cursor_movement('cursor_left',
                                                     control=True),
                'd': self.delete_word_right,
                '\x08': self.delete_word_left,  # alt + backspace
            },
        }

    def keyboard_on_key_down(self, window, keycode, text, modifiers):
        # Intercept Emacs shortcuts; everything else falls through to the
        # default TextInput handling via super().
        key, key_str = keycode
        # NOTE(review): only the first modifier is considered, so combined
        # modifiers (e.g. shift+ctrl) are treated as the first one — confirm
        # this is the intended behavior.
        mod = modifiers[0] if modifiers else None
        is_emacs_shortcut = False
        if key in range(256) and self.key_bindings == 'emacs':
            is_emacs_shortcut = ((mod == 'ctrl' and
                                  chr(key) in self.bindings['ctrl'].keys()) or
                                 (mod == 'alt' and
                                  chr(key) in self.bindings['alt'].keys()))
        if is_emacs_shortcut:
            # Look up mod and key
            emacs_shortcut = self.bindings[mod][chr(key)]
            emacs_shortcut()
        else:
            super(EmacsBehavior, self).keyboard_on_key_down(window, keycode,
                                                            text, modifiers)

    def delete_word_right(self):
        '''Delete text right of the cursor to the end of the word'''
        if self._selection:
            return
        start_index = self.cursor_index()
        start_cursor = self.cursor
        # Move one word right to find the deletion span's end.
        self.do_cursor_movement('cursor_right', control=True)
        end_index = self.cursor_index()
        if start_index != end_index:
            s = self.text[start_index:end_index]
            # Record the deleted span for undo/redo before mutating the text.
            self._set_unredo_delsel(start_index, end_index, s, from_undo=False)
            self.text = self.text[:start_index] + self.text[end_index:]
            self._set_cursor(pos=start_cursor)

    def delete_word_left(self):
        '''Delete text left of the cursor to the beginning of word'''
        if self._selection:
            return
        start_index = self.cursor_index()
        # Move one word left to find the deletion span's start.
        self.do_cursor_movement('cursor_left', control=True)
        end_cursor = self.cursor
        end_index = self.cursor_index()
        if start_index != end_index:
            s = self.text[end_index:start_index]
            # Record the deleted span for undo/redo before mutating the text.
            self._set_unredo_delsel(end_index, start_index, s, from_undo=False)
            self.text = self.text[:end_index] + self.text[start_index:]
            self._set_cursor(pos=end_cursor)
| 2.4375 | 2 |
friends/models.py | tss89/calendar-sem8-tp | 0 | 12762583 | <gh_stars>0
from django.db import models
from calendar_sem8.settings import AUTH_USER_MODEL
class Friends(models.Model):
    """Facebook friend data cached locally; facebook_id is the unique key."""
    facebook_id = models.CharField(max_length=150, db_index=True, blank=True, unique=True)
    # Profile fields are nullable: not every friend exposes them.
    email = models.CharField(max_length=100, default=None, null=True)
    first_name = models.CharField(max_length=100, default=None, null=True)
    last_name = models.CharField(max_length=100, default=None, null=True)
    birth_date = models.DateField(default=None, null=True)
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class FriendUser(models.Model):
    """Join table linking a Friends row to a local user account.

    PROTECT on both sides: neither a friend nor a user can be deleted while
    a link exists.
    """
    friend = models.ForeignKey(Friends, on_delete=models.PROTECT)
    user = models.ForeignKey(AUTH_USER_MODEL, on_delete=models.PROTECT)
    created_at = models.DateTimeField(auto_now_add=True)
| 2.140625 | 2 |
emails/tests.py | lordoftheflies/Django-CRM | 1 | 12762584 | <reponame>lordoftheflies/Django-CRM<gh_stars>1-10
import pytest
from django.test import TestCase
from django.test import Client
from common.models import User
from emails.models import Email
from emails.forms import EmailForm
class UserCreation(TestCase):
    """Shared fixture: an ADMIN user, one Email row, and a logged-in client."""

    def setUp(self):
        self.client = Client()
        self.user = User.objects.create(
            first_name="<EMAIL>",
            username='jane',
            email="<EMAIL>", role="ADMIN")
        self.user.set_password('password')
        self.user.save()
        # A pre-existing email used by the delete/trash tests below.
        self.email = Email.objects.create(
            from_email="<EMAIL>",
            to_email="<EMAIL>",
            subject="subject ", message="message",
            important=False)
        self.client.login(username='<EMAIL>', password='password')
class EmailSentEdit(UserCreation, TestCase):
    """Form-level validation tests for EmailForm."""

    def test_edit_form_valid(self):
        form = EmailForm(data={'from_email': "<EMAIL>",
                               'to_email': "<EMAIL>",
                               'subject': "test subject",
                               'message': 'test message'})
        # print('yes')
        self.assertTrue(form.is_valid())

    def test_edit_form_invalid(self):
        # to_email is required, so an empty value must fail validation.
        form = EmailForm(data={'from_email': "<EMAIL>",
                               'to_email': "",
                               'subject': "test subject",
                               'message': 'test message'})
        # print('yes2')
        self.assertFalse(form.is_valid())
class EmailListTestCase(UserCreation, TestCase):
    """Smoke test for the email list view (currently skipped)."""

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_list(self):
        url = "/emails/list/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
class EmailTestCase(UserCreation, TestCase):
    """View tests for composing, sending, trashing and deleting emails.

    Views that redirect on success are asserted with 302; renderable pages
    with 200.  Several tests are skipped pending a working test setup.
    """

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_compose(self):
        url = "/emails/compose/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_email_trash_get(self):
        # Trash-delete view redirects (302) after acting on the email.
        url = "/emails/email_trash_delete/" + str(self.email.pk) + "/"
        # print(url)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_send_fail(self):
        # Missing to_email: the compose view re-renders the form (200).
        url = "/emails/compose/"
        data = {
            'from_email': "<EMAIL>", 'to_email': "",
            'subject': 'sample subject', 'message': "sample message"
        }
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)

    # email_trash_delete/
    def test_email_send(self):
        # Successful compose: redirects and creates a non-important Email
        # whose __str__ is its subject.
        url = "/emails/compose/"
        data = {
            'from_email': "<EMAIL>", 'to_email': "<EMAIL>",
            'subject': 'sample subject', 'message': "sample message"
        }
        response = self.client.post(url, data)
        get_email = Email.objects.get(subject="sample subject")
        # boo = Email.objects.get(important=True)
        # print('yes')
        self.assertFalse(get_email.important)
        self.assertEqual(get_email.subject, get_email.__str__())
        self.assertEqual(response.status_code, 302)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_sent(self):
        url = "/emails/email_sent/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_trash(self):
        url = "/emails/email_trash/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_draft(self):
        url = "/emails/email_draft/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def test_email_draft_delete(self):
        url = "/emails/email_draft_delete/" + str(self.email.pk) + "/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    def test_email_delete(self):
        url = "/emails/email_delete/" + str(self.email.pk) + "/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 302)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_view(self):
        url = "/emails/email_view/" + str(self.email.pk) + "/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_sent_edit_get(self):
        url = "/emails/email_sent_edit/" + str(self.email.pk) + "/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_sent_edit_post(self):
        # Valid data redirects (302); invalid (empty to_email) re-renders (200).
        url = "/emails/email_sent_edit/" + str(self.email.pk) + "/"
        data = {
            'from_email': "<EMAIL>", 'to_email': "<EMAIL>",
            'subject': 'subject', 'message': "message"
        }
        data1 = {
            'from_email': "<EMAIL>", 'to_email': "",
            'subject': 'subject', 'message': "message"
        }
        response = self.client.post(url, data)
        response1 = self.client.post(url, data1)
        self.assertEqual(response.status_code, 302)
        self.assertEqual(response1.status_code, 200)

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_email_imp_list(self):
        url = "/emails/email_imp_list/"
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    # def test_email_move_to_trash(self):
    #     url = "/emails/email_move_to_trash/" + str(self.email.pk) + "/"
    #     response = self.client.get(url)
    #     self.assertEqual(response.status_code, 302)
    #     response = self.client.post(reverse('302'), {}, HTTP_REFERER=url)
    # def test_email_trash_del(self):
    #     url = "/emails/trash_delete/"+str(self.email.pk)+"/$"
    #     print(url)
    #     response = self.client.get(url)
    #     self.assertEqual(response.status_code,200)
| 2.4375 | 2 |
services/dy-static-file-server/src/dy_static_file_server/ensure_random_workdir_data.py | sanderegg/osparc-services | 2 | 12762585 | <reponame>sanderegg/osparc-services
import random
from pathlib import Path
from typing import List
import uuid
import grp, pwd
import getpass
import os
TARGET_DIRECTORY = Path("/workdir/generated-data")
def make_random_file(target_dir: Path) -> None:
    """Create one uniquely-named .txt file with fixed placeholder content."""
    new_file = target_dir / "{}.txt".format(uuid.uuid4())
    new_file.write_text("no random data here")
    print("Created {}".format(new_file))
def get_files_in_directory(directory: Path) -> List[Path]:
    """Return every entry (files and subdirectories) under *directory*, recursively."""
    # list() over a pointless identity comprehension; removes the need for
    # the pylint unnecessary-comprehension suppression.
    return list(directory.rglob("*"))
def is_content_present(directory: Path) -> bool:
    """Report whether *directory* already holds at least one entry."""
    return bool(get_files_in_directory(directory))
def print_user_and_directory_info() -> None:
    """Print the current user, their groups, and a listing of /workdir (debug aid)."""
    user = getpass.getuser()
    # Groups the user is an explicit member of...
    groups = [g.gr_name for g in grp.getgrall() if user in g.gr_mem]
    gid = pwd.getpwnam(user).pw_gid
    # ...plus the user's primary group.
    groups.append(grp.getgrgid(gid).gr_name)
    print(f"User {user}, groups {groups}")
    os.system("ls -lah /workdir")
def ensure_random_data(target_dir: Path) -> None:
    """Populate *target_dir* with 1-10 generated files unless it has content.

    Creates the directory if needed, logs user/permission info, and is
    idempotent: an already-populated directory is left untouched.
    """
    target_dir.mkdir(parents=True, exist_ok=True)
    print_user_and_directory_info()
    print(f"Creating {target_dir} if missing")
    if is_content_present(target_dir):
        files = get_files_in_directory(target_dir)
        # Typo fix in the log message: "genration" -> "generation".
        print(f"Skipping content generation. Already detected: {files}")
        return
    for _ in range(random.randint(1, 10)):
        make_random_file(target_dir)
def main() -> None:
    """Script entry point: seed TARGET_DIRECTORY with generated files."""
    ensure_random_data(TARGET_DIRECTORY)

if __name__ == "__main__":
    main()
answers/Utkarsh Srivastava/Day 5/Question 1.py | arc03/30-DaysOfCode-March-2021 | 22 | 12762586 | a = 0
b = 1
n = int(input())
for i in range(n):
print(a,end=",")
print(b,end=",")
a = a+b
b = b+a
print("\b")
| 3.734375 | 4 |
Sketchpad003_FIRWindow/Sketchpad003_FIRWindow.py | willfehlmusic/Python_Sketchpads | 8 | 12762587 | <reponame>willfehlmusic/Python_Sketchpads
import scipy.signal
import numpy as np
import matplotlib.pyplot as plt
import os
import imageio
import time
# system sample rate
fs = 48000
# a range of frequencies
freqRange = np.linspace(0,fs/2,fs)
# define an ideal low pass filter with cutoff at 1000 Hz
fc = 1000
# desired system frequency response
iFreqResp = []
for f in freqRange:
if f < fc:
iFreqResp.append(1)
else:
iFreqResp.append(0)
# plot frequency response of ideal low-pass filter
plt.figure(figsize=[16,9])
plt.title('Ideal Low Pass Filter:\nFrequency Response')
plt.semilogx(freqRange,iFreqResp)
plt.ylabel('Magnitude')
plt.ylim([-0.5,1.5])
plt.xlabel('Frequency [Hz]')
plt.xlim([20, 20000])
plt.grid()
plt.tight_layout()
plt.savefig('IdealFreqResponse.png', bbox_inches="tight")
#plt.show()
# use the inverse fft to create a corresponding impulse response
iTimeResp = np.fft.ifft(iFreqResp)
# only the first half of this impulse corresponds with the causal portion
iTimeResp = 2*iTimeResp[0:round(len(iTimeResp)/2)]
# create a range of time values for plotting
tRange = np.linspace(0,len(iTimeResp)-1,len(iTimeResp))
# plot the impulse response
plt.figure(figsize=[16,9])
plt.subplot(2, 1, 1)
plt.title('Ideal Low Pass Filter:\nImpulse Response')
plt.plot(tRange,iTimeResp)
plt.ylabel('Magnitude')
plt.ylim([min(iTimeResp),max(iTimeResp)])
plt.xlabel('Time [Samples]')
plt.xlim([-100, len(tRange)])
plt.grid()
plt.subplot(2, 1, 2)
plt.title('Ideal Low Pass Filter:\nImpulse Response [Zoomed]')
plt.plot(tRange,iTimeResp)
plt.ylabel('Magnitude')
plt.ylim([min(iTimeResp),max(iTimeResp)])
plt.xlabel('Time [Samples]')
plt.xlim([0, 512])
plt.grid()
plt.tight_layout()
plt.savefig('IdealTimeResponse.png', bbox_inches="tight")
#plt.show()
# hard truncate the impulse response to an acceptable number of coefficients
nCoeff = 2048
iTimeRespTrunc = np.concatenate([iTimeResp[0:nCoeff],
np.zeros(len(iTimeResp) - nCoeff)])
# get the frequency response
iFreqResp = np.fft.fft(iTimeRespTrunc,len(iTimeRespTrunc))
# a range of frequencies
freqRange = np.linspace(0,fs/2,len(iTimeRespTrunc))
# plot the impulse and frequency response
plt.figure(figsize=[16,9])
plt.subplot(2, 1, 1)
plt.title('Impulse Response [Truncated]')
plt.plot(tRange,iTimeRespTrunc)
plt.ylabel('Magnitude')
plt.ylim([min(iTimeRespTrunc),max(iTimeRespTrunc)])
plt.xlabel('Time [Samples]')
plt.xlim([0, len(tRange[0:nCoeff])])
plt.grid()
plt.subplot(2, 1, 2)
plt.title('Frequency Response [Truncated]')
plt.semilogx(freqRange,np.real(iFreqResp))
plt.ylabel('Magnitude')
plt.ylim([-0.5,1.5])
plt.xlabel('Frequency [Hz]')
plt.xlim([20, 20000])
plt.grid()
plt.tight_layout()
plt.savefig('TruncatedResponse.png', bbox_inches="tight")
#plt.show()
# setting up to export images...
# set up figure
fig = plt.figure(figsize=[16,9])
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
ax1.set_ylabel('Magnitude')
ax1.set_ylim([min(iTimeResp),max(iTimeResp)])
ax1.set_xlabel('Time [Samples]')
ax2.set_ylabel('Magnitude')
ax2.set_xlabel('Frequency [Hz]')
ax2.set_ylim([-0.5,1.5])
ax2.set_xlim([20, 20000])
#plt.show(block=False)
count = 0
for n in range(512, 8, -4):
# print(n)
# clear the plots
ax1.cla()
ax2.cla()
# adjust plot titles
ax1.set_title('Impulse Response: samples={}'.format(n))
ax2.set_title('Frequency Response n={}'.format(n))
# replotting for a new number of coefficients
nCoeff = n
iTimeRespTrunc = np.concatenate([iTimeResp[0:nCoeff],
np.zeros(len(iTimeResp) - nCoeff)])
# adjust data
iFreqResp = np.fft.fft(iTimeRespTrunc,len(iTimeRespTrunc))
# a range of frequencies
freqRange = np.linspace(0,fs/2,len(iTimeRespTrunc))
# plot the data
ax1.plot(tRange, iTimeRespTrunc)
ax2.semilogx(freqRange,np.real(iFreqResp))
# re-adjust the range
ax1.set_xlim([0, n])
ax2.set_ylim([-0.5,1.5])
ax2.set_xlim([20,20000])
ax1.grid(True)
ax2.grid(True)
a = str(count).zfill(8)
# save files
plt.savefig('Frames/TruncatedResponse_'+a+'.png', bbox_inches="tight")
count += 1
# make gif out of each image file
png_dir = 'Frames/'
images = []
for file_name in os.listdir(png_dir):
if file_name.endswith('.png'):
file_path = os.path.join(png_dir, file_name)
images.append(imageio.imread(file_path))
imageio.mimsave('TruncationAnimation.gif', images)
# convert .gif to .mp4
import moviepy.editor as mp
clip = mp.VideoFileClip("TruncationAnimation.gif")
clip.write_videofile("TruncationAnimation.mp4")
# FIR Filter Design: The Effect of Impulse Response Truncation on my YouTube
# now lets really do the truncation
# Hard truncate the impulse response to an acceptable number of coefficients
nCoeff = 64
iTimeRespTrunc = np.concatenate([iTimeResp[0:nCoeff],
np.zeros(len(iTimeResp) - nCoeff)])
# frequency response
iFreqResp = np.fft.fft(iTimeRespTrunc,len(iTimeRespTrunc))
# a range of frequencies
freqRange = np.linspace(0,fs/2,len(iTimeRespTrunc))
plt.figure(figsize=[16,9])
plt.subplot(2, 1, 1)
plt.title('Impulse Response [Truncated]')
plt.plot(tRange,iTimeRespTrunc)
plt.ylabel('Magnitude')
plt.ylim([min(iTimeRespTrunc),max(iTimeRespTrunc)])
plt.xlabel('Time [Samples]')
plt.xlim([0, len(tRange[0:nCoeff])])
plt.grid()
plt.subplot(2, 1, 2)
plt.title('Frequency Response [Truncated]')
plt.semilogx(freqRange,np.real(iFreqResp))
plt.ylabel('Magnitude')
plt.ylim([-0.5,1.5])
plt.xlabel('Frequency [Hz]')
plt.xlim([20, 20000])
plt.grid()
plt.tight_layout()
plt.savefig('TruncatedResponse_64Coefficient.png', bbox_inches="tight")
nCoeff = 128
# compare windowing functions use 128+ samples for acceptable frequency graph
negposTRange = np.linspace(-nCoeff,nCoeff,nCoeff*2)
boxcar_window = scipy.signal.windows.boxcar(nCoeff*2)
boxcar_window[0] = 0
boxcar_window[len(boxcar_window)-1] = 0
triang_window = scipy.signal.windows.triang(nCoeff*2)
exponential_window = scipy.signal.windows.exponential(nCoeff*2)
cosine_window = scipy.signal.windows.cosine(nCoeff*2)
blackmanharris_window = scipy.signal.windows.blackmanharris(nCoeff*2)
hamming_window = scipy.signal.windows.hamming(nCoeff*2)
# this function just gets the frequency response and frequency range for each window...
# make life easier
def getFreqResponse(window):
    """Return (normalized frequency axis, dB magnitude response) of a window.

    The spectrum is scaled by 1/25.5, fft-shifted so DC sits at the centre,
    converted to dB, and clipped to the displayable range [-120, 120].
    """
    spectrum = np.fft.fft(window, len(window)) / 25.5
    magnitude = np.abs(np.fft.fftshift(spectrum))
    freq_axis = np.linspace(-0.5, 0.5, len(spectrum))
    response_db = np.clip(20.0 * np.log10(np.real(magnitude)), -120, 120)
    return freq_axis, response_db
boxcar_windowResponse = getFreqResponse(boxcar_window)
triang_windowResponse = getFreqResponse(triang_window)
exponential_windowResponse = getFreqResponse(exponential_window)
cosine_windowResponse = getFreqResponse(cosine_window)
blackmanharris_windowResponse = getFreqResponse(blackmanharris_window)
hamming_windowResponse = getFreqResponse(hamming_window)
# plot time domain response and frequency response
plt.figure(figsize=[16,10])
plt.subplot(3, 2, 1)
plt.plot(negposTRange, boxcar_window)
plt.title("Boxcar Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 2)
plt.plot(boxcar_windowResponse[0], boxcar_windowResponse[1])
plt.title("Boxcar Window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.subplot(3, 2, 3)
plt.plot(negposTRange, triang_window)
plt.title("Triangle Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 4)
plt.plot(triang_windowResponse[0], triang_windowResponse[1])
plt.title("Triangle window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.subplot(3, 2, 5)
plt.plot(negposTRange, exponential_window)
plt.title("Exponential Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 6)
plt.plot(exponential_windowResponse[0], exponential_windowResponse[1])
plt.title("Exponential window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.savefig('WindowResponse_128Coefficient_set1.png', bbox_inches="tight")
plt.figure(figsize=[16,10])
plt.subplot(3, 2, 1)
plt.plot(negposTRange, cosine_window)
plt.title("Cosine Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 2)
plt.plot(cosine_windowResponse[0], cosine_windowResponse[1])
plt.title("Cosine window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.subplot(3, 2, 3)
plt.plot(negposTRange, blackmanharris_window)
plt.title("Blackman-Harris Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 4)
plt.plot(blackmanharris_windowResponse[0], blackmanharris_windowResponse[1])
plt.title("Blackman-Harris window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.subplot(3, 2, 5)
plt.plot(negposTRange, hamming_window)
plt.title("Hamming Window")
plt.ylabel("Amplitude")
plt.ylim([0,1.1])
plt.xlabel("Time [samples]")
plt.grid()
plt.subplot(3, 2, 6)
plt.plot(hamming_windowResponse[0], hamming_windowResponse[1])
plt.title("Hamming window")
plt.ylabel("Amplitude [dB]")
plt.xlabel("Frequency [normalized]")
plt.grid()
plt.tight_layout()
plt.savefig('WindowResponse_128Coefficient_set2.png', bbox_inches="tight")
# okay so now back to our truncated impulse response...
# let's do truncation and apply the windowing function...
nCoeff = 64
# make double wide window...
boxcar_window = scipy.signal.windows.boxcar(nCoeff*2)
triang_window = scipy.signal.windows.triang(nCoeff*2)
cosine_window = scipy.signal.windows.cosine(nCoeff*2)
blackmanharris_window = scipy.signal.windows.blackmanharris(nCoeff*2)
hamming_window = scipy.signal.windows.hamming(nCoeff*2)
# take only the positive portion of the window and...
# make the array the same length as the impulse response
iTimeRespTrunc = np.concatenate([iTimeResp[0:nCoeff],
np.zeros(len(iTimeResp) - nCoeff)])
boxcar_window = np.concatenate([boxcar_window[nCoeff:],
np.zeros(len(iTimeRespTrunc) - nCoeff)])
boxcar_window = boxcar_window * iTimeRespTrunc
triang_window = np.concatenate([triang_window[nCoeff:],
np.zeros(len(iTimeRespTrunc) - nCoeff)])
triang_window = triang_window * iTimeRespTrunc
cosine_window = np.concatenate([cosine_window[nCoeff:],
np.zeros(len(iTimeRespTrunc) - nCoeff)])
cosine_window = cosine_window * iTimeRespTrunc
blackmanharris_window = np.concatenate([blackmanharris_window[nCoeff:],
np.zeros(len(iTimeRespTrunc) - nCoeff)])
blackmanharris_window = blackmanharris_window * iTimeRespTrunc
hamming_window = np.concatenate([hamming_window[nCoeff:],
np.zeros(len(iTimeRespTrunc) - nCoeff)])
hamming_window = hamming_window * iTimeRespTrunc
# get frequency response of windowed responses...
iFreqResp_boxcar = np.fft.fft(np.real(boxcar_window),len(boxcar_window))
iFreqResp_triang = np.fft.fft(np.real(triang_window),len(triang_window))
iFreqResp_cosine = np.fft.fft(np.real(cosine_window),len(cosine_window))
iFreqResp_blackmanharris = np.fft.fft(np.real(blackmanharris_window),len(blackmanharris_window))
iFreqResp_hamming = np.fft.fft(np.real(hamming_window),len(hamming_window))
# a range of frequencies
freqRange = np.linspace(0,fs/2,len(hamming_window))
plt.figure(figsize=[16,9])
plt.subplot(2, 1, 1)
plt.title('Impulse Response [Truncated]')
plt.plot(tRange,np.real(boxcar_window), label='boxcar')
plt.plot(tRange,np.real(triang_window), label='triangle')
plt.plot(tRange,np.real(cosine_window), label='cosine')
plt.plot(tRange,np.real(blackmanharris_window), label='blackman-harris')
plt.plot(tRange,np.real(hamming_window), label='hamming')
plt.ylabel('Magnitude')
plt.ylim([min(boxcar_window),max(boxcar_window)])
plt.xlabel('Time [Samples]')
plt.xlim([0, len(tRange[0:nCoeff])])
plt.legend()
plt.grid()
plt.subplot(2, 1, 2)
plt.title('Frequency Response [Truncated]')
plt.semilogx(freqRange,iFreqResp_boxcar, label='boxcar')
plt.semilogx(freqRange,iFreqResp_triang, label='triangle')
plt.semilogx(freqRange,iFreqResp_cosine, label='cosine')
plt.semilogx(freqRange,iFreqResp_blackmanharris, label='blackman-harris')
plt.semilogx(freqRange,iFreqResp_hamming, label='hamming')
plt.ylabel('Magnitude')
#plt.ylim([-0.5,1.5])
plt.xlabel('Frequency [Hz]')
plt.xlim([20, 20000])
plt.grid()
plt.tight_layout()
plt.savefig('FilterWindows_64Coefficient.png', bbox_inches="tight")
# print coefficients...
print('boxcar \t', np.real(boxcar_window[0:nCoeff]) )
print('triangle \t', np.real(triang_window[0:nCoeff]) )
print('cosine \t', np.real(cosine_window[0:nCoeff]) )
print('blackman-harris \t', np.real(blackmanharris_window[0:nCoeff]) )
print('hamming_window \t', np.real(hamming_window[0:nCoeff]) ) | 3.3125 | 3 |
curricula/serializers.py | code-dot-org/curriculumbuilder | 3 | 12762588 | <filename>curricula/serializers.py
import json
from rest_framework import serializers
from curricula.models import Unit, Chapter, Curriculum
from lessons.models import Lesson, Activity, Resource, Vocab, Annotation, Standard, Block
from documentation.models import Block
"""
Curriculum serializers
"""
class ResourceSerializer(serializers.ModelSerializer):
    """Serializes a Resource; html is rendered only when requested via context."""
    html = serializers.SerializerMethodField()

    class Meta:
        model = Resource
        fields = ('name', 'type', 'url', 'html')

    def get_html(self, obj):
        # Returns None implicitly unless the caller passed
        # context={'with_html': True} and the resource has a Google Doc (gd).
        if self.context.get('with_html') and obj.gd:
            return obj.gd_html()
class VocabSerializer(serializers.ModelSerializer):
    """Flat serialization of a Vocab word with both definition levels."""
    class Meta:
        model = Vocab
        fields = ('word', 'simpleDef', 'detailDef')
class BlockSerializer(serializers.ModelSerializer):
    """Serialize a code Block with its published documentation URL."""
    url = serializers.SerializerMethodField()
    class Meta:
        model = Block
        fields = ('title', 'syntax', 'url')
    def get_url(self, obj):
        # URL of the block's published documentation page.
        return obj.get_published_url()
class LessonSerializer(serializers.ModelSerializer):
    """Serialize a Lesson with audience-split resources, vocab and blocks."""
    teacher_desc = serializers.SerializerMethodField()
    student_desc = serializers.SerializerMethodField()
    teacher_resources = serializers.SerializerMethodField()
    student_resources = serializers.SerializerMethodField()
    vocab = serializers.SerializerMethodField()
    blocks = serializers.SerializerMethodField()

    class Meta:
        model = Lesson
        fields = ('title', 'number', 'student_desc', 'teacher_desc',
                  'student_resources', 'teacher_resources', 'vocab', 'blocks')

    def get_teacher_desc(self, obj):
        # Teachers see the full lesson overview.
        return obj.overview

    def get_student_desc(self, obj):
        # Students see the short description.
        return obj.description

    def get_teacher_resources(self, obj):
        # Resources not flagged for students are teacher-facing.
        return ResourceSerializer(obj.resources.filter(student=False),
                                  many=True, context=self.context).data

    def get_student_resources(self, obj):
        return ResourceSerializer(obj.resources.filter(student=True),
                                  many=True, context=self.context).data

    def get_vocab(self, obj):
        return VocabSerializer(obj.vocab.all(), many=True).data

    def get_blocks(self, obj):
        return BlockSerializer(obj.blocks.all(), many=True).data
class UnitSerializer(serializers.ModelSerializer):
    """Serialize a Unit together with its serialized lessons."""
    teacher_desc = serializers.SerializerMethodField()
    student_desc = serializers.SerializerMethodField()
    lessons = serializers.SerializerMethodField()

    class Meta:
        model = Unit
        fields = ('title', 'number', 'slug', 'unit_name', 'student_desc', 'teacher_desc', 'lessons')

    def get_teacher_desc(self, obj):
        # Teacher-facing description comes from the unit's full content.
        return obj.content

    def get_student_desc(self, obj):
        return obj.description

    def get_lessons(self, obj):
        return LessonSerializer(obj.lessons, many=True, context=self.context).data
class CurriculumSerializer(serializers.ModelSerializer):
    """Serialize a Curriculum and recursively serialize its units."""
    units = serializers.SerializerMethodField()
    teacher_desc = serializers.SerializerMethodField()
    student_desc = serializers.SerializerMethodField()

    class Meta:
        model = Curriculum
        fields = ('title', 'slug', 'student_desc', 'teacher_desc', 'units')

    def get_units(self, obj):
        return UnitSerializer(obj.units, many=True, context=self.context).data

    def get_teacher_desc(self, obj):
        return obj.content

    def get_student_desc(self, obj):
        return obj.description
"""
Export serializers
"""
class UnitExportSerializer(serializers.ModelSerializer):
    """Export a Unit with both its chapters and its chapter-less lessons."""
    chapters = serializers.SerializerMethodField()
    lessons = serializers.SerializerMethodField()
    student_desc = serializers.SerializerMethodField()
    teacher_desc = serializers.SerializerMethodField()

    class Meta:
        model = Unit
        fields = ('title', 'number', 'slug', 'unit_name', 'show_calendar', 'student_desc', 'teacher_desc', 'chapters', 'lessons')

    def get_teacher_desc(self, obj):
        return obj.content

    def get_student_desc(self, obj):
        return obj.description

    # A lesson appears either directly under the unit or nested in a chapter.
    # In 2020 courses, CSF and CSD use chapters but CSP does not.
    def get_chapters(self, obj):
        return ChapterExportSerializer(obj.chapters, many=True, context=self.context).data

    def get_lessons(self, obj):
        # Direct children of this unit only; lessons inside chapters are
        # emitted by ChapterExportSerializer instead.
        direct_lessons = Lesson.objects.filter(parent__unit=obj)
        return LessonExportSerializer(direct_lessons, many=True, context=self.context).data
class ChapterExportSerializer(serializers.ModelSerializer):
    """Export a Chapter with its nested lessons."""
    lessons = serializers.SerializerMethodField()

    class Meta:
        model = Chapter
        fields = ('title', 'number', 'questions', 'description', 'lessons')

    def get_lessons(self, obj):
        return LessonExportSerializer(obj.lessons, many=True, context=self.context).data
class LessonExportSerializer(serializers.ModelSerializer):
    """Export a Lesson with its activities, resources, vocab, blocks,
    objectives, standards and licensing metadata."""
    teacher_desc = serializers.SerializerMethodField()
    student_desc = serializers.SerializerMethodField()
    activities = serializers.SerializerMethodField()
    resources = serializers.SerializerMethodField()
    vocab = serializers.SerializerMethodField()
    blocks = serializers.SerializerMethodField()
    objectives = serializers.SerializerMethodField()
    standards = serializers.SerializerMethodField()
    stage_name = serializers.SerializerMethodField()
    creative_commons_license = serializers.SerializerMethodField()

    class Meta:
        model = Lesson
        fields = ('title', 'number', 'student_desc', 'teacher_desc', 'activities', 'resources',
                  'vocab', 'blocks', 'objectives', 'standards', 'code_studio_url', 'stage_name',
                  'prep', 'cs_content', 'creative_commons_license', 'assessment')

    def get_teacher_desc(self, obj):
        return obj.overview

    def get_student_desc(self, obj):
        return obj.description

    def get_activities(self, obj):
        return ActivityExportSerializer(obj.activity_set.iterator(),
                                        many=True, context=self.context).data

    def get_resources(self, obj):
        return ResourceExportSerializer(obj.resources, many=True, context=self.context).data

    def get_vocab(self, obj):
        return VocabExportSerializer(obj.vocab.all(), many=True, context=self.context).data

    def get_blocks(self, obj):
        return BlockExportSerializer(obj.blocks.all(), many=True, context=self.context).data

    def get_objectives(self, obj):
        # Names only, as a queryset of {'name': ...} dicts.
        return obj.objective_set.values('name')

    def get_standards(self, obj):
        return StandardSerializer(obj.standards.all(), many=True, context=self.context).data

    def get_stage_name(self, obj):
        # Lessons without an associated Code Studio stage export an empty name.
        return obj.stage['stageName'] if obj.stage else ''

    def get_creative_commons_license(self, obj):
        # Map the stored license image path to a human-readable license name.
        img_to_license = {
            'img/creativeCommons-by-nc-sa.png': 'Creative Commons BY-NC-SA',
            'img/creativeCommons-by-nc-nd.png': 'Creative Commons BY-NC-ND'
        }
        return img_to_license[obj.creative_commons_image]
class ActivityExportSerializer(serializers.ModelSerializer):
    """Export an Activity; exposes the model's `time` field as `duration`."""
    duration = serializers.SerializerMethodField()
    class Meta:
        model = Activity
        fields = ('name', 'duration', 'content')
    def get_duration(self, obj):
        # The model stores the duration in the `time` attribute.
        return obj.time
class ResourceExportSerializer(serializers.ModelSerializer):
    """Export the raw Resource fields needed to rebuild links downstream."""
    class Meta:
        model = Resource
        fields = ('name', 'type', 'student', 'gd', 'url', 'dl_url', 'slug')
class VocabExportSerializer(serializers.ModelSerializer):
    """Export a Vocab word with its simple and detailed definitions."""
    class Meta:
        model = Vocab
        fields = ('word', 'simpleDef', 'detailDef')
class BlockExportSerializer(serializers.ModelSerializer):
    """Export a documentation Block as its slug plus its parent IDE's slug."""
    parent_slug = serializers.SerializerMethodField()
    class Meta:
        model = Block
        fields = ('slug', 'parent_slug')
    def get_parent_slug(self, obj):
        # Slug of the IDE this block belongs to.
        return obj.parent_ide.slug
"""
Standards serializers
"""
class UnitLessonsSerializer(serializers.ModelSerializer):
    """Serialize a Unit as just its name plus per-lesson standards."""
    lessons = serializers.SerializerMethodField()

    class Meta:
        model = Unit
        fields = ('unit_name', 'lessons')

    def get_lessons(self, obj):
        return LessonStandardsSerializer(obj.lessons, many=True, context=self.context).data
class LessonStandardsSerializer(serializers.ModelSerializer):
    """Serialize a Lesson with the standards it covers."""
    standards = serializers.SerializerMethodField()

    class Meta:
        model = Lesson
        fields = ('title', 'number', 'standards')

    def get_standards(self, obj):
        return StandardSerializer(obj.standards.all(), many=True).data
class StandardSerializer(serializers.ModelSerializer):
    """Serialize a Standard as its shortcode plus its framework's slug."""
    framework = serializers.SerializerMethodField()

    class Meta:
        model = Standard
        fields = ('shortcode', 'framework')

    def get_framework(self, obj):
        return obj.framework.slug
"""
Annotation serializers
"""
class Range():
    """Plain holder for an annotation's text range (element anchors plus
    character offsets), presumably mirroring an Annotator.js `ranges`
    entry -- confirm against the client schema."""
    def __init__(self, start, end, startOffset, endOffset):
        self.start = start
        self.end = end
        self.startOffset = startOffset
        self.endOffset = endOffset
class RangeSerializer(serializers.BaseSerializer):
    """(De)serialize the annotation `ranges` array, which carries a single
    range object: {start, end, startOffset, endOffset}."""
    start = serializers.CharField(max_length=50)
    end = serializers.CharField(max_length=50)
    startOffset = serializers.IntegerField()
    endOffset = serializers.IntegerField()

    def to_representation(self, obj):
        # `obj` is the flat 4-tuple (start, end, startOffset, endOffset);
        # wrap it back into the single-element list the client expects.
        ranges = Range(obj[0], obj[1], obj[2], obj[3])
        return [ranges.__dict__]

    def to_internal_value(self, data):
        # `data` is a list whose first element is the range dict.
        try:
            first = data[0]
            return Range(first['start'], first['end'],
                         first['startOffset'], first['endOffset'])
        except (KeyError, IndexError, TypeError):
            # The original caught AttributeError (which these lookups never
            # raise) and *returned* the exception class; raise a proper DRF
            # validation error instead.
            raise serializers.ValidationError('Malformed ranges payload.')
'''
class AnnotationSerializer(serializers.ModelSerializer):
class Meta:
model = Annotation
fields = ('pk', 'owner', 'lesson', 'annotator_schema_version', 'text', 'quote', 'uri', 'range_start',
'range_end', 'range_startOffset', 'range_endOffset')
'''
class AnnotationSerializer(serializers.Serializer):
    """Serializer for annotation payloads.

    Flattens the incoming `ranges` value (a single Range object) into the
    four range_* columns on the Annotation model, and vice versa.
    """
    id = serializers.CharField(label="id", required=False)
    annotator_schema_version = serializers.CharField(max_length=8, allow_blank=True, required=False)
    created = serializers.CharField(allow_blank=True, required=False)
    updated = serializers.CharField(source='modified', allow_blank=True, required=False)
    text = serializers.CharField()
    quote = serializers.CharField()
    uri = serializers.CharField(max_length=100, min_length=None, allow_blank=True, required=False)
    ranges = RangeSerializer()
    user = serializers.CharField(source='owner', label='user', required=False)
    lesson = serializers.CharField()

    def update(self, instance, validated_data):
        instance.annotator_schema_version = validated_data.get('annotator_schema_version',
                                                               instance.annotator_schema_version)
        instance.text = validated_data.get('text', instance.text)
        instance.quote = validated_data.get('quote', instance.quote)
        instance.uri = validated_data.get('uri', instance.uri)
        # Unpack the ranges object into the four flat fields on the model.
        try:
            ranges = validated_data['ranges']
            instance.range_start = ranges.start
            instance.range_end = ranges.end
            instance.range_startOffset = ranges.startOffset
            instance.range_endOffset = ranges.endOffset
        except KeyError:
            # `ranges` is optional on update; keep the existing range.
            # (Parenthesized print works under both Python 2 and 3; the
            # original bare `print` statement was Python-2-only syntax.)
            print("No ranges array passed to AnnotationSerializer.")
        instance.save()
        return instance

    def create(self, validated_data):
        annotation = dict()
        annotation['owner'] = self.context['request'].user
        annotation['quote'] = validated_data.get('quote')
        annotation['text'] = validated_data.get('text')
        annotation['uri'] = validated_data.get('uri')
        annotation['lesson'] = Lesson.objects.get(pk=validated_data.get('lesson'))
        # Unpack the ranges object into the four flat fields on the model.
        try:
            ranges = validated_data['ranges']
            annotation['range_start'] = ranges.start
            annotation['range_end'] = ranges.end
            annotation['range_startOffset'] = ranges.startOffset
            annotation['range_endOffset'] = ranges.endOffset
        except KeyError:
            print("No ranges array passed to AnnotationSerializer.")
        return Annotation.objects.create(**annotation)
| 2.796875 | 3 |
src/helpers.py | limeunhee/bike-demand-prediction | 1 | 12762589 | from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
import numpy as np
def cross_val(estimator, X_train, y_train, nfolds):
    """Run k-fold cross-validation and report the model's RMSE.

    Parameters: estimator: an (unfitted) sklearn regressor
                X_train: 2d numpy array of features
                y_train: 1d numpy array of targets
                nfolds: number of folds for the kfold cross-validation

    Returns: the square root of the mean cross-validated MSE.  The printed
    label says RMSLE, which assumes the targets were log-transformed
    upstream -- confirm against the calling pipeline.
    """
    # cross_val_score negates MSE (higher-is-better convention), so flip
    # the sign back; n_jobs=-1 parallelizes across folds.
    fold_mse = -1 * cross_val_score(estimator, X_train, y_train,
                                    scoring='neg_mean_squared_error',
                                    cv=nfolds, n_jobs=-1)
    rmse = np.sqrt(fold_mse.mean())
    model_name = estimator.__class__.__name__
    print("{0:<25s} Train CV | RMSLE: {1:0.3f} ".format(model_name, rmse))
    return rmse
def stage_score_plot(estimator, X_train, y_train, X_test, y_test):
    """Fit a staged boosting regressor and plot train/test error per iteration.

    Parameters: estimator: GradientBoostingRegressor or AdaBoostRegressor
                X_train, X_test: 2d numpy arrays
                y_train, y_test: 1d numpy arrays

    Returns: None; draws both error curves on a new figure.
    """
    estimator.fit(X_train, y_train)
    label_name = estimator.__class__.__name__.replace('Regressor', '')
    learn_rate = estimator.learning_rate

    # Pre-size to n_estimators; staged_predict yields the ensemble's
    # prediction after each boosting stage.
    n_stages = estimator.n_estimators
    train_scores = np.zeros((n_stages,), dtype=np.float64)
    test_scores = np.zeros((n_stages,), dtype=np.float64)
    for stage, y_pred in enumerate(estimator.staged_predict(X_train)):
        train_scores[stage] = mean_squared_error(y_train, y_pred)
    for stage, y_pred in enumerate(estimator.staged_predict(X_test)):
        test_scores[stage] = mean_squared_error(y_test, y_pred)

    fig, ax = plt.subplots(figsize=(8, 10))
    plt.plot(np.sqrt(train_scores), alpha=.5,
             label="{0} Train - learning rate {1}".format(label_name, learn_rate))
    plt.plot(np.sqrt(test_scores), alpha=.5,
             label="{0} Test - learning rate {1}".format(label_name, learn_rate),
             ls='--')
    plt.title(label_name, fontsize=16, fontweight='bold')
    plt.ylabel('RMSLE', fontsize=14)
    plt.xlabel('Iterations', fontsize=14)
    return
def rf_score_plot(randforest, X_train, y_train, X_test, y_test):
    """Fit a random forest and draw its test-set RMSE as a horizontal line.

    Parameters: randforest: RandomForestRegressor
                X_train, X_test: 2d numpy arrays
                y_train, y_test: 1d numpy arrays

    Returns: None; adds a reference line to the current matplotlib axes.
    """
    randforest.fit(X_train, y_train)
    predictions = randforest.predict(X_test)
    rmse = np.sqrt(mean_squared_error(y_test, predictions))
    plt.axhline(rmse, alpha=0.7, c='grey', lw=1, ls='-.',
                label='Random Forest Test')
def gridsearch_with_output(estimator, parameter_grid, X_train, y_train):
    """Grid-search an estimator and print a table of optimal parameters.

    Parameters: estimator: the model to tune (e.g. RandomForestRegressor())
                parameter_grid: dict mapping parameter names to candidate values
                X_train: 2d numpy array
                y_train: 1d numpy array

    Returns: (best_params, model_best) -- the winning parameter dict and
    the refit best estimator from the search.
    """
    searcher = GridSearchCV(estimator,
                            parameter_grid,
                            verbose=True,
                            n_jobs=-1,
                            scoring='neg_mean_squared_error')
    searcher.fit(X_train, y_train)
    best_params = searcher.best_params_
    model_best = searcher.best_estimator_

    print("\nResult of gridsearch:")
    print("{0:<20s} | {1:<8s} | {2}".format("Parameter", "Optimal", "Gridsearch values"))
    print("-" * 55)
    for param, vals in parameter_grid.items():
        print("{0:<20s} | {1:<8s} | {2}".format(str(param),
                                                str(best_params[param]),
                                                str(vals)))
    return best_params, model_best
def display_default_and_gsearch_model_results(model_default, model_gridsearch,
                                              X_test, y_test):
    """Print test-set RMSE for a default-parameter model and its
    grid-searched counterpart.

    Parameters: model_default: fit model using initial parameters
                model_gridsearch: fit model using parameters from gridsearch
                X_test: 2d numpy array
                y_test: 1d numpy array

    Returns: None; prints the two scores.
    """
    name = model_default.__class__.__name__.replace('Regressor', '')  # for printing
    y_test_pred = model_gridsearch.predict(X_test)
    rmse_gridsearch = np.sqrt(mean_squared_error(y_test, y_test_pred))
    print("Results for {0}".format(name))
    # Fixed the typo ("rmlse") and the stray ')' in the original message.
    print("Gridsearched model rmsle: {0:0.3f}".format(rmse_gridsearch))
    y_test_pred = model_default.predict(X_test)
    rmse_default = np.sqrt(mean_squared_error(y_test, y_test_pred))
    print("    Default model rmsle: {0:0.3f}".format(rmse_default))
| 2.84375 | 3 |
graphormer/data/__init__.py | shawnwang-tech/Graphormer | 858 | 12762590 | DATASET_REGISTRY = {}
def register_dataset(name: str):
def register_dataset_func(func):
DATASET_REGISTRY[name] = func()
return register_dataset_func
| 1.921875 | 2 |
models/kp2uv_model.py | google/retiming | 152 | 12762591 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from third_party.models.base_model import BaseModel
from . import networks
class Kp2uvModel(BaseModel):
    """This class implements the keypoint-to-UV model (inference only)."""
    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Force the dataset mode this model expects."""
        parser.set_defaults(dataset_mode='kpuv')
        return parser
    def __init__(self, opt):
        """Initialize this model class.
        Parameters:
            opt -- test options
        """
        BaseModel.__init__(self, opt)
        # Tensors exposed for visualization by the base class.
        self.visual_names = ['keypoints', 'output_uv']
        self.model_names = ['Kp2uv']
        self.netKp2uv = networks.define_kp2uv(gpu_ids=self.gpu_ids)
        self.isTrain = False # only test mode supported
        # Our program will automatically call <model.setup> to define schedulers, load networks, and print networks
    def set_input(self, input):
        """Unpack input data from the dataloader.
        Parameters:
            input: a dictionary that contains the data itself and its metadata information.
        """
        self.keypoints = input['keypoints'].to(self.device)
        self.image_paths = input['path']
    def forward(self):
        """Run forward pass. This will be called by <test>."""
        output = self.netKp2uv.forward(self.keypoints)
        self.output_uv = self.output2rgb(output)
    def output2rgb(self, output):
        """Convert network outputs to RGB image.

        `output` is a (part-ID logits, per-part UV) pair; the loop below
        implies 25 part classes (0 = background) and 2 UV channels per part
        (48 total) -- confirm against networks.define_kp2uv.
        """
        pred_id, pred_uv = output
        # Hard per-pixel part assignment: argmax over the class channel.
        _, pred_id_class = pred_id.max(1)
        pred_id_class = pred_id_class.unsqueeze(1)
        # extract UV from pred_uv (48 channels); select based on class ID
        # Background pixels keep the -1 fill value.
        selected_uv = -1 * torch.ones(pred_uv.shape[0], 2, pred_uv.shape[2], pred_uv.shape[3], device=pred_uv.device)
        for partid in range(1, 25):
            mask = (pred_id_class == partid).float()
            selected_uv *= (1. - mask)
            selected_uv += mask * pred_uv[:, (partid - 1) * 2:(partid - 1) * 2 + 2]
        pred_uv = selected_uv
        # Pack (part ID rescaled to [-1, 1], U, V) as a 3-channel image.
        rgb = torch.cat([pred_id_class.float() * 10 / 255. * 2 - 1, pred_uv], 1)
        return rgb
    def optimize_parameters(self):
        """No-op: training is not supported for this model."""
        pass
| 2.328125 | 2 |
extra/cda/cachegen.py | heinsm/qira | 2,056 | 12762592 | #!/usr/bin/env python2.7
import os
import sys
import cda_config
basedir = os.path.dirname(os.path.realpath(__file__))
#sys.path.append(basedir+"/clang/llvm/tools/clang/bindings/python")
import clang.cindex as ci
ci.Config.set_library_file(cda_config.LIBCLANG_PATH)
import pickle
from clang.cindex import CursorKind
import json
from hashlib import sha1
# debug: set to 1 to dump each AST node while parsing
DEBUG = 0
# caches filled by parse_files / parse_node
file_cache = {}    # filename -> (sorted care extents, raw file contents)
object_cache = {}  # USR -> list of "file#line" definition sites
xref_cache = {}    # USR -> list of "file#line" reference sites
# a single libclang index for the runtime of the server
index = ci.Index.create()
def parse_node(node, d, filename, care):
  """Depth-first walk of the clang AST rooted at `node`.

  d        -- current recursion depth (used only for debug printing)
  filename -- only cursors located in this file are processed
  care     -- output list; appended with (start, end, klass, usr) extents

  Also fills the module-level object_cache (definition sites) and
  xref_cache (reference sites), keyed by the referenced cursor's USR.
  """
  #print node.location.file
  # Skip cursors that came from other files (e.g. included headers).
  if node.location.file != None and str(node.location.file) != filename:
    return
  ref = node.referenced
  if type(ref) != type(None):
    # USR (Unified Symbol Resolution) uniquely identifies the symbol.
    usr = ref.get_usr()
    #print " "*d, node.kind, node.spelling, node.displayname, node.location, node.extent.start.offset, node.extent.end.offset, node.get_usr(), "****", ref.spelling, ref.location, ref.get_usr()
  else:
    usr = None
  if DEBUG == 1:
    print " "*d, node.kind, node.spelling, node.displayname, node.location, node.location.offset, node.extent.start.offset, node.extent.end.offset, usr
  """
  if DEBUG == 1:
    print " "*d, node.kind, node.spelling, node.displayname, node.location, node.location.offset, node.extent.start.offset, node.extent.end.offset, usr
  """
  #print dir(node)
  """
  print ref, node.get_usr()
  print ref.location
  for i in deff:
    print i
  """
  klass = str(node.kind).split('.')[-1]
  # Compute the byte extent [start, end) to record, per cursor kind.
  (start, end) = (None, None)
  if node.kind in [CursorKind.STRING_LITERAL, CursorKind.INTEGER_LITERAL, CursorKind.TYPE_REF, CursorKind.TEMPLATE_REF]:
  #if node.kind in [CursorKind.STRING_LITERAL, CursorKind.TYPE_REF, CursorKind.TEMPLATE_REF]:
    start = node.extent.start.offset
    end = node.extent.end.offset
  elif node.kind in [CursorKind.FUNCTION_DECL, CursorKind.FUNCTION_TEMPLATE, CursorKind.VAR_DECL, CursorKind.CLASS_DECL, CursorKind.CXX_METHOD, CursorKind.CLASS_TEMPLATE, CursorKind.PARM_DECL]:
    # Declarations: highlight just the spelled name, not the whole extent.
    start = node.location.offset
    end = node.location.offset + len(node.spelling)
  elif node.kind in [CursorKind.MEMBER_REF_EXPR]:
    #print node.location.offset, node.extent.start.offset, node.extent.end.offset
    # location.offset can be 0 for some member refs; fall back to the extent.
    if node.location.offset != 0:
      start = node.location.offset
    else:
      start = node.extent.start.offset
    end = node.extent.end.offset
    #end = node.location.offset + len(node.displayname)
  elif node.kind in [CursorKind.DECL_REF_EXPR]:
    start = node.location.offset
    end = node.extent.end.offset
  if end != None:
    care.append((start, end, klass, usr))
  if end != None and usr != None and node.location.line > 0:
    newval = filename+"#"+str(node.location.line)
    if node.is_definition():
      # defining the object
      if usr in object_cache:
        object_cache[usr].append(newval)
      else:
        object_cache[usr] = [newval]
    else:
      # xref
      if usr in xref_cache:
        xref_cache[usr].append(newval)
      else:
        xref_cache[usr] = [newval]
  # link here is good
  for child in node.get_children():
    parse_node(child, d+1, filename, care)
def parse_file(filename, args=[]):
  """Parse one source file with libclang.

  Returns (care, rdat): the sorted extent tuples gathered by parse_node,
  plus the raw file contents.

  NOTE(review): the mutable default `args=[]` is shared across calls; safe
  only as long as callers never mutate it.
  """
  # traversal attack
  exargs = ["-I", cda_config.CLANG_INCLUDES]
  tu = index.parse(filename, args=exargs+args)
  # bad shit happened
  # Severity >= 3 means clang error or fatal; warn but keep going.
  bad = False
  for m in tu.diagnostics:
    if m.severity >= 3:
      print m
      bad = True
  if bad == True:
    #raise Exception("parsing issue")
    print "parsing issue"
  # extract the things we care about
  care = []
  parse_node(tu.cursor, 0, filename, care)
  care = sorted(care)
  # get file data
  rdat = open(filename).read()
  return (care, rdat)
def parse_files(files, args=[]):
  """Parse every file in `files` into file_cache and return the
  (object_cache, file_cache, xref_cache) triple used by the server."""
  # for unbuilt clang
  for fn in files:
    print "CDA: caching",fn
    try:
      file_cache[fn] = parse_file(fn, args)
    except Exception as e:
      # best-effort: a file that fails to parse is skipped, not fatal
      print "CDA: error on",fn,":",e
  dat = (object_cache, file_cache, xref_cache)
  return dat
| 2.1875 | 2 |
config.py | t1mosha/Helper5TGBot | 0 | 12762593 | <reponame>t1mosha/Helper5TGBot
token = "<PASSWORD>"  # Telegram bot token -- placeholder, fill in before running
admins = [admin_id]  # NOTE(review): admin_id is not defined in this file; importing this module raises NameError unless it is defined elsewhere first
api_id = 2040  # Telegram application api_id paired with api_hash below -- verify these are the intended credentials
api_hash = "b18441a1ff607e10a989891a5462e627"  # Telegram application api_hash
| 0.960938 | 1 |
write/models.py | power3247/project3 | 1 | 12762594 | from django.db import models
# Create your models here.
# class Essay(models.Model):
# score = models.IntegerField()
# essayA = models.TextField()
# essayQ = models.TextField()
# name = models.CharField(max_length=20)
#
# def __str__(self):
# return self.score
class choice(models.Model):
    # Essay question/answer record; field names are Korean
    # (제목 = title, 점수 = score, 질문 = question, 답변 = answer).
    제목 = models.TextField()
    점수 = models.IntegerField()
    질문 = models.TextField()
    답변 = models.TextField()
class lotto_data(models.Model):
    # Thirteen decimal feature columns (a-m) for lotto data.
    # NOTE(review): max_digits == decimal_places == 20 means every stored
    # value must be purely fractional (|v| < 1) -- confirm this is intended.
    a = models.DecimalField(max_digits=20,decimal_places=20)
    b = models.DecimalField(max_digits=20,decimal_places=20)
    c = models.DecimalField(max_digits=20,decimal_places=20)
    d = models.DecimalField(max_digits=20,decimal_places=20)
    e = models.DecimalField(max_digits=20,decimal_places=20)
    f = models.DecimalField(max_digits=20,decimal_places=20)
    g = models.DecimalField(max_digits=20,decimal_places=20)
    h = models.DecimalField(max_digits=20,decimal_places=20)
    i = models.DecimalField(max_digits=20,decimal_places=20)
    j = models.DecimalField(max_digits=20,decimal_places=20)
    k = models.DecimalField(max_digits=20,decimal_places=20)
    l = models.DecimalField(max_digits=20,decimal_places=20)
    m = models.DecimalField(max_digits=20,decimal_places=20)
| 2.515625 | 3 |
components/collector/tests/unittests/collectors/test_openvas.py | Hedde/quality-time | 0 | 12762595 | """Unit tests for the OpenVAS source."""
from datetime import datetime, timezone
import unittest
from unittest.mock import Mock, patch
from src.collector import MetricCollector
class OpenVASTest(unittest.TestCase):
    """Unit tests for the OpenVAS metrics."""
    def setUp(self):
        # Shared mocked requests.get response plus a minimal source config.
        self.mock_response = Mock()
        self.sources = dict(source_id=dict(type="openvas", parameters=dict(url="http://openvas.xml")))
    def test_warnings(self):
        """Test that the number of warnings is returned."""
        # Report fixture with a single low-severity result.
        self.mock_response.text = """<?xml version="1.0"?>
        <report>
            <results>
                <result id="id">
                    <name>Name</name>
                    <description>Description</description>
                    <threat>Low</threat>
                    <host>1.2.3.4</host>
                    <port>80/tcp</port>
                </result>
            </results>
        </report>"""
        metric = dict(type="security_warnings", addition="sum", sources=self.sources)
        with patch("requests.get", return_value=self.mock_response):
            response = MetricCollector(metric).get()
        # One result in the report -> one entity and a count of "1".
        self.assertEqual(
            [dict(key="id", severity="Low", name="Name", description="Description", host="1.2.3.4", port="80/tcp")],
            response["sources"][0]["entities"])
        self.assertEqual("1", response["sources"][0]["value"])
    def test_source_up_to_dateness(self):
        """Test that the report age in days is returned."""
        self.mock_response.text = """
        <report extension="xml" type="scan" content_type="text/xml">
            <name>2019-04-09T17:56:14Z</name>
            <creation_time>2019-04-09T17:56:14Z</creation_time>
            <modification_time>2019-04-09T18:05:40Z</modification_time>
        </report>"""
        metric = dict(type="source_up_to_dateness", addition="max", sources=self.sources)
        with patch("requests.get", return_value=self.mock_response):
            response = MetricCollector(metric).get()
        # Age is measured in whole days from the report's creation_time.
        expected_age = (datetime.now(timezone.utc) - datetime(2019, 4, 9, 17, 56, 14, tzinfo=timezone.utc)).days
        self.assertEqual(str(expected_age), response["sources"][0]["value"])
| 2.96875 | 3 |
module6/13 BBS&BLOG/blog/blog_app01/urls.py | Strugglingrookie/oldboy2 | 1 | 12762596 | from django.urls import path
from blog_app01 import views
# URL routes for the blog_app01 views.
urlpatterns = [
    path('index/', views.index),
    path('login/', views.login),
    path('regist/', views.regist),
    path('valid_img/', views.valid_img),  # presumably serves a captcha image -- verify against the view
]
| 1.671875 | 2 |
mnist/DecoyMNIST/00_make_data.py | laura-rieger/deep-explanation-penalization | 105 | 12762597 | <reponame>laura-rieger/deep-explanation-penalization
import torch
import torchvision
import torchvision.datasets as datasets
import sys
import numpy as np
import torch.utils.data as utils
from colour import Color
from os.path import join as oj
# --- Training split: stamp a 5x5 "decoy" patch in a random corner whose
# gray level is negatively correlated with the label (255 - 25*label).
mnist_trainset = datasets.MNIST(root='../data', train=True, download=True, transform=None)
color_x = np.zeros((60000, 1, 28, 28))
# NOTE(review): the zeros allocation above is dead -- immediately overwritten.
color_x = mnist_trainset.data[:, None].numpy().astype(np.float32)
color_y = mnist_trainset.targets.numpy().copy()
# Patch corner: row/col offset is 0 or 23, chosen independently per image.
choice_1 = np.random.choice(2, size = len(color_x))*23
choice_2 = np.random.choice(2, size = len(color_x))*23
for i in range(len(color_x)):
    color_x[i, :, choice_1[i]:choice_1[i]+5, choice_2[i]:choice_2[i]+5] = 255- 25*color_y[i]
# Rescale pixel values to [-1, 1].
color_x /=color_x.max()
color_x = color_x*2 -1
# NOTE(review): this path is '../../data' but the test split below saves to
# '../data' -- confirm which directory the training pipeline actually reads.
np.save(oj("../../data/ColorMNIST", "train_x_decoy.npy"), color_x)
from os.path import join as oj
# (duplicate import -- oj is already imported near the top of the file)
# --- Test split: same patch, but gray level *positively* correlated with
# the label (25*label), reversing the train-time correlation.
mnist_trainset = datasets.MNIST(root='../data', train=False, download=True, transform=None)
color_x = np.zeros((len(mnist_trainset.data), 1, 28, 28))
color_x = mnist_trainset.data[:, None].numpy().astype(np.float32)
color_y = mnist_trainset.targets.numpy().copy()
choice_1 = np.random.choice(2, size = len(color_x))*23
choice_2 = np.random.choice(2, size = len(color_x))*23
for i in range(len(color_x)):
    color_x[i, :, choice_1[i]:choice_1[i]+5, choice_2[i]:choice_2[i]+5] = 0+ 25*color_y[i]
color_x /=color_x.max()
color_x = color_x*2 -1
np.save(oj("../data/ColorMNIST", "test_x_decoy.npy"), color_x)
| 2.5625 | 3 |
bot/cogs/__init__.py | Ankit404butfound/PyWhatKit_Discord_Bot | 5 | 12762598 | <reponame>Ankit404butfound/PyWhatKit_Discord_Bot
# Cog modules exported by this package (picked up by wildcard imports).
__all__ = ["admin", "docs", "extras"]
| 0.929688 | 1 |
zipper.py | mas250/Python3 | 1 | 12762599 | a = [3, 4, 5, 6]
b = ['a', 'b', 'c', 'd']
def zipper(a, b):
    """Print the element-wise pairs [a[i], b[i]] when the two lists have the
    same length; otherwise print a mismatch warning.

    Returns None; output goes to stdout only.
    """
    if len(a) == len(b):
        # zip yields tuples; convert each to a list to keep the original
        # [[x, y], ...] output format.
        print([list(pair) for pair in zip(a, b)])
    else:
        # Replaces the original's redundant second length test.
        print("lists do not match")
zipper(a,b)
| 4.0625 | 4 |
cride/frontend/views.py | alexhernandez-git/cride-frontend | 0 | 12762600 | <reponame>alexhernandez-git/cride-frontend<gh_stars>0
from django.shortcuts import render
# Create your views here.
from django.views import View
def index(request):
    """Render the frontend's index.html template."""
    return render(request, 'index.html')
| 1.554688 | 2 |
mcdc_tnt/pyk_kernels/all/sample_event.py | jpmorgan98/MCDC-TNT-2 | 1 | 12762601 | """
Name: SampleEvent
breif: Samples events for particles provided in a phase space for MCDC-TNT
Author: <NAME> (OR State Univ - <EMAIL>) CEMeNT
Date: Dec 2nd 2021
"""
import numpy as np
import pykokkos as pk
@pk.workload
class SampleEvent:
    """PyKokkos workload that samples one collision event (scatter, capture,
    or fission) per live particle from per-cell cross sections and one
    pre-generated uniform random number per particle.

    The per-event index views are filled in place and the final event counts
    are written to clever_out as [scatters, captures, fissions].
    """
    def __init__(self, p_mesh_cell, p_alive, mesh_cap_xsec, mesh_scat_xsec, mesh_fis_xsec, scatter_event_index, capture_event_index, fission_event_index, num_part, nu_new_neutrons, rands, clever_out):
        self.p_mesh_cell: pk.View1D[int] = p_mesh_cell
        self.p_alive: pk.View1D[int] = p_alive
        self.mesh_cap_xsec: pk.View1D[pk.double] = mesh_cap_xsec
        self.mesh_scat_xsec: pk.View1D[pk.double] = mesh_scat_xsec
        self.mesh_fis_xsec: pk.View1D[pk.double] = mesh_fis_xsec
        self.scatter_event_index: pk.View1D[int] = scatter_event_index
        self.capture_event_index: pk.View1D[int] = capture_event_index
        self.fission_event_index: pk.View1D[int] = fission_event_index
        self.num_part: int = num_part
        # BUG FIX: this previously assigned num_part, so the number of
        # neutrons born per fission was wrong whenever
        # nu_new_neutrons != num_part.
        self.nu_new_neutrons: int = nu_new_neutrons
        self.rands: pk.View1D[pk.double] = rands
        self.fissions_to_add: int = 0
        self.scat_count: int = 0
        self.cap_count: int = 0
        self.fis_count: int = 0
        self.killed: int = 0
        self.clever_out: pk.View1D[int] = clever_out
    @pk.main
    def run(self):
        for i in range(self.num_part):
            # Normalize the three channels so they partition [0, 1) within
            # this particle's mesh cell.
            total_scat_xsec: pk.double = self.mesh_scat_xsec[self.p_mesh_cell[i]] + self.mesh_cap_xsec[self.p_mesh_cell[i]] + self.mesh_fis_xsec[self.p_mesh_cell[i]]
            mesh_scat_xsec_temp: pk.double = self.mesh_scat_xsec[self.p_mesh_cell[i]] / total_scat_xsec
            mesh_cap_xsec_temp: pk.double = self.mesh_cap_xsec[self.p_mesh_cell[i]] / total_scat_xsec
            mesh_fis_xsec_temp: pk.double = self.mesh_fis_xsec[self.p_mesh_cell[i]] / total_scat_xsec
            if self.p_alive[i] == 1:
                event_rand: pk.double = self.rands[i]
                # Scatter: rand in [0, P_scat).
                if event_rand < mesh_scat_xsec_temp:
                    self.scatter_event_index[self.scat_count] = i
                    self.scat_count += 1
                # Capture: rand in (P_scat, P_scat + P_cap); particle dies.
                elif mesh_scat_xsec_temp < event_rand and event_rand < mesh_scat_xsec_temp + mesh_cap_xsec_temp:
                    self.p_alive[i] = 0
                    self.killed += 1
                    self.capture_event_index[self.cap_count] = i
                    self.cap_count += 1
                # Fission: particle dies and queues nu new neutrons.
                elif mesh_scat_xsec_temp + mesh_cap_xsec_temp < event_rand and event_rand < mesh_scat_xsec_temp + mesh_cap_xsec_temp + mesh_fis_xsec_temp:
                    self.p_alive[i] = 0
                    self.killed += 1
                    self.fissions_to_add += self.nu_new_neutrons
                    self.fission_event_index[self.fis_count] = i
                    self.fis_count += 1
                else:
                    # rand landed exactly on a bin edge (strict inequalities
                    # above) -- no event is recorded.
                    pk.printf('Well shoot dang')
        self.clever_out[0] = self.scat_count
        self.clever_out[1] = self.cap_count
        self.clever_out[2] = self.fis_count
def test_SampleEvent():
    """End-to-end check of the SampleEvent kernel with hand-picked randoms.

    Three live particles with normalized cross sections (scatter, capture,
    fission) close to (1/3, 1/3, 1/2) and random draws 0.2 / 0.4 / 0.8 must
    produce exactly one event of each kind, attributed to particles 0, 1
    and 2 respectively.  Particle 3 is dead and must be ignored.
    """
    cells = pk.from_numpy(np.array([0, 1, 0, 5], dtype=np.int32))
    alive = pk.from_numpy(np.array([1, 1, 1, 0], dtype=np.int32))
    cap_xs = pk.from_numpy(1 / 3 * np.ones(2, dtype=float))
    scat_xs = pk.from_numpy(1 / 3 * np.ones(2, dtype=float))
    fis_xs = pk.from_numpy(1 / 2 * np.ones(2, dtype=float))
    scat_idx = pk.from_numpy(np.zeros(3, dtype=np.int32))
    cap_idx = pk.from_numpy(np.zeros(3, dtype=np.int32))
    fis_idx = pk.from_numpy(np.zeros(3, dtype=np.int32))
    rands = pk.from_numpy(np.array([.2, .4, .8], dtype=float))
    counters = pk.from_numpy(np.zeros(3, dtype=np.int32))
    nu = 2
    num_part = 3
    print("Running!")
    pk.execute(pk.ExecutionSpace.OpenMP,
               SampleEvent(cells, alive, cap_xs, scat_xs, fis_xs,
                           scat_idx, cap_idx, fis_idx,
                           num_part, nu, rands, counters))
    print('Made it through')
    scat_count = counters[0]
    cap_count = counters[1]
    fis_count = counters[2]
    print(scat_count)
    assert (fis_count == 1)
    assert (scat_count == 1)
    assert (cap_count == 1)
    assert (cap_idx[0] == 1)
    assert (fis_idx[0] == 2)
    assert (scat_idx[0] == 0)
# Allow running the test directly without a pytest runner.
if __name__ == '__main__':
    test_SampleEvent()
| 2.1875 | 2 |
modules/apertium_wikistats.py | apertium/phenny | 7 | 12762602 | <gh_stars>1-10
#!/usr/bin/env python3
"""
apertium_wikistats.py - Phenny Apertium Wiki Stats Module
"""
import os
import requests
import shlex
import subprocess
import urllib.request
import urllib.error
from web import REQUEST_TIMEOUT
from tools import dot_path
# Repository root (two levels up from this module).
PARENT_DIRECTORY = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# External bot script spawned by the .awikstats command handler below.
BOT = os.path.join(PARENT_DIRECTORY, 'apertium_wikistats_bot.py')
BOT_AUTOCOVERAGE = ('/bot_autocoverage.py', 'bot_autocoverage.py')
AUTOCOVERAGE = os.path.join(PARENT_DIRECTORY, 'autocoverage.py')
# Name of the language whose coverage run is in flight; '' means idle.
# Used as a crude mutex so only one coverage computation runs at a time.
IS_COVERAGE_RUNNING = ''
def awikstats(phenny, input):
    """Issue commands to the Apertium Stem Counter Bot.

    Supported subcommands:
      .awikstats update <lang[, lang...]>  -- refresh dictionary stem counts
      .awikstats coverage <lang>           -- compute corpus coverage (slow;
                                              only one run at a time)
    Both spawn the external bot script (BOT) as a subprocess and relay its
    stderr back to the requesting user.
    """
    if (not hasattr(phenny.config, 'stemCounterBotLogin') or
            not hasattr(phenny.config, 'stemCounterBotPassword')):
        phenny.say('Bot login/password needs to be set in configuration file (default.py).')
        phenny.say('Keys are stemCounterBotLogin and stemCounterBotPassword.')
        return
    botLogin = phenny.config.stemCounterBotLogin
    # Bug fix: this line previously contained a credential-scrubbing artifact
    # ("<PASSWORD>...") which is not valid Python.
    botPassword = phenny.config.stemCounterBotPassword
    try:
        rawInput = input.group()
        option = rawInput.split(' ')[1].strip()
    except Exception:
        # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
        # still propagate.
        phenny.say('Invalid .awikstats command; try something like %s' % repr(awikstats.example_update))
        return
    if option == 'update':
        try:
            langs = ''.join(rawInput.split(' ')[2:]).split(',')
        except Exception:
            phenny.say('Invalid .awikstats update command; try something like %s' % repr(awikstats.example_update))
            return
        commands = shlex.split('python3 %s %s "%s" dict -p %s -r "%s"' % (BOT, botLogin, botPassword, ' '.join(langs), input.nick))
        process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dot_path(''))
        stdout, stderr = process.communicate()
        # Bug fix: Popen pipes yield bytes on Python 3; decode before relaying.
        for line in stderr.decode('utf-8', 'replace').splitlines():
            phenny.msg(input.nick, line)
    elif option == 'coverage':
        global IS_COVERAGE_RUNNING
        if IS_COVERAGE_RUNNING == '':
            try:
                lang = rawInput.split(' ')[2].strip()
            except Exception:
                phenny.say('Invalid .awikstats coverage command; try something like %s' % repr(awikstats.example_coverage))
                return
            # Probe the wiki page first so we can fail fast on unknown languages.
            try:
                urllib.request.urlopen('https://wiki.apertium.org/wiki/Apertium-' + lang)
            except urllib.error.HTTPError:
                phenny.say('%s: No wiki for specified language!' % input.nick)
                return
            phenny.say('%s: Calculating coverage... It may take a while, I will inform you after it\'s completed.' % input.nick)
            commands = shlex.split('python3 %s %s "%s" coverage -p %s -r "%s"' % (BOT, botLogin, botPassword, lang, input.nick))
            IS_COVERAGE_RUNNING = lang
            process = subprocess.Popen(commands, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=dot_path(''))
            stdout, stderr = process.communicate()
            IS_COVERAGE_RUNNING = ''
            try:
                out = stdout.splitlines()[-1].decode('utf-8').strip()
                if out.startswith('Coverage:'):
                    phenny.msg(input.nick, '%s - https://wiki.apertium.org/wiki/Apertium-%s/stats' % (out, lang))
                else:
                    for line in stderr.decode('utf-8', 'replace').splitlines():
                        phenny.msg(input.nick, line)
            except Exception:
                # The bot produced no parseable output; relay its stderr instead.
                for line in stderr.decode('utf-8', 'replace').splitlines():
                    phenny.msg(input.nick, line)
        else:
            phenny.say('%s: Sorry, there is already %s coverage running, try again after it\'s completed!' % (input.nick, IS_COVERAGE_RUNNING))
    else:
        phenny.say('Invalid .awikstats option: %s' % option)
        return
# Phenny wiring: ".awikstats" triggers the handler above; the example strings
# are displayed by phenny's built-in help command.
awikstats.commands = ['awikstats']
awikstats.example_update = '.awikstats update tat, kaz, tat-kaz'
awikstats.example_coverage = '.awikstats coverage tyv'
awikstats.priority = 'high'
| 2.265625 | 2 |
hydragnn/models/CGCNNStack.py | pzhanggit/HydraGNN | 0 | 12762603 | <reponame>pzhanggit/HydraGNN<gh_stars>0
##############################################################################
# Copyright (c) 2021, Oak Ridge National Laboratory #
# All rights reserved. #
# #
# This file is part of HydraGNN and is distributed under a BSD 3-clause #
# license. For the licensing terms see the LICENSE file in the top-level #
# directory. #
# #
# SPDX-License-Identifier: BSD-3-Clause #
##############################################################################
import torch
import torch.nn.functional as F
from torch.nn import ModuleList
from torch_geometric.nn import CGConv, BatchNorm, global_mean_pool
from .Base import Base
class CGCNNStack(Base):
    """Stack of Crystal Graph CNN (CGConv) layers for HydraGNN.

    CGConv keeps the embedding width constant, so the hidden dimension is
    forced equal to the input dimension (see __init__ below).
    """

    def __init__(
        self,
        edge_dim: int,
        input_dim,
        output_dim,
        output_type,
        config_heads,
        **kwargs,
    ):
        # Dimension of edge features, forwarded to every CGConv layer.
        self.edge_dim = edge_dim
        # CGCNN does not change embedding dimensions
        # We use input dimension (first argument of base constructor)
        # also as hidden dimension (second argument of base constructor)
        # We therefore pass all required args explicitly.
        super().__init__(
            input_dim,
            input_dim,
            output_dim,
            output_type,
            config_heads,
            **kwargs,
        )

    def get_conv(self, input_dim, _):
        # Factory used by the base class; the second argument (output dim)
        # is ignored because CGConv preserves the channel count.
        return CGConv(
            channels=input_dim,
            dim=self.edge_dim,
            aggr="add",
            batch_norm=False,
            bias=True,
        )

    def _init_node_conv(self):
        """It overwrites _init_node_conv() in Base since purely convolutional layers in _init_node_conv() is not implemented yet.
        Here it serves as a temporary place holder. Purely cgcnn conv is not feasible for node feature predictions with
        arbitrary output dimensions, unless we combine it with mlp"""
        # *******convolutional layers for node level predictions******* #
        node_feature_ind = [
            i for i, head_type in enumerate(self.head_type) if head_type == "node"
        ]
        # Nothing to do if the model has no node-level heads.
        if len(node_feature_ind) == 0:
            return
        self.num_conv_layers_node = self.config_heads["node"]["num_headlayers"]
        self.hidden_dim_node = self.config_heads["node"]["dim_headlayers"]
        # fixme: CGConv layer alone will present the same out dimension with the input, instead of having different "in_channels" and "out_channels" as in the other conv layers;
        # so to predict output node features with different dimensions from the input node feature's, CGConv can be
        # combined with, e.g.,mlp
        for ihead in range(self.num_heads):
            if (
                self.head_type[ihead] == "node"
                and self.config_heads["node"]["type"] == "conv"
            ):
                raise ValueError(
                    '"conv" for node features decoder part in CGCNN is not ready yet. Please set config["NeuralNetwork"]["Architecture"]["output_heads"]["node"]["type"] to be "mlp" or "mlp_per_node" in input file.'
                )

    def __str__(self):
        return "CGCNNStack"
| 1.859375 | 2 |
graph_recsys_benchmark/nn/kgcn_conv.py | ecom-research/graph_recsys_benchmark | 1 | 12762604 | import torch
from torch.nn import Parameter
import torch.nn.functional as F
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops
from torch_geometric.nn.inits import glorot, zeros
class KGCNConv(MessagePassing):
    """Knowledge-graph convolution layer.

    Aggregates neighbour features weighted by a precomputed per-edge
    attention map, mixes the aggregate with the central node's own features,
    then applies a single linear transform followed by ReLU.
    """

    def __init__(
            self, in_channels, out_channels,
            negative_slope=0.2, bias=True, **kwargs):
        super(KGCNConv, self).__init__(aggr='add', **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.negative_slope = negative_slope

        self.weight = Parameter(torch.Tensor(in_channels, out_channels))
        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        # Glorot (Xavier) init for the weight, zeros for the bias.
        glorot(self.weight)
        zeros(self.bias)

    def forward(self, x, edge_index, att_map, size=None):
        """Propagate messages along edge_index, weighting each edge by att_map."""
        if size is None and torch.is_tensor(x):
            edge_index, _ = remove_self_loops(edge_index)
        return self.propagate(edge_index, size=size, x=x, att_map=att_map)

    def message(self, x_j, att_map):
        # Scale each neighbour's feature vector by its scalar edge weight.
        return x_j * att_map.view(-1, 1)

    def update(self, aggr_out, x):
        # Combine aggregated neighbourhood with self features, then transform.
        aggr_out = F.relu(torch.mm(aggr_out + x, self.weight) + self.bias)
        return aggr_out

    def __repr__(self):
        # Bug fix: the previous implementation referenced the nonexistent
        # attribute self.heads (this layer has no attention heads), so every
        # repr() call raised AttributeError.
        return '{}({}, {})'.format(self.__class__.__name__,
                                   self.in_channels,
                                   self.out_channels)
| 2.25 | 2 |
tests/unit/test_tmux_installer.py | hallgrimur1471/drvn_installer | 0 | 12762605 | <gh_stars>0
# pylint: disable=no-self-use, protected-access
import drvn.installer.installers.tmux_installer as installer
import pytest
from unittest.mock import MagicMock, patch
class TestCalculateUrlToLatestReleaseTarball:
    # _calculate_url_to_latest_release_tarball should interpolate the version
    # string into GitHub's release-download URL template.
    def test_normal(self):
        calculated_url = installer._calculate_url_to_latest_release_tarball("2.9a")
        assert (
            calculated_url
            == "https://github.com/tmux/tmux/releases/download/2.9a/tmux-2.9a.tar.gz"
        )
# Patch the HTTP helper so no real request to the GitHub API is made.
@patch("drvn.installer._utils.open_url")
class TestFindUrlToLatestRelease:
    def test_normal(self, open_url):
        # The GitHub "latest release" API returns JSON with a tag_name field;
        # _find_url_to_latest_release should build an https URL from it.
        open_url.return_value = '{"tag_name":"3.0a"}'
        assert "https" in installer._find_url_to_latest_release()
| 2.09375 | 2 |
ehc/unit.py | jamesbowman/ga144tools | 13 | 12762606 | <filename>ehc/unit.py
import sys
import time
import struct
import serial
import random
from ga144 import GA144
def trivial(load, send, recv):
    """Smoke-test the loaded node: a word pushed into each port must come
    back out of the neighbouring port.

    Loads trivial.ga, then four times over sends a distinct value into each
    of NORTH/EAST/SOUTH/WEST and asserts it reappears on the next port in
    the cycle (NORTH->EAST, EAST->SOUTH, SOUTH->WEST, WEST->NORTH).
    """
    load("trivial.ga")
    hops = (('NORTH', 'EAST', 100),
            ('EAST', 'SOUTH', 200),
            ('SOUTH', 'WEST', 300),
            ('WEST', 'NORTH', 400))
    for _ in range(4):
        for src, dst, value in hops:
            send(src, value)
            assert value == recv(dst)
def packer(load, send, recv):
    """Exercise the 9-bit packer node: two halves in, one 18-bit word out.

    For each sample word, the high 9 bits then the low 9 bits are sent to
    EAST; the packer must emit the reassembled 18-bit word on WEST.
    """
    load("packer.ga")
    samples = (0, 511, 512, 513, 0x3ffff, 0x35555)
    for word in samples:
        high, low = word >> 9, word & 511
        send("EAST", high)
        send("EAST", low)
        assert recv("WEST") == word
def dryrun(load, send, recv):
    # Python 2 code (print statements). Chain-loads block programs b00.ga,
    # b01.ga, ... — each block reports the number of the next block to load
    # on WEST; block 2 terminates the chain and reports a value on NORTH.
    # load("b01.ga", verbose = 1)
    b = 0
    while True:
        load("b%02d.ga" % b)
        if b == 2:
            print recv('NORTH')
            break
        b = recv("WEST")
        print 'next', b
        # for i in range(8): print 'r%d ' % i, (recv("WEST"))
if __name__ == '__main__':
    # Python 2 hardware-test driver: talks to a GA144 chip over a serial
    # port (port name in sys.argv[1]), runs the node tests above, then
    # exercises the external RAM test program.
    # v = draw.Viz(g.active())
    # v.render("pictures/%s.png" % sys.argv[2])
    ser = serial.Serial(sys.argv[1], 460800)
    g = GA144()

    def load1(sourcefile, verbose = 0):
        # Reboot the chip, load the test fixture plus the program under test
        # into node 508, and stream the boot image over serial.
        ser.setRTS(0) # Reboot by dropping RTS
        ser.setRTS(1)
        g.__init__()
        g.log = lambda a,b:None
        g.loadprogram('fixture.ga')
        g.node['508'].load(open(sourcefile).read())
        if verbose:
            print "\n".join(g.node['508'].listing)
            print
        ser.write(g.async())
        ser.flush()
        # print "\n".join(g.node['608'].listing)

    def xfer(addr, din):
        # One request/response transaction: send (din, addr), read back a
        # 4-byte little-endian word whose low byte must be the 0xa5 marker;
        # the remaining 18 bits are the payload.
        # print hex(din), addr
        ser.write(g.sget([din, addr]))
        s = ser.read(4)
        (v, ) = struct.unpack("<I", s)
        assert (v & 0xff) == 0xa5
        return (v >> 8) & 0x3ffff

    # Map of logical port names to node addresses used by the fixture.
    dirs = {
        "OTHER" : 999,
        "NORTH" : 608,
        "EAST" : 509,
        "SOUTH" : 408,
        "WEST" : 507}

    def send(node, din):
        # Bit 17 (0x20000) marks the transaction as a write.
        xfer(0x20000 | dirs[node], din)

    def recv(node):
        return xfer(dirs[node], 0)

    t0 = time.time()
    # Tests exercising node 508 through the fixture's four ports.
    node1tests = [
        trivial,
        packer,
        # dryrun,
    ]
    for t in node1tests:
        print t.__name__
        t(load1, send, recv)

    # Reboot into the RAM test program for the remaining checks.
    g.loadprogram('testram.ga')
    ser.setRTS(0) # Reboot by dropping RTS
    ser.setRTS(1)
    ser.write(g.async())
    ser.flush()

    def rd(a):
        # RAM read: opcode 0, then address; result comes back on OTHER.
        send("OTHER", 0)
        send("OTHER", a)
        return recv("OTHER")

    def wr(a, v):
        # RAM write: opcode 1, then address, then value.
        send("OTHER", 1)
        send("OTHER", a)
        send("OTHER", v)

    # Sanity check: dump a few words, write one, dump again.
    for a in range(5):
        print hex(rd(a))
    print
    wr(3, 0x1234)
    for a in range(5):
        print hex(rd(a))

    random.seed(0)

    def r_w(aa, dd):
        # Write value dd[i] to address aa[i], then verify all reads match.
        [wr(a, d) for a,d in zip(aa, dd)]
        assert [rd(a) for a in aa] == dd

    # Walk a 1 across the address bits, with random data.
    aa = [2 ** i for i in range(1, 18)]
    dd = [random.getrandbits(16) for _ in aa]
    r_w(aa, dd)
    print "\n".join(g.node['008'].listing)
    # Ten rounds of random even addresses with random data.
    for i in xrange(10):
        aa = random.sample(range(0, 2**18, 2), 10)
        dd = [random.getrandbits(16) for _ in aa]
        r_w(aa, dd)

    def loadblk(dst, prg):
        # Store program |prg| into RAM block |dst| (256 words per block):
        # first word is length-1, then each 18-bit word split into two
        # 9-bit halves (high first).
        prg_s = []
        for p in prg:
            prg_s.append(p >> 9)
            prg_s.append(p & 511)
        d = [len(prg) - 1] + prg_s
        for i,d in enumerate(d):
            wr(2 * (256 * dst + i), d)

    # Write four program blocks, then read each back via opcode 2 and verify.
    prgs = [
        (0, [0xaa, 0x3ffff]),
        (2, [random.getrandbits(18) for _ in range(127)]),
        (1, [2**i for i in range(18)]),
        (3, [random.getrandbits(18) for _ in range(127)]),
    ]
    for bn,prg in prgs:
        loadblk(bn, prg)
    for bn,prg in prgs:
        send("OTHER", 2)
        send("OTHER", bn)
        a = [recv("OTHER") for _ in prg]
        assert prg == a
| 2.25 | 2 |
visual_studio/NativeClientVSAddIn/InstallerResources/NaCl/compiler_wrapper.py | sbc100/nativeclient-sdk | 6 | 12762607 | <reponame>sbc100/nativeclient-sdk
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file
"""Python wrapper around gcc to make it behave a little
more like cl.exe WRT to parallel building.
"""
import multiprocessing
import os
import Queue
import shlex
import subprocess
import sys
import threading
import time
# NACL_GCC_VERBOSE=1 enables the Trace() diagnostics below.
verbose = int(os.environ.get('NACL_GCC_VERBOSE', '0'))
# NACL_GCC_SHOW_COMMANDS=1 echoes the full gcc command line per job instead
# of just the source file's basename.
show_commands = int(os.environ.get('NACL_GCC_SHOW_COMMANDS', '0'))
# When True, abort the whole build on the first failing compile job.
stop_on_error = False
def RunGCC(cmd, basename):
    """Run one gcc invocation and return (returncode, stdout, stderr).

    The returned stderr is prefixed with either the full command line or
    just the source basename (depending on NACL_GCC_SHOW_COMMANDS) so the
    caller can print it directly as a build-log line.
    """
    cmdstring = subprocess.list2cmdline(cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    p.wait()
    if show_commands:
        logmsg = cmdstring
    else:
        logmsg = basename
    stderr = logmsg + '\n' + stderr
    return (p.returncode, stdout, stderr)
def BuildSerial(base_cmd, outpath, files):
    """Compile |files| one at a time, echoing each job's output as it runs.

    Returns the last nonzero gcc return code, or 0 if everything compiled.
    Stops at the first failure only when the module-level stop_on_error
    flag is set.
    """
    final_result = 0
    for filename in files:
        cmd, basename = MakeCommand(base_cmd, outpath, filename)
        rtn, stdout, stderr = RunGCC(cmd, basename)
        sys.stdout.write(stdout)
        sys.stdout.flush()
        sys.stderr.write(stderr)
        sys.stderr.flush()
        if rtn:
            final_result = rtn
            if stop_on_error:
                break
    return final_result
def Worker(queue, out_queue):
    """Entry point for worker threads.

    Each thread compiles jobs from |queue| until there are no jobs left or
    until the main thread clears Worker.running to signal early shutdown.

    Bug fix: the old "while not queue.empty(): item = queue.get(False)"
    pattern raced with the other workers -- the queue could drain between
    the empty() check and the get(), raising an unhandled Queue.Empty.
    A non-blocking get() with the exception handled is race-free.
    """
    while Worker.running:
        try:
            item = queue.get(False)
        except Queue.Empty:
            break
        if not item:
            break
        out_queue.put(RunGCC(item[0], item[1]))
def MakeCommand(base_cmd, outpath, filename):
    """Build the full gcc command line for one source file.

    Visual Studio passes an existing directory as the -o argument; the real
    object name is derived by joining that directory with the source file's
    basename and swapping the extension for ".obj".

    Returns (command_list, source_basename).
    """
    basename = os.path.basename(filename)
    objname = os.path.splitext(basename)[0] + '.obj'
    full_cmd = base_cmd + ['-c', filename, '-o', os.path.join(outpath, objname)]
    return (full_cmd, basename)
def BuildParallel(cores, base_cmd, outpath, files):
    """Compile |files| using |cores| worker threads.

    Returns the last nonzero gcc return code seen, or 0 on full success.
    When the module-level stop_on_error flag is set, the first failure
    signals all workers to stop.

    Improvements over the previous version: xrange -> range (works on both
    Python 2 and 3), the unused `pool` list is gone, and worker threads are
    joined before returning so no daemonless threads outlive the build.
    """
    Worker.running = True
    job_queue = Queue.Queue()
    out_queue = Queue.Queue()
    for filename in files:
        cmd, basename = MakeCommand(base_cmd, outpath, filename)
        job_queue.put((cmd, basename))

    # Create worker thread pool, passing job queue
    # and output queue to each worker.
    args = (job_queue, out_queue)
    threads = []
    for _ in range(cores):
        t = threading.Thread(target=Worker, args=args)
        t.start()
        threads.append(t)

    results = 0
    Trace("waiting for %d results" % len(files))
    final_result = 0
    while results < len(files):
        results += 1
        rtn, stdout, stderr = out_queue.get()
        # stdout seems to be completely ignored by Visual Studio, but GCC
        # emits all useful information on stderr anyway.
        sys.stdout.write(stdout)
        sys.stdout.flush()
        sys.stderr.write(stderr)
        sys.stderr.flush()
        if rtn:
            final_result = rtn
            if stop_on_error:
                # stop all workers
                Worker.running = False
                break

    # Workers exit once the queue is empty (or running is cleared); reap them.
    for t in threads:
        t.join()
    return final_result
def Log(msg):
    """Write *msg* (plus a newline) to stderr.

    Visual Studio basically ignores the compiler's stdout and only echoes
    stderr, so every user-visible message goes through this helper.
    """
    text = str(msg) + '\n'
    sys.stderr.write(text)
    sys.stderr.flush()
def Trace(msg):
    # Diagnostic logging, gated on the NACL_GCC_VERBOSE environment flag.
    if verbose:
        Log("nacl_compiler:" + str(msg))
def main(args):
    """Parse the cl.exe-style argument list and run a parallel build.

    Expected shape: [gcc-options...] -o <outdir> -- <source files...>,
    optionally wrapped in a single "@responsefile" argument.
    Returns the build's exit code.
    """
    # gcc-style response file: "@file" means "read the real args from file".
    if args[0][0] == '@':
        rspfile = args[0][1:]
        args = shlex.split(open(rspfile).read())
    # find the last occurrence of '--' in the argument
    # list and use that to signify the start of the
    # list of sources
    index = list(reversed(args)).index('--')
    index = len(args) - index
    base_cmd = args[:index-1]
    files = args[index:]
    # remove -o <path> from base_cmd; MakeCommand re-adds a per-file -o.
    index = base_cmd.index('-o')
    outpath = base_cmd[index+1]
    del base_cmd[index+1]
    del base_cmd[index]
    # Thread count: NACL_GCC_CORES overrides, else one per CPU, capped at
    # the number of source files.
    cores = int(os.environ.get('NACL_GCC_CORES', '0'))
    if not cores:
        cores = multiprocessing.cpu_count()
    cores = min(cores, len(files))
    Trace("compiling %d sources using %d threads" % (len(files), cores))
    rtn = BuildParallel(cores, base_cmd, outpath, files)
    Trace("returning %d" % rtn)
    return rtn
# Propagate the build result as the process exit code so VS sees failures.
if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
| 2.328125 | 2 |
pymlconf/yamlhelper.py | pylover/pymlconf | 36 | 12762608 | <gh_stars>10-100
import sys
import yaml
from yaml import YAMLError
try:
from yaml import CLoader as Loader
except ImportError: # pragma: no cover
from yaml import Loader
def loads(string):
    """Parse *string* as a YAML document and return the resulting object.

    On a parse error the full offending input is echoed to stderr (so the
    bad document is easy to find in logs) and the YAMLError is re-raised.
    """
    try:
        return yaml.load(string, Loader)
    except YAMLError:
        print('YAML parsing error', file=sys.stderr)
        print('Input string start', file=sys.stderr)
        print(string, file=sys.stderr)
        print('Input string end', file=sys.stderr)
        # Bare `raise` preserves the original traceback; the previous
        # `raise ex` re-raised from this frame, losing the parse location.
        raise
def load(filename):
    """Read *filename* and parse its contents as YAML."""
    with open(filename) as stream:
        text = stream.read()
    return loads(text)
def dumps(o):
    """Serialize *o* as YAML using block style (default_flow_style=False)."""
    return yaml.dump(o, default_flow_style=False)
| 2.765625 | 3 |
ipython.symlink/profile_default/ipython_config.py | peteowlett/dotfiles | 0 | 12762609 | <reponame>peteowlett/dotfiles
# IPython profile configuration; get_config() is injected by IPython when it
# executes this file at startup.
c = get_config()
# Quit on Ctrl-D without the "Do you really want to exit?" prompt.
c.TerminalInteractiveShell.confirm_exit = False
# Use vi keybindings at the interactive prompt.
c.TerminalInteractiveShell.editing_mode = 'vi'
| 1.203125 | 1 |
solarforecastarbiter/plotting/tests/test_plotting_utils.py | awig/solarforecastarbiter-core | 0 | 12762610 | <filename>solarforecastarbiter/plotting/tests/test_plotting_utils.py<gh_stars>0
import pandas as pd
from pandas.testing import assert_index_equal
import pytest
from solarforecastarbiter.plotting import utils
@pytest.mark.parametrize('var,exp', [
    ('ghi', 'GHI (W/m^2)'),
    ('dc_power', 'DC Power (MW)')
])
def test_format_variable_name(var, exp):
    # format_variable_name should produce the display label with units.
    formatted = utils.format_variable_name(var)
    assert formatted == exp
@pytest.mark.parametrize('dobj,removal', [
    (pd.Series, slice(5, 10)),
    (pd.DataFrame, slice(12, 15))
])
def test_align_index(dobj, removal):
    # Dropping a run of timestamps and re-aligning at the same 5-minute
    # interval should restore the complete regular index.
    index = pd.date_range(start='now', freq='5min',
                          periods=20, name='timestamp')
    data = dobj(index=index, dtype=float)
    data = data.drop(index[removal])
    out = utils.align_index(data, pd.Timedelta('5min'))
    assert_index_equal(out.index, index)
def test_align_index_new_length():
    # Aligning 5-minute data at a finer 1-minute interval should produce a
    # 1-minute index spanning the same first/last timestamps.
    index = pd.date_range(start='now', freq='5min',
                          periods=20, name='timestamp')
    data = pd.Series(index=index, dtype=float)
    out = utils.align_index(data, pd.Timedelta('1min'))
    nindex = pd.date_range(start=index[0], end=index[-1], freq='1min',
                           name='timestamp')
    assert_index_equal(out.index, nindex)
def test_align_index_limit():
    # With a 60-minute limit, only the trailing hour of data (the last 13
    # five-minute points, endpoints inclusive) should be kept.
    index = pd.date_range(start='now', freq='5min',
                          periods=20, name='timestamp')
    data = pd.Series(index=index, dtype=float)
    out = utils.align_index(data, pd.Timedelta('5min'),
                            limit=pd.Timedelta('60min'))
    nindex = pd.date_range(start=index[-13], end=index[-1], freq='5min',
                           name='timestamp')
    assert_index_equal(out.index, nindex)
@pytest.mark.parametrize('label,method', [
    ('instant', 'line'),
    ('beginning', 'step'),
    ('ending', 'step'),
    ('event', 'step'),
    # Unknown labels must raise instead of silently picking a default.
    pytest.param('other', '', marks=pytest.mark.xfail(raises=ValueError))
])
def test_line_or_step(label, method):
    # line_or_step maps an interval label to a plot method name plus two
    # keyword-argument dicts for the plotting call.
    out = utils.line_or_step(label)
    assert out[0] == method
    assert isinstance(out[1], dict)
    assert isinstance(out[2], dict)
@pytest.mark.parametrize('label', [
    'instant',
    'beginning',
    'ending',
    'event',
    # Unknown labels must raise instead of silently picking a default.
    pytest.param('other', marks=pytest.mark.xfail(raises=ValueError))
])
def test_line_or_step_plotly(label):
    # The plotly variant returns a single kwargs dict for any valid label.
    out = utils.line_or_step_plotly(label)
    assert isinstance(out, dict)
| 2.25 | 2 |
utility/url/url_response.py | eugen0/Comp_result_open_data | 0 | 12762611 | <filename>utility/url/url_response.py
from urllib.request import urlopen
import json
def valid_url(url):
    """
    check if url return success
    :param url: eg.'https://data.gov.ro/api/3/action/package_list'
    :return: returns bool from success status of url
    """
    payload = json.loads(urlopen(url).read())
    return payload['success']
# get available packages
def get_avlb_package(url):
    """
    Fetch *url* and return the 'result' field of its JSON response.
    :param url: eg. 'https://data.gov.ro/api/3/action/package_list'
    :return: the API's 'result' payload (a list of available packages)
    """
    payload = json.loads(urlopen(url).read())
    return payload['result']
| 3.34375 | 3 |
server/player.py | S4more/web-dev-project | 0 | 12762612 | class Player:
def __init__(self, socket, name, color, board):
self.name = name
self.board = board
self.socket = socket
self.lastMove = []
self.isTurn = False
#self.color = color
self.color = 'w'
async def init(self):
await self.board.addPlayer(self)
def __str__(self):
return self.name
def __repr__(self):
return str(self.name)
| 3.015625 | 3 |
live2d/warp_to_cistem.py | bbarad/Live2D | 9 | 12762613 | <reponame>bbarad/Live2D
#! /usr/bin/env python
# Copyright 2019 Genentech Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Process warp data to prepare for Frealign. Requires relion_preprocess
# and `star_to_par.com`
from functools import partial
import multiprocessing
import os
import time
import processing_functions_blocking_shell as processing_functions
# Process arguments and memorize the starting directory to move back to at the
# end of the script - NB if the script crashes the user gets dumped into the
# wrong place - I will need to fix that...
# Starting Info
warp_directory = "/local/scratch/krios/Warp_Transfers/TestData"
# star_file = "allparticles_BoxNet2Mask_20180918.star"
star_file = "allparticles_GenentechNet2Mask_20190627.star"
# star_file = "all_particles_short.star"
starting_directory = os.getcwd()
working_directory = "/local/scratch/krios/Warp_Transfers/TestData/classification"
stack_label = "combined_stack"
pixel_size = 1.2007
high_res_limit_initial = 40.0
high_res_limit_final = 8.0
low_res_limit = 300
process_count = 32
resolution_cycle_count = 10
classify_by_resolution = True
long_cycle_count = 5
run_long_cycles = True
class_number = 50
angular_search_step = 15.0
max_search_range = 49.5
particles_per_class_target = 300
get_new_particles_from_warp = True
times = []
times.append(time.time())
os.chdir(working_directory)
previous_classes_bool, recent_class, start_cycle_number = processing_functions.find_previous_classes()
if get_new_particles_from_warp:
processing_functions.import_new_particles(stack_label=stack_label, warp_folder=warp_directory, warp_star_filename=star_file, working_directory=working_directory)
new_star_file = processing_functions.generate_star_file(stack_label=stack_label, previous_classes_bool=previous_classes_bool, recent_class=recent_class)
times.append(time.time())
print("Generating stack files took {0:.1f} seconds".format(times[-1]-times[-2]))
else:
new_star_file = recent_class
particle_count, particles_per_process, class_fraction = processing_functions.calculate_particle_statistics(filename=new_star_file, class_number=class_number, particles_per_class=particles_per_class_target, process_count=process_count)
if not previous_classes_bool:
processing_functions.generate_new_classes(class_number=class_number, input_stack="{}.mrcs".format(stack_label), pixel_size=pixel_size, low_res=low_res_limit, high_res=high_res_limit_initial)
times.append(time.time())
print("Generating new classes took {0:.1f} seconds".format(times[-1]-times[-2]))
new_star_file = "classes_0.star"
print(new_star_file)
if classify_by_resolution:
print("=====================================")
print("Beginning Iterative 2D Classification")
print("=====================================")
print("Of the total {} particles, {:.0f}% will be classified into {} classes".format(particle_count, class_fraction*100, class_number))
print("Classification will begin at {}Å and step up to {}Å resolution over {} iterative cycles of classification".format(high_res_limit_initial, high_res_limit_final, resolution_cycle_count))
print("{0} particles per process will be classified by {1} processes.".format(particles_per_process, process_count))
for cycle_number in range(resolution_cycle_count):
high_res_limit = high_res_limit_initial-((high_res_limit_initial-high_res_limit_final)/(resolution_cycle_count-1))*cycle_number
filename_number = cycle_number + start_cycle_number
# high_res_limit = high_res_limit_final
print("High Res Limit: {0:.2}".format(high_res_limit))
print("Fraction of Particles: {0:.2}".format(class_fraction))
pool = multiprocessing.Pool(processes=process_count)
refine_job = partial(processing_functions.refine_2d_subjob, round=filename_number, input_star_filename=new_star_file, input_stack="{}.mrcs".format(stack_label), particles_per_process=particles_per_process, low_res_limit=low_res_limit, high_res_limit=high_res_limit, class_fraction=class_fraction, particle_count=particle_count, pixel_size=pixel_size, angular_search_step=angular_search_step, max_search_range=max_search_range, process_count=process_count)
results_list = pool.map(refine_job, range(process_count))
pool.close()
print(results_list[0].decode('utf-8'))
processing_functions.merge_2d_subjob(filename_number, process_count=process_count)
processing_functions.make_photos("classes_{}".format(filename_number+1), working_directory)
new_star_file = processing_functions.merge_star_files(filename_number, process_count=process_count)
start_cycle_number = start_cycle_number + resolution_cycle_count
if run_long_cycles:
print("====================================================")
print("Long 2D classifications to incorporate all particles")
print("====================================================")
print("All {} particles will be classified into {} classes at resolution {}Å".format(particle_count, class_number, high_res_limit_final))
print("{0} particles per process will be classified by {1} processes.".format(particles_per_process, process_count))
# 5 cycles of finalizing refinement to clean it up.
for cycle_number in range(long_cycle_count):
high_res_limit = high_res_limit_final
print("High Res Limit: {0:.2}".format(high_res_limit))
print("Fraction of Particles: {0:.2}".format(1.0))
filename_number = cycle_number + start_cycle_number
pool = multiprocessing.Pool(processes=process_count)
refine_job = partial(processing_functions.refine_2d_subjob, round=filename_number, input_star_filename=new_star_file, input_stack="{}.mrcs".format(stack_label), particles_per_process=particles_per_process, low_res_limit=low_res_limit, high_res_limit=high_res_limit, class_fraction=1.0, particle_count=particle_count, pixel_size=pixel_size, angular_search_step=angular_search_step, max_search_range=max_search_range, process_count=process_count)
results_list = pool.map(refine_job, range(process_count))
print(results_list[0].decode('utf-8'))
processing_functions.merge_2d_subjob(filename_number, process_count=process_count)
processing_functions.make_photos("class_{}".format(filename_number+1), working_directory)
new_star_file = processing_functions.merge_star_files(filename_number, process_count=process_count)
#
times.append(time.time())
os.chdir(starting_directory)
print("Total Runtime: {} seconds".format(times[-1]-times[0]))
| 2.03125 | 2 |
Core/dicom2nii/__init__.py | YongLiuLab/BrainRadiomicsTools | 10 | 12762614 | <gh_stars>1-10
from .Dicom2nii import convertDicoms,convertDicom | 1.070313 | 1 |
app/user.py | yashpatel5400/Synergy | 2 | 12762615 | """
__authors__ = <NAME>
__description__ = Class definition of the User object as defined as those
to be stored in the User DB (users table)
__name__ = user.py
"""
from flask import g
from flask_login import UserMixin
class User(UserMixin):
    """Application user as stored in the users table of the User DB.

    NOTE(review): modern Flask-Login (>= 0.3) defines is_authenticated /
    is_active / is_anonymous as *properties* on UserMixin; overriding them
    with plain methods here means truthiness checks such as
    ``if current_user.is_authenticated`` would see a bound method (always
    truthy).  Confirm the Flask-Login version in use before relying on
    these overrides.
    """

    def __init__(self, userid, name, email, active=True):
        self.userid = userid   # primary key from the users table
        self.name = name
        self.email = email
        self.active = active   # False disables login for this account

    def is_authenticated(self):
        #return true if user is authenticated, provided credentials
        return True

    def is_active(self):
        #return true if user is activte and authenticated
        return self.active

    def is_annonymous(self):
        #return true if annon, actual user return false
        # NOTE(review): name is misspelled ("annonymous"), so Flask-Login
        # never calls this; UserMixin's is_anonymous is what actually applies.
        return False

    def get_id(self):
        #return unicode id for user, and used to load user from user_loader callback
        return self.userid

    def __str__(self):
        return """
        self.userid = {}
        self.name = {}
        self.email = {}
        """.format(self.userid, self.name, self.email)
self_link.py | danya02/slon-2021-formal-languages | 0 | 12762616 | <reponame>danya02/slon-2021-formal-languages<filename>self_link.py
import pygame
import math
from data import Data
import common_utils
import config
class SelfLink:
def __init__(self, node=None, mouse=None):
self.node = node
self.anchor_angle = 0
self.mouse_offset_angle = 0
self.text = ''
if mouse:
self.set_anchor_point(mouse.x, mouse.y)
@property
def nodeA(self):
return self.node # for compatibility with normal links
@property
def nodeB(self):
return self.node # for compatibility with normal links
def set_mouse_start(self, x, y):
self.mouse_offset_angle = self.anchor_angle - math.atan2(y - self.node.y, x - self.node.x) + self.mouse_offset_angle
def set_anchor_point(self, x, y):
self.anchor_angle = math.atan2(y - self.node.y, x - self.node.x)
# snap to 90 degres
snap = round(self.anchor_angle / (math.pi / 2)) * (math.pi / 2)
if abs(self.anchor_angle - snap) < 0.1: self.anchor_angle = snap
# keep in the range -pi to pi so our contains_point() function always works
if self.anchor_angle < -math.pi: self.anchor_angle += 2*math.pi
if self.anchor_angle > math.pi: self.anchor_angle -= 2*math.pi
def get_end_points_and_circle(self):
circleX = self.node.x + 1.5 * config.node_radius * math.cos(self.anchor_angle)
circleY = self.node.y + 1.5 * config.node_radius * math.sin(self.anchor_angle)
circleRadius = 0.75 * config.node_radius
startAngle = self.anchor_angle - math.pi * 0.8
endAngle = self.anchor_angle + math.pi * 0.8
startX = circleX + circleRadius * math.cos(startAngle)
startY = circleY + circleRadius * math.sin(startAngle)
endX = circleX + circleRadius * math.cos(endAngle)
endY = circleY + circleRadius * math.sin(endAngle)
return Data(
has_circle = True,
startX = startX,
startY = startY,
endX = endX,
endY = endY,
start_angle = startAngle,
end_angle = endAngle,
circleX = circleX,
circleY = circleY,
circle_radius = circleRadius
)
def draw(self, surface, selected_object, caret_visible=None, **kwargs):
c = common_utils.get_color(self, selected_object, **kwargs)
stuff = self.get_end_points_and_circle()
# draw arc
r = pygame.Rect(0, 0, stuff.circle_radius*2, stuff.circle_radius*2)
r.centerx = stuff.circleX
r.centery = stuff.circleY
sa, ea = stuff.start_angle, stuff.end_angle
sa, ea = ea, sa
pygame.draw.arc(surface, c, r, -sa, -ea, 3)
# draw the text on the loop farthest from the node
textX = stuff.circleX + stuff.circle_radius * math.cos(self.anchor_angle);
textY = stuff.circleY + stuff.circle_radius * math.sin(self.anchor_angle);
common_utils.draw_text(surface, self.text, textX, textY, self.anchor_angle, c, caret_visible)
# draw the head of the arrow
common_utils.draw_arrow(surface, stuff.endX, stuff.endY, stuff.end_angle + math.pi * 0.4, c)
def contains_point(self, x, y):
    """Return True when (x, y) lies within the hit padding of the loop's ring."""
    geometry = self.get_end_points_and_circle()
    offset_x = x - geometry.circleX
    offset_y = y - geometry.circleY
    # Signed distance from the circle's circumference (negative = inside).
    ring_distance = math.sqrt(offset_x * offset_x + offset_y * offset_y) - geometry.circle_radius
    return abs(ring_distance) < config.hit_target_padding
def save(self):
    """Serialize this self-loop into a plain dict for persistence."""
    state = {
        'node': self.node.id,
        'anchor_angle': self.anchor_angle,
    }
    # An empty label is omitted rather than stored.
    if self.text:
        state['text'] = self.text
    return state
@classmethod
def load(cls, d, nodes):
    """Rebuild a self-loop from a dict produced by save().

    The owning node is looked up by id in *nodes*; when it cannot be
    found the link's node attribute is left as None.
    """
    link = cls()
    link.node = next((node for node in nodes if node.id == d['node']), None)
    link.text = d.get('text', '')
    link.anchor_angle = d.get('anchor_angle', 0)
    return link
| 3.328125 | 3 |
regtests/bench/add.py | gython/Gython | 65 | 12762617 | <reponame>gython/Gython<filename>regtests/bench/add.py
'''
loop and add (integer)
'''
from time import clock
def main():
    """Benchmark: time 10^8 integer additions inside a nested loop.

    NOTE(review): ``PYTHON`` and ``pythonjs`` are globals injected by the
    PythonJS/Gython transpiler harness; this script is not meant to run
    under plain CPython.
    """
    if PYTHON=='PYTHONJS': ## about 25% faster with normal and javascript backends
        pythonjs.configure( direct_operator='+' )
        pass
    start = clock()
    a = -1000000
    for i in range(1000000):
        for j in range(100):
            # NOTE(review): this adds two constants, so ``a`` is overwritten
            # with 3 on every iteration and the -1000000 seed is unused;
            # possibly ``a = a + 2`` was intended — confirm against the
            # sibling benchmarks.
            a = 1 + 2
    print(clock()-start)
    # in Go a variable must be used for something, or the compiler will throw an error,
    # here just print 'a' to pass the benchmark.
    print('#', a)
| 2.65625 | 3 |
security/cores.py | jsilhan/django-security | 0 | 12762618 | import json
from django.apps import apps
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.serializers.json import DjangoJSONEncoder
from django.db.models import TextField
from django.db.models.functions import Cast
from django.template.defaultfilters import truncatechars
from django.utils.html import format_html, format_html_join, mark_safe, format_html_join
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from pyston.filters.default_filters import SimpleMethodEqualFilter
from pyston.utils.decorators import filter_by, order_by, filter_class
from is_core.generic_views.inlines.inline_table_views import InlineTableView
from is_core.generic_views.mixins import TabItem, TabsViewMixin
from is_core.generic_views.table_views import TableView
from is_core.main import UIRESTModelISCore
from is_core.utils import render_model_objects_with_link, render_model_object_with_link
from is_core.utils.decorators import short_description
from security.config import settings
from security.models import CommandLog, InputLoggedRequest, OutputLoggedRequest, CeleryTaskLog, CeleryTaskRunLog
from ansi2html import Ansi2HTMLConverter
def display_json(value):
    """Pretty-print *value* as human-readable JSON for the admin UI."""
    return json.dumps(value, cls=DjangoJSONEncoder, ensure_ascii=False, indent=4)
def display_as_code(value):
    """Wrap a non-empty value in a pre-formatted <code> element; pass falsy values through."""
    if not value:
        return value
    return format_html('<code style="white-space:pre-wrap;">{}</code>', value)
def display_related_objects(request, related_objects):
    """Render admin links for every related object that still resolves."""
    resolved = []
    for related in related_objects:
        try:
            instance = related.object
        except (ObjectDoesNotExist, AttributeError):
            # Skip stale references whose target row has been deleted.
            continue
        resolved.append(instance)
    return render_model_objects_with_link(request, resolved)
def get_content_type_pks_of_parent_related_classes():
    """Return the content-type PKs of every log model handled by this module."""
    log_models = (CommandLog, InputLoggedRequest, OutputLoggedRequest, CeleryTaskLog, CeleryTaskRunLog)
    return {ContentType.objects.get_for_model(log_model).pk for log_model in log_models}
class UsernameUserFilter(SimpleMethodEqualFilter):
    """Filter log records by a substring of the username.

    User ids are stored on the logs as strings, so the matching user ids
    are cast to text before being used in the ``user_id__in`` lookup.
    """

    def get_filter_term(self, value, operator_slug, request):
        user_model = get_user_model()
        username_lookup = '{}__contains'.format(user_model.USERNAME_FIELD)
        matching_ids = user_model.objects.filter(
            **{username_lookup: value}
        ).annotate(
            str_id=Cast('id', output_field=TextField())
        ).values_list('str_id', flat=True)
        return {'user_id__in': list(matching_ids)}
class SecurityISCoreMixin:
    """Display helpers shared by the security-log admin cores.

    Log records reference each other through generic ``related_objects``
    relations; these helpers render the different directions of that graph
    (plain related objects, the triggering "source" logs, and the logs a
    record raised in turn) as admin links.
    """

    @short_description(_('related objects'))
    def display_related_objects(self, obj, request):
        # Related objects that are NOT themselves log records (those are
        # rendered separately by display_source).
        return display_related_objects(
            request, obj.related_objects.exclude(object_ct_id__in=get_content_type_pks_of_parent_related_classes())
        )

    @short_description(_('source'))
    def display_source(self, obj, request):
        # The log records (requests/commands/tasks) that triggered this one.
        return display_related_objects(
            request, obj.related_objects.filter(object_ct_id__in=get_content_type_pks_of_parent_related_classes())
        )

    @short_description(_('raised output logged requests'))
    def display_output_logged_requests(self, obj, request):
        # Output requests whose related_objects point back at this record.
        return render_model_objects_with_link(
            request,
            OutputLoggedRequest.objects.filter(
                related_objects__object_id=obj.pk,
                related_objects__object_ct_id=ContentType.objects.get_for_model(obj).pk
            )
        )

    @short_description(_('raised command logs'))
    def display_command_logs(self, obj, request):
        # Commands spawned while handling this record.
        return render_model_objects_with_link(
            request,
            CommandLog.objects.filter(
                related_objects__object_id=obj.pk,
                related_objects__object_ct_id=ContentType.objects.get_for_model(obj).pk
            )
        )

    @short_description(_('raised celery task logs'))
    def display_celery_task_logs(self, obj, request):
        # Celery tasks spawned while handling this record.
        return render_model_objects_with_link(
            request,
            CeleryTaskLog.objects.filter(
                related_objects__object_id=obj.pk,
                related_objects__object_ct_id=ContentType.objects.get_for_model(obj).pk
            )
        )
class RequestsLogISCore(SecurityISCoreMixin, UIRESTModelISCore):
    """Read-only admin core base for logged HTTP requests.

    The ``*_code`` helpers render request/response payloads and headers as
    pre-formatted <code> blocks in the detail view.
    """

    abstract = True
    # Logged requests are immutable audit records.
    can_create = can_update = can_delete = False

    @short_description(_('queries'))
    def queries_code(self, obj):
        return display_as_code(display_json(obj.queries)) if obj else None

    @short_description(_('request body'))
    def request_body_code(self, obj):
        return display_as_code(obj.request_body) if obj else None

    @short_description(_('request headers'))
    def request_headers_code(self, obj):
        return display_as_code(display_json(obj.request_headers)) if obj else None

    @short_description(_('response body'))
    def response_body_code(self, obj):
        return display_as_code(obj.response_body) if obj else None

    @short_description(_('response headers'))
    def response_headers_code(self, obj):
        return display_as_code(display_json(obj.response_headers)) if obj else None

    @short_description(_('error description'))
    def error_description_code(self, obj):
        return display_as_code(obj.error_description) if obj else None
class InputRequestsLogISCore(RequestsLogISCore):
    """Admin core for requests received by this service (input direction)."""

    model = InputLoggedRequest
    abstract = True

    ui_list_fields = (
        'id', 'created_at', 'changed_at', 'request_timestamp', 'response_timestamp', 'response_time', 'status',
        'response_code', 'host', 'short_path', 'slug', 'ip', 'user', 'method', 'type', 'short_response_body',
        'short_request_body', 'short_queries', 'short_request_headers'
    )
    form_fieldsets = (
        (_('Request'), {'fields': ('created_at', 'changed_at', 'request_timestamp', 'host', 'method', 'path',
                                   'queries_code', 'request_headers_code', 'request_body_code', 'is_secure')}),
        (_('Response'), {'fields': ('response_timestamp', 'response_code', 'status', 'response_headers_code',
                                    'response_body_code', 'type', 'error_description_code')}),
        (_('User information'), {'fields': ('user', 'ip')}),
        (_('Extra information'), {'fields': ('slug', 'response_time', 'display_related_objects',
                                             'display_output_logged_requests', 'display_command_logs',
                                             'display_celery_task_logs')}),
    )

    def get_form_fieldsets(self, request, obj=None):
        # Append the debug-toolbar panel only when the contrib app is
        # installed and a toolbar snapshot was captured for this request.
        form_fieldsets = list(super().get_form_fieldsets(request, obj))
        app_names = {app.name for app in apps.get_app_configs()}
        if (settings.SHOW_DEBUG_TOOLBAR and 'security.contrib.debug_toolbar_log' in app_names
                and obj and hasattr(obj, 'input_logged_request_toolbar')):
            form_fieldsets.append((None, {'fields': ('debug_toolbar',)}))
        return form_fieldsets

    @short_description(_('user'))
    @filter_class(UsernameUserFilter)
    def user(self, obj):
        # Filterable via a username substring (see UsernameUserFilter).
        return obj.user

    @short_description('')
    def debug_toolbar(self, obj):
        # Raw HTML snapshot captured by security.contrib.debug_toolbar_log.
        return mark_safe(obj.input_logged_request_toolbar.toolbar)
class OutputRequestsLogISCore(RequestsLogISCore):
    """Admin core for requests this service sent to external systems (output direction)."""

    model = OutputLoggedRequest
    abstract = True

    ui_list_fields = (
        'id', 'created_at', 'changed_at', 'request_timestamp', 'response_timestamp', 'response_time', 'status',
        'response_code', 'host', 'short_path', 'method', 'slug', 'short_response_body', 'short_request_body',
        'short_queries', 'short_request_headers'
    )
    form_fieldsets = (
        (_('Request'), {'fields': ('created_at', 'changed_at', 'request_timestamp', 'host', 'method', 'path',
                                   'queries_code', 'request_headers_code', 'request_body_code', 'is_secure')}),
        (_('Response'), {'fields': ('response_timestamp', 'response_code', 'status', 'response_headers_code',
                                    'response_body_code', 'error_description_code')}),
        (_('Extra information'), {'fields': ('slug', 'response_time', 'display_related_objects', 'display_source')}),
    )
class CommandLogISCore(SecurityISCoreMixin, UIRESTModelISCore):
    """Read-only admin core for logged management-command executions."""

    model = CommandLog
    can_create = can_update = can_delete = False

    ui_list_fields = (
        'id', 'created_at', 'changed_at', 'name', 'start', 'stop', 'time', 'executed_from_command_line', 'is_successful'
    )
    form_fieldsets = (
        (None, {
            'fields': ('created_at', 'changed_at', 'name', 'input', 'output_html', 'error_message',
                       'display_related_objects', 'display_source', 'display_output_logged_requests',
                       'display_command_logs', 'display_celery_task_logs'),
            'class': 'col-sm-6'
        }),
        (None, {
            'fields': ('start', 'stop', 'time', 'executed_from_command_line', 'is_successful'),
            'class': 'col-sm-6'
        }),
    )
    abstract = True

    @short_description(_('output'))
    def output_html(self, obj=None):
        # Command output may contain ANSI colour escapes; convert them to
        # HTML before rendering inside a <code> block.
        if obj and obj.output is not None:
            conv = Ansi2HTMLConverter()
            output = mark_safe(conv.convert(obj.output, full=False))
            return display_as_code(output)
        return None
class CeleryTaskLogTabs(TabsViewMixin):
    """Tab bar switching between the celery task list and the task-run list."""

    tabs = (
        TabItem('list-celerytasklog', _('celery task')),
        TabItem('list-celerytaskrunlog', _('celery task run')),
    )
class CeleryTaskLogTableView(CeleryTaskLogTabs, TableView):
    """Celery log list view with the task/task-run tab bar attached."""
    pass
class CeleryTaskRunLogISCore(SecurityISCoreMixin, UIRESTModelISCore):
    """Read-only admin core for individual celery task executions (runs)."""

    model = CeleryTaskRunLog
    abstract = True
    can_create = can_update = can_delete = False

    # Allow REST filtering by the task UUID (used by the inline table view).
    rest_extra_filter_fields = (
        'celery_task_id',
    )
    ui_list_fields = (
        'celery_task_id', 'created_at', 'changed_at', 'name', 'state', 'start', 'stop', 'time', 'result', 'retries',
        'get_task_log'
    )
    form_fields = (
        'celery_task_id', 'task_log', 'start', 'stop', 'time', 'state', 'result', 'error_message', 'output_html',
        'retries', 'estimated_time_of_next_retry', 'display_related_objects', 'display_output_logged_requests',
        'display_command_logs', 'display_celery_task_logs'
    )
    ui_list_view = CeleryTaskLogTableView
    default_ordering = ('-created_at',)

    @short_description(_('celery task log'))
    def task_log(self, obj):
        # Link back to the parent task-log record.
        return obj.get_task_log()

    @short_description(_('output'))
    def output_html(self, obj):
        # Run output may contain ANSI colour escapes; convert them to HTML
        # before rendering inside a <code> block.
        if obj and obj.output is not None:
            conv = Ansi2HTMLConverter()
            output = mark_safe(conv.convert(obj.output, full=False))
            return display_as_code(output)
        return None
class CeleryTaskRunLogInlineTableView(InlineTableView):
    """Inline table of all runs belonging to one celery task log."""

    model = CeleryTaskRunLog
    fields = (
        'created_at', 'changed_at', 'start', 'stop', 'time', 'state', 'result', 'retries'
    )

    def _get_list_filter(self):
        # Restrict the inline list to runs of the parent task instance.
        return {
            'filter': {
                'celery_task_id': self.parent_instance.celery_task_id
            }
        }
class CeleryTaskLogISCore(SecurityISCoreMixin, UIRESTModelISCore):
    """Read-only admin core for dispatched celery tasks.

    Individual executions are shown inline via CeleryTaskRunLogInlineTableView.
    """

    model = CeleryTaskLog
    abstract = True
    can_create = can_update = can_delete = False

    ui_list_fields = (
        'celery_task_id', 'created_at', 'changed_at', 'name', 'short_input', 'state', 'get_start', 'get_stop',
        'queue_name'
    )
    form_fieldsets = (
        (None, {
            'fields': (
                'celery_task_id', 'created_at', 'changed_at', 'name', 'state', 'get_start', 'get_stop',
                'estimated_time_of_first_arrival', 'expires', 'stale', 'queue_name', 'input', 'display_related_objects',
                'display_source'
            )
        }),
        (_('celery task runs'), {'inline_view': CeleryTaskRunLogInlineTableView}),
    )
    ui_list_view = CeleryTaskLogTableView

    @filter_by('input')
    @order_by('input')
    @short_description(_('input'))
    def short_input(self, obj):
        # Truncated task input for the list view; filtering/ordering still
        # operate on the full 'input' column.
        return truncatechars(obj.input, 50)

    def is_active_menu_item(self, request, active_group):
        # Keep this menu item highlighted when browsing either the task tab
        # or the task-run tab.
        return active_group in {
            self.menu_group,
            'celerytaskrunlog',
        }
| 1.726563 | 2 |
solutions/python3/1232.py | sm2774us/amazon_interview_prep_2021 | 42 | 12762619 | <gh_stars>10-100
class Solution:
def checkStraightLine(self, c: List[List[int]]) -> bool:
return len(set(a[0] == b[0] or (b[1] - a[1]) / (b[0] - a[0]) for a, b in zip(c, c[1:]))) == 1 | 2.78125 | 3 |
gears/compressors/__init__.py | gears/gears | 9 | 12762620 | from .base import BaseCompressor, ExecCompressor
from .cssmin import CSSMinCompressor
from .slimit import SlimItCompressor
| 1.023438 | 1 |
tests/test_map.py | wiremas/map | 6 | 12762621 | <reponame>wiremas/map
import os
import sys
import unittest
class TestAsyncMap(unittest.TestCase):
    """Integration tests for ``async.map``.

    NOTE(review): this file targets Python 2 — ``async`` became a reserved
    keyword in Python 3.7 and ``on_exit`` below uses the Python 2 print
    statement. The ``async`` module itself is imported in the ``__main__``
    block at the bottom of the file.
    """

    def test_single_arg_funcs(self):
        """ test if we can pass single arguments of different types """
        single_int_arg = [1]
        r = async.map(simple_return_func, single_int_arg, chunk_size=1, modules=['time as t'])
        self.assertEqual(single_int_arg, r)

        single_int_arg = [1, 2, 3, 4, 5]
        r = async.map(simple_return_func, single_int_arg, chunk_size=1, modules=['time as t'])
        self.assertEqual(single_int_arg, r)

        single_str_arg = ['a', 'b', 'c', 'd']
        r = async.map(simple_return_func, single_str_arg, chunk_size=1, modules=['time as t'])
        self.assertEqual(single_str_arg, r)

        # Each element wraps its argument list, hence the expected result is
        # the unwrapped [[1, 2], [3, 4]] rather than the raw nested input.
        single_list_arg = [[[1, 2]], [[3, 4]]]
        r = async.map(simple_return_func, single_list_arg, chunk_size=1, modules=['time as t'])
        self.assertEqual([[1, 2], [3, 4]], r)

    def test_double_arg_func(self):
        """ test if we can pass multiple arguments """
        # NOTE(review): the result of this call is never asserted on — only
        # that the call completes and the callback fires.
        double_arg = [[1,2], [3,4], [5,6]]
        r = async.map(double_arg_func, double_arg, callback=on_exit, chunk_size=1, modules=['time as t', 'os', 'numpy'])

    def test_custom_return_obj_arg_func(self):
        """ test if we can pass a custom iterable object and return another
        custom object """
        obj = CustomIter(10)
        r = async.map(simple_return_func, obj, modules=['time as t'], runtime_globals=[ReturnObject])
        ref = ReturnObject(3)
        # for some reason isinstance evaluate to false - don't ask
        # maybe because double import / messing with sys.path?
        [self.assertTrue(str(type(obj)), str(type(ReturnObject))) for obj in r]
        self.assertEqual(len(r), 10)
class CustomIter(object):
    """Iterable fixture yielding ``maximum`` ReturnObject instances.

    NOTE(review): ``t`` is not imported in this module — presumably it is
    injected into the worker namespace by ``async.map(..., modules=['time
    as t'])``; confirm against the async implementation.
    """

    def __init__(self, maximum=10):
        # Number of objects to yield.
        self.maximum = maximum

    def __iter__(self):
        t.sleep(0.01)
        for i in range(self.maximum):
            yield ReturnObject(i)
class ReturnObject(object):
    """Trivial value wrapper used to check that custom objects survive the round trip."""

    def __init__(self, value):
        # Keep the wrapped value accessible for later assertions.
        self.value = value
def simple_return_func(x):
    """Identity worker: hand the argument back unchanged."""
    return x
def double_arg_func(x, y):
    # Simulate slow work, then combine both arguments.
    # NOTE(review): ``t`` is injected by async.map's ``modules`` argument.
    t.sleep(1)
    return x * y
def single_list_arg_func(l=None):
    """Return the list argument after simulating slow work.

    Fix: the original declared a mutable default argument (``l=[]``),
    which is shared between all calls; a ``None`` sentinel keeps the same
    observable behaviour without that pitfall. ``t`` is injected by
    async.map's ``modules`` argument.
    """
    if l is None:
        l = []
    t.sleep(1)
    return l
def double_list_arg_func(l1=None, l2=None):
    """Return both list arguments after simulating slow work.

    Fix: the original used mutable default arguments (``l1=[], l2=[]``),
    which are shared between all calls; ``None`` sentinels keep the same
    observable behaviour without that pitfall. ``t`` is injected by
    async.map's ``modules`` argument.
    """
    if l1 is None:
        l1 = []
    if l2 is None:
        l2 = []
    t.sleep(1)
    return l1, l2
def on_exit(result):
    # Completion callback handed to async.map (Python 2 print statement).
    print 'DONE', result
# single_arg = [1, 2, 3, 4, 5]
# r = async.map(single_arg_func, single_arg, callback=on_exit, chunk_size=1, modules=['time as t', 'os', 'numpy'])
#
# import numpy
# single_np_arg = numpy.array([1, 2, 3, 4, 5])
# r = async.map(single_arg_func, single_np_arg, callback=on_exit, chunk_size=1, modules=['time as t', 'os', 'numpy'])
#
#
# single_list_arg = [[[1,2,3,4]],[[5,6,7,8]]]
# r = async.map(single_list_arg_func, single_list_arg, callback=on_exit, chunk_size=1, modules=['time as t', 'os', 'numpy'])
#
# double_list_arg =[[[1,2],[2,3]],[[4,5],[6,7]]]
# r = async.map(double_list_arg_func, double_list_arg, callback=on_exit, chunk_size=1, modules=['time as t', 'os', 'numpy'])
#
# reload(async)
# r = async.apply(single_arg_func, [10], callback=on_exit, modules=['time as t'])
if __name__ == '__main__':
    # Make the package under test importable from the repository root
    # (this file lives two levels below it), then import and run.
    package_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sys.path.append(package_path)
    import async
    unittest.main()
| 3.25 | 3 |
tests/unit/h_matchers/matcher/collection/_mixin/size_test.py | hypothesis/h-matcher | 0 | 12762622 | <gh_stars>0
import pytest
from h_matchers.exception import NoMatch
from h_matchers.matcher.collection._mixin.size import SizeMixin
class HostClass(SizeMixin):
def __eq__(self, other):
try:
self._check_size(list(other))
except NoMatch:
return False
return True
class TestSizeMixin:
def test_it_matches_exact_size(self):
matcher = HostClass.of_size(3)
assert matcher == [1, 2, 3]
assert {1, 2, 3} == matcher
assert matcher != set()
assert matcher != [1, 2]
def test_it_matches_minimum_size(self):
matcher = HostClass.of_size(at_least=2)
assert matcher == [1, 2]
assert matcher == [1, 2, 3]
assert matcher != [1]
def test_it_complains_with_incorrect_size(self):
with pytest.raises(ValueError):
HostClass.of_size()
with pytest.raises(ValueError):
HostClass.of_size(at_least=100, at_most=1)
def test_it_matches_maximum_size(self):
matcher = HostClass.of_size(at_most=2)
assert matcher == []
assert matcher == [1, 2]
assert matcher != [1, 2, 3]
| 2.328125 | 2 |
WEEKS/CD_Sata-Structures/_RESOURCES/pytutor/PythonTutor-scripts/scoring.py | webdevhub42/Lambda | 0 | 12762623 | <reponame>webdevhub42/Lambda
# visit: https://imgur.com/a/oemBqyv
# Running totals kept at module scope for compatibility with the original script.
count = 0
total = 0

# Handle any exceptions using try/except
try:
    def read_scores(filename):
        """Read one numeric score per line from *filename*.

        Returns:
            (count, total): number of scores read and their sum.

        Fixes from the original: the file is iterated directly instead of
        being exhausted by a prior ``readlines()`` call (which made the
        loop body never run), and it is closed via a ``with`` block (the
        original wrote ``infile.close`` without parentheses, never
        actually closing the file).
        """
        score_count = 0
        score_total = 0.0
        with open(filename, "r") as infile:
            for line in infile:
                line = line.strip()
                if line:
                    score_total += float(line)
                    score_count += 1
        return score_count, score_total

    def scoring(grade):
        """Print and return the letter grade for a numeric score.

        Returns the letter as a string, or None for out-of-range scores.
        (The original only printed the grade and never returned it, while
        its callers referenced an undefined ``score`` variable.)
        """
        if 89.5 <= grade <= 100:
            letter = "A"
        elif 79.5 <= grade <= 89.4:
            letter = "B"
        elif 69.5 <= grade <= 79.4:
            letter = "C"
        elif 59.5 <= grade <= 69.4:
            letter = "D"
        elif 0 <= grade <= 59.4:
            letter = "F"
        else:
            print("invalid score")
            return None
        print("The letter grade is", letter)
        return letter

    def main():
        """Summarize and grade the scores of both sections and their union."""
        # Section 1: read, summarize and grade.
        count1, total1 = read_scores("Section1.txt")
        average1 = total1 / count1
        print("Number of scores in Section 1: ", count1)
        print("Average: ", format(average1, ".2f"), "Letter Grade: ", scoring(average1))

        # Section 2 (original opened "Section2.txt." — trailing dot typo fixed).
        count2, total2 = read_scores("Section2.txt")
        average2 = total2 / count2
        print("Number of scores in Section 2: ", count2)
        print("Average: ", format(average2, ".2f"), "Letter Grade: ", scoring(average2))

        # Combined statistics for both sections (the original referenced the
        # undefined names count1/total1 here).
        total_count = count1 + count2
        total_average = (total1 + total2) / total_count
        print("Numbers of score in both sections combined: ", total_count)
        print("Average: ", format(total_average, ".2f"), "Letter grade: ", scoring(total_average))

    main()
except IOError:
    print("An error occurred trying to open the file")
except ValueError:
    print("Non-numeric data found in the file")
except Exception as err:
    print(err)
| 3.84375 | 4 |
examples/radcure_simple.py | bhklab/med-imagetools | 9 | 12762624 | import os
from argparse import ArgumentParser
from imgtools.io import (ImageFileLoader, ImageFileWriter,
read_dicom_rtstruct, read_dicom_series, read_dicom_rtdose, read_dicom_pet)
from imgtools.ops import StructureSetToSegmentation, ImageFileInput, ImageFileOutput, Resample
from imgtools.pipeline import Pipeline
###############################################################
# Example usage:
# python radcure_simple.py ./data/RADCURE/data ./RADCURE_output
###############################################################
class RADCUREPipeline(Pipeline):
    """Example processing pipeline for the RADCURE dataset.

    This pipeline loads the CT images and structure sets, re-samples the images,
    and draws the GTV contour using the resampled image.
    """

    def __init__(self,
                 input_directory,
                 output_directory,
                 spacing=(1., 1., 0.),
                 n_jobs=-1,
                 missing_strategy="drop",
                 show_progress=False,
                 warn_on_error=False):
        """Configure the pipeline's input, processing and output ops.

        Parameters
        ----------
        input_directory : str
            Root of the RADCURE data; one subdirectory per subject.
        output_directory : str
            Where the processed images/masks/doses/PET volumes are saved.
        spacing : tuple of float
            Target voxel spacing in (x, y, z).
            NOTE(review): a value of 0 presumably keeps the original spacing
            along that axis — confirm against imgtools' Resample op.
        n_jobs : int
            Number of parallel workers (forwarded to the base Pipeline).
        missing_strategy : str
            How the base Pipeline handles subjects with missing inputs.
        show_progress : bool
            Whether to print progress to standard output.
        warn_on_error : bool
            Warn instead of raising on per-subject failures.
        """
        super().__init__(
            n_jobs=n_jobs,
            missing_strategy=missing_strategy,
            show_progress=show_progress,
            warn_on_error=warn_on_error)

        # pipeline configuration
        self.input_directory = input_directory
        self.output_directory = output_directory
        self.spacing = spacing

        # pipeline ops
        # input ops
        self.image_input = ImageFileInput(
            self.input_directory, # where to look for the images
            get_subject_id_from="subject_directory", # how to extract the subject ID, 'subject_directory' means use the name of the subject directory
            subdir_path="*/ImageSet_*", # whether the images are stored in a subdirectory of the subject directory (also accepts glob patterns)
            reader=read_dicom_series # the function used to read individual images
        )
        # RTSTRUCT contours for each subject.
        self.structure_set_input = ImageFileInput(
            self.input_directory,
            get_subject_id_from="subject_directory",
            subdir_path="*/structures/RTSTRUCT.dcm",
            reader=read_dicom_rtstruct
        )
        # RT dose grids.
        self.rtdose_input = ImageFileInput(
            self.input_directory,
            get_subject_id_from="subject_directory",
            subdir_path="*/dose/DOSE.dcm",
            reader=read_dicom_rtdose
        )
        # PET volumes.
        self.petscan_input = ImageFileInput(
            self.input_directory,
            get_subject_id_from="subject_directory",
            subdir_path="*/pet_*",
            reader=read_dicom_pet
        )

        # image processing ops
        self.resample = Resample(spacing=self.spacing)
        # Note: the ROI name is temporarily changed to match the example data
        # since RADCURE is still not public. The correct ROI name for RADCURE is 'GTV'.
        self.make_binary_mask = StructureSetToSegmentation(roi_names="GTV-1")#"GTV")

        # output ops
        self.image_output = ImageFileOutput(
            os.path.join(self.output_directory, "images"), # where to save the processed images
            filename_format="{subject_id}_image.nrrd", # the filename template, {subject_id} will be replaced by each subject's ID at runtime
            create_dirs=True, # whether to create directories that don't exists already
            compress=True # enable compression for NRRD format
        )
        self.mask_output = ImageFileOutput(
            os.path.join(self.output_directory, "masks"),
            filename_format="{subject_id}_mask.nrrd",
            create_dirs=True,
            compress=True
        )
        self.dose_output = ImageFileOutput(
            os.path.join(self.output_directory, "doses"),
            filename_format="{subject_id}_dose.nrrd",
            create_dirs=True,
            compress=True
        )
        self.petscan_output = ImageFileOutput(
            os.path.join(self.output_directory, "petscan"),
            filename_format="{subject_id}_petscan.nrrd",
            create_dirs=True,
            compress=True
        )

    def process_one_subject(self, subject_id):
        """Define the processing operations for one subject.

        This method must be defined for all pipelines. It is used to define
        the preprocessing steps for a single subject (note: that might mean
        multiple images, structures, etc.). During pipeline execution, this
        method will receive one argument, subject_id, which can be used to
        retrieve inputs and save outputs.

        Parameters
        ----------
        subject_id : str
            The ID of currently processed subject
        """
        image = self.image_input(subject_id)
        structure_set = self.structure_set_input(subject_id)
        dose_set = self.rtdose_input(subject_id)
        pet_set = self.petscan_input(subject_id)
        image = self.resample(image)
        # note that the binary mask can be generated with correct spacing using
        # the resampled image, eliminating the need to resample it separately
        # NOTE(review): mask generation and mask_output are commented out, so
        # make_binary_mask is configured but unused — confirm this is intended.
        # mask = self.make_binary_mask(structure_set, image)
        self.image_output(subject_id, image)
        # self.mask_output(subject_id, mask)
        self.dose_output(subject_id, dose_set)
        self.petscan_output(subject_id, pet_set)
if __name__ == "__main__":
    # Command-line front end: parse the CLI options and run the pipeline.
    cli = ArgumentParser("Example RADCURE processing pipeline.")
    cli.add_argument("input_directory",
                     type=str,
                     help="Path to the input directory of RADCURE dataset.")
    cli.add_argument("output_directory",
                     type=str,
                     help="Path to the directory where the processed images will be saved.")
    cli.add_argument("--spacing",
                     nargs=3,
                     type=float,
                     default=(1., 1., 0.),
                     help="The resampled voxel spacing in (x, y, z) directions.")
    cli.add_argument("--n_jobs",
                     type=int,
                     default=1,
                     help="The number of parallel processes to use.")
    cli.add_argument("--show_progress",
                     action="store_true",
                     help="Whether to print progress to standard output.")
    opts = cli.parse_args()
    RADCUREPipeline(
        input_directory=opts.input_directory,
        output_directory=opts.output_directory,
        spacing=opts.spacing,
        n_jobs=opts.n_jobs,
        show_progress=opts.show_progress,
    ).run()
| 2.328125 | 2 |
reflex/scripts/infer_one.py | ankur-gos/RE-Flex | 3 | 12762625 | <reponame>ankur-gos/RE-Flex
import click
from reflex.models.reflex import Reflex
@click.command()
@click.option('--context', type=str, help='Contextual evidence of relation')
@click.option('--entity', type=str, help='Entity name')
@click.option('--template', type=str, help='Relation template. Example: [X] plays for the [Y] to extract an entity [X] plays for')
@click.option('--model-dir', type=str, default='./roberta_large/', help='Model directory')
@click.option('--model-name', type=str, default='model.pt', help='Model name')
@click.option('--device', type=str, default='cpu', help='Device string to put model on.')
@click.option('--k', type=int, default=16, help='Approximation hyperparameter value')
@click.option('--expand/--no-expand', type=bool, default=False, help='Expand the anchor token')
@click.option('--spacy-model-name', type=str, default='en_core_web_lg', help='Name of spacy model to load')
def run(context, entity, template, model_dir, model_name, device, k, expand, spacy_model_name):
    """CLI entry point: run RE-Flex relation extraction on one example.

    Loads the model, extracts a single prediction for (context, entity,
    template) and prints it.
    """
    reflex = Reflex(model_dir, model_name, device, k, spacy_model_name)
    # predict_one returns a sequence; only the first (best) prediction is shown.
    prediction = reflex.predict_one(context, entity, template, expand=expand)[0]
    print(prediction)


if __name__ == '__main__':
    run()
| 2.328125 | 2 |
dnsTunnelIdentifier/DNSInfo.py | Inocustonner/dnsTunnelIdentifier | 0 | 12762626 | <filename>dnsTunnelIdentifier/DNSInfo.py
import enum
from dnsTunnelIdentifier.functional import compose
from dnsTunnelIdentifier.utils import getLogger
from scapy.utils import RawPcapReader
from scapy.layers.l2 import Ether
from scapy.layers.inet import IP, UDP
from scapy.layers.dns import DNS, DNSQR, DNSRR
from typing import Union, List, Tuple
import traceback
class DnsType(enum.Enum):
    """DNS resource-record TYPE codes (numeric values per RFC 1035 / RFC 3596)."""
    A = 1        # IPv4 host address
    NS = 2       # authoritative name server
    CNAME = 5    # canonical-name alias
    SOA = 6      # start of authority
    NULL = 10    # null record (often seen in DNS tunneling)
    PTR = 12     # reverse-lookup pointer
    HINFO = 13   # host information
    MX = 15      # mail exchange
    TXT = 16     # free-form text (another common tunneling carrier)
    AAAA = 28    # IPv6 host address
    SRV = 33     # service locator
    OPT = 41     # EDNS0 pseudo-record
class DNSInfo:
    """Lightweight view over a single DNS packet parsed from raw Ethernet bytes.

    Equality is based solely on :meth:`getServerIP` and hashing on the
    unordered pair of endpoint IPs, so a query and its matching response
    fall into the same hash bucket.
    """

    def __init__(self, raw: bytes, ts: float = 0):
        """Parse *raw* packet bytes; *ts* is the capture timestamp."""
        packet = Ether(raw)
        self.ts = ts
        ip = packet[IP]
        if not ip.haslayer(DNS):
            # Not a DNS packet: flag it and leave the DNS fields unset.
            self.notDns = True
            return
        self.notDns = False
        try:
            self.sip = ip.src
            self.dip = ip.dst
            self.dns = ip[DNS]
            # Assume a single query record; render its qtype as a string
            # ('A', 'TXT', ...) via scapy's field representation.
            self.qtype = self.dns.qd.get_field('qtype').i2repr(self.dns.qd, self.dns.qd.qtype)
            self.name = self.dns.qd.qname
        except KeyboardInterrupt:
            raise
        except Exception:
            # Best-effort parsing: log the failure but keep the object usable.
            print(traceback.format_exc() + '\n' + repr(self.dns))

    def isResponse(self) -> bool:
        """Return True when the packet carries at least one answer record."""
        return self.dns.haslayer(DNSRR)

    def getAns(self) -> "Union[DNSRR, None]":
        """Return the first answer record, or None for plain queries."""
        return self.dns[DNSRR] if self.isResponse() else None

    def getTTL(self) -> "Union[int, None]":
        """Return the TTL of the answer record, or None when there is none."""
        ans = self.getAns()
        return ans.ttl if ans else None

    def getName(self) -> bytes:
        """Return the queried domain name as raw bytes."""
        return self.name

    def getServerIP(self) -> str:
        # NOTE(review): for a response this returns dst (the client) and for
        # a query src (also the client) — i.e. despite its name it yields
        # the querying host's IP in both directions. Behaviour preserved;
        # confirm whether the inversion is intentional.
        if self.isResponse():
            return self.dip
        else:
            return self.sip

    def get(self, attr: str, default=None) -> "Union[str, None]":
        """Dispatch-style accessor for 'name', 'type', 'answer' or 'ttl'.

        Unknown attributes return *default*.
        """
        dispatch = {
            'name': lambda: self.name,
            # Fix: the original read self.qType, but __init__ stores the
            # value as self.qtype, so get('type') always raised AttributeError.
            'type': lambda: self.qtype,
            'answer': self.getAns,
            'ttl': self.getTTL,
        }
        return dispatch.get(attr, lambda: default)()

    def __eq__(self, dns):
        # Two packets are "equal" when they involve the same endpoint
        # (see getServerIP above).
        return dns.getServerIP() == self.getServerIP()

    def __hash__(self):
        # Order-insensitive over the two endpoint IPs, so both directions
        # of an exchange hash identically.
        return hash(''.join(sorted(self.sip + self.dip)))

    def __repr__(self):
        return f"{self.__class__.__name__}({', '.join([f'{k}={repr(v)}' for k, v in vars(self).items()])})"
"""
Takes a path to .pcap file and returns packet with `n` index in DnsInfo
"""
def from_pcap(file_path: str, n: int = 0) -> DNSInfo:
pt = convertPacket(RawPcapReader(file_path).read_all(n + 1)[-1])
return DNSInfo(pt[0], pt[1])
def to_ts(meta):
    """Convert a pcap per-packet metadata record (sec/usec fields) to a float timestamp.

    Replaces the original lambda assignments (PEP 8 E731) with named,
    documented functions; behaviour is unchanged.
    """
    return float(meta.sec) + meta.usec / 10**6


def convertPacket(pkt):
    """Turn a RawPcapReader (raw_bytes, metadata) pair into (raw_bytes, timestamp)."""
    return (pkt[0], to_ts(pkt[1]))
"""
Takes a file path to a pcap file and returns array of tuples of the following form
(raw packet, timestamp)
"""
def pcap_to_packets(file_path: str, cnt: int = -1) -> List[Tuple[bytes, float]]:
return list(map(convertPacket, RawPcapReader(file_path).read_all(cnt)))
"""
Takes a file path to a pcap file and returns generator of
array of tuples of the following form (timestamp, raw packet)
"""
def pcap_to_packets_lazy(file_path: str):
def lazy():
reader = RawPcapReader(file_path)
v = next(reader)
while v:
yield convertPacket(v)
v = next(reader) | 2.46875 | 2 |
ROM_Demos/Burgers_DEIM/Parameters.py | Romit-Maulik/Tutorials-Demos-Practice | 8 | 12762627 | <filename>ROM_Demos/Burgers_DEIM/Parameters.py
import numpy as np
# Parameters
Rnum = 1000.0
spatial_resolution = 128 # Grid points
temporal_resolution = 300 # Snapshots
final_time = 2.0 # Keep fixed
# Independent variables
x = np.linspace(0.0,1.0,num=spatial_resolution)
dx = 1.0/np.shape(x)[0]
tsteps = np.linspace(0.0,2.0,num=temporal_resolution)
dt = final_time/np.shape(tsteps)[0]
# Compression metrics - POD field
K = 12 # num_modes field
# Compression metrics - DEIM nonlinear
M = 24 # num_modes nonlinear
fac = 2 # Gappy DEIM parameter
gappy = False # True or False for gappy DEIM
# ML hyperparameters
num_epochs = 100
num_neurons = 50
seq_num = 10
if __name__ == "__main__":
print('Parameter file') | 1.929688 | 2 |
simtools/tests/__init__.py | gammasim/gammasim-tools | 5 | 12762628 | import logging
# Configure root logging for the test package: each record shows the level,
# module, line number and originating function for easier debugging.
logging.basicConfig(format='%(levelname)s::%(module)s(l%(lineno)s)::%(funcName)s::%(message)s')
model.py | tyranitar/bananas | 0 | 12762629 | from contextlib import contextmanager
import torch.nn.functional as F
import torch.nn as nn
import torch
import math
# Initializes a layer with normally-distributed weights.
def normal_weights(layer):
    """Initialize Linear-family layers with N(0, 1/sqrt(fan_in)) weights.

    Intended for use with ``nn.Module.apply``; modules whose class name
    does not contain 'Linear' are left untouched.
    """
    classname = layer.__class__.__name__
    if classname.find('Linear') != -1:
        fan_in = layer.in_features
        std = 1.0 / math.sqrt(fan_in)
        layer.weight.data.normal_(0, std)


# A Dueling DQN: Q(s, a) = V(s) + A(s, a) - mean_a' A(s, a').
class QNetwork(nn.Module):
    """Dueling Q-network with separate state-value and advantage streams."""

    def __init__(self, state_size, action_size, seed=1337):
        """Build both streams.

        Parameters
        ----------
        state_size : int
            Dimensionality of the observation vector.
        action_size : int
            Number of discrete actions.
        seed : int
            Torch RNG seed for reproducible weight initialization.
        """
        super(QNetwork, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Hidden-layer widths of each stream; the final projection
        # (to 1 for V(s), to action_size for A(s, a)) is appended below.
        state_val_net_layer_dims = [state_size, 128, 32]
        advantage_net_layer_dims = [state_size, 128, 32]

        # V(s): scalar value of the state.
        self.state_val_net = nn.Sequential(
            *self.gen_linear_layers(state_val_net_layer_dims),
            nn.Linear(state_val_net_layer_dims[-1], 1)
        )

        # A(s, a): per-action advantage.
        self.advantage_net = nn.Sequential(
            *self.gen_linear_layers(advantage_net_layer_dims),
            nn.Linear(advantage_net_layer_dims[-1], action_size)
        )

        self.apply(normal_weights)

    def gen_linear_layers(self, layer_dims):
        """Return Linear+BatchNorm+ReLU stages for consecutive dim pairs."""
        return [
            nn.Sequential(
                nn.Linear(layer_dims[i], layer_dims[i + 1]),
                nn.BatchNorm1d(layer_dims[i + 1]),
                nn.ReLU(),
            )
            for i in range(len(layer_dims) - 1)
        ]

    def forward(self, state):
        """Return Q(s, a) for a batch of states; shape (batch, action_size)."""
        state_vals = self.state_val_net(state)
        advantages = self.advantage_net(state)
        # Q(s, a) = V(s) + A(s, a) - mean(A(s, a'))
        # Fix: the mean must be taken over the action dimension of each
        # sample. The original `advantages.mean()` averaged over the whole
        # batch AND the actions, coupling unrelated samples together.
        return state_vals + advantages - advantages.mean(dim=1, keepdim=True)

    # Use this to interact with the environment
    # since action ranks don't change with V(s).
    def get_advantages(self, state):
        """Advantage stream only — cheaper for greedy action selection."""
        return self.advantage_net(state)

    @contextmanager
    def eval_no_grad(self):
        """Context manager: eval mode + no_grad, restoring train mode after."""
        with torch.no_grad():
            try:
                self.eval()
                yield
            finally:
                self.train()
| 2.515625 | 3 |
python-IO-learning/second_asyncio.py | landbroken/python-learning | 1 | 12762630 | <filename>python-IO-learning/second_asyncio.py
import threading
import asyncio
import time
import random
def ioTime(ran: float):
if ran >= 0.8:
return 10
elif ran>=0.5:
return 5
elif ran>=0.3:
return 1
else:
return 0.1
@asyncio.coroutine
def hello():
ran = random.random() # 0.2的概率IO堵塞很久
curIOTime = ioTime(ran)
startTime = time.time()
print('Hello world! (%s)' % threading.currentThread())
print("now: "+str(startTime)+" ioTime = "+str(curIOTime))
yield from asyncio.sleep(curIOTime)
endTime = time.time()
print('Hello again! (%s)' % threading.currentThread())
print(str(endTime))
print("Time span is: " + str(endTime) + " - " + str(startTime) + " = " + str(endTime - startTime))
loop = asyncio.get_event_loop()
tasks = [hello(), hello(), hello(), hello(), hello()]
loop.run_until_complete(asyncio.wait(tasks))
loop.close()
| 3.796875 | 4 |
bin/crests.py | symroe/government-organisation-data | 1 | 12762631 | <reponame>symroe/government-organisation-data
#!/usr/bin/env python3
# File to retrieve organisations list from the json api url and produce a file
# that we find useful.
import sys
import urllib.request as ul
import json
from entry.representations.tsv import Writer
from entry import Entry
# Official branding colour map for some ministerial departments.
colour_map = {
    'attorney-generals-office': '#9f1888',
    'cabinet-office': '#005abb',
    'civil-service': '#af292e',
    'department-for-business-innovation-skills': '#003479',
    'department-for-communities-and-local-government': '#00857e',
    'department-for-culture-media-sport': '#d40072',
    'department-for-education': '#003a69',
    'department-for-environment-food-rural-affairs': '#898700',
    'department-for-international-development': '#002878',
    'department-for-transport': '#006c56',
    'department-for-work-pensions': '#00beb7',
    'department-of-energy-climate-change': '#009ddb',
    'department-of-health': '#00ad93',
    'foreign-commonwealth-office': '#003e74',
    'hm-government': '#0076c0',
    'hm-revenue-customs': '#009390',
    'hm-treasury': '#af292e',
    'home-office': '#9325b2',
    'ministry-of-defence': '#4d2942',
    'ministry-of-justice': '#231f20',
    'northern-ireland-office': '#002663',
    'office-of-the-advocate-general-for-scotland': '#002663',
    'office-of-the-leader-of-the-house-of-lords': '#9c132e',
    'scotland-office': '#002663',
    'the-office-of-the-leader-of-the-house-of-commons': '#317023',
    'uk-export-finance': '#005747',
    'uk-trade-investment': '#C80651',
    'wales-office': '#a33038',
}
# Logo (crest) lookup map for ministerial departments only.
crest_map = {
    'prime-ministers-office-10-downing-street': 'org-crest',
    'attorney-generals-office': 'org-crest',
    'cabinet-office': 'org-crest',
    'department-for-business-innovation-skills': 'bis_crest',
    'department-for-communities-and-local-government': 'org-crest',
    'department-for-culture-media-sport': 'org-crest',
    'department-for-education': 'org-crest',
    'department-for-environment-food-rural-affairs': 'org-crest',
    'department-for-international-development': 'org-crest',
    'department-for-transport': 'org-crest',
    'department-for-work-pensions': 'org-crest',
    'department-of-energy-climate-change': 'org-crest',
    'department-of-health': 'org-crest',
    'foreign-commonwealth-office': 'org-crest',
    'hm-treasury': 'org-crest',
    'home-office': 'home-office-crest',
    'ministry-of-defence': 'mod_crest',
    'ministry-of-justice': 'org-crest',
    'northern-ireland-office': 'org-crest',
    'office-of-the-advocate-general-for-scotland': 'scotland-office-crest',
    'the-office-of-the-leader-of-the-house-of-commons': 'portcullis-crest',
    'office-of-the-leader-of-the-house-of-lords': 'portcullis-crest',
    'scotland-office': 'scotland-office-crest',
    'uk-export-finance': 'org-crest',
    'wales-office': 'wales_crest',
}
def array_to_string(arr, pre_process):
    """Join ``pre_process(item)`` for every item in *arr* with ';' separators."""
    return ";".join(pre_process(item) for item in arr)
def json_from_url(url):
    """Fetch *url* and decode the response body as JSON.

    The charset from the Content-Type header is honoured, defaulting to
    UTF-8 when absent.
    """
    response = ul.urlopen(url)
    charset = response.info().get_param('charset', 'utf8')
    payload = response.read()
    response.close()
    return json.loads(payload.decode(charset))
def init_output():
    """Return a TSV Writer on stdout configured with the register's columns."""
    fieldnames = [
        'government-organisation',
        'name',
        'website',
        'government-organisation-type',
        'parent-bodies',
        'text',
        'crest',
        'official-colour',
        'abbreviation',
    ]
    writer = Writer(sys.stdout, fieldnames=fieldnames)
    return writer
def write_records_to(records, output):
    """Convert one page of GOV.UK organisation results to register entries.

    Each item in ``records['results']`` becomes one Entry written to
    *output*; duplicate organisation slugs within the page are skipped.
    Tabs are stripped or replaced because the output format is TSV.
    """
    seen = set()  # slugs already written; set lookup instead of a list scan
    for result in records['results']:
        details = result['details']
        result_id = details['slug']
        if result_id in seen:
            continue
        entry = Entry()
        setattr(entry, 'government-organisation', result_id)
        entry.name = result['title'].replace('\t', ' ')
        entry.website = result['id'].replace('\t', ' ')
        setattr(entry,
                'government-organisation-type',
                result['format'].replace('\t', ' '))
        setattr(entry,
                'parent-bodies',
                array_to_string(
                    result['parent_organisations'],
                    lambda x: x['id']))
        entry.text = ''
        # Crest and colour are only known for ministerial departments.
        entry.crest = crest_map.get(result_id, '')
        if 'abbreviation' in details and details['abbreviation']:
            entry.abbreviation = details['abbreviation'].replace('\t', '')
        # NOTE(review): when no abbreviation is present the attribute is never
        # set here — presumably Entry/Writer default it; confirm.
        setattr(entry, 'official-colour', colour_map.get(result_id, ''))
        output.write(entry)
        seen.add(result_id)
# print "%s\n" % parser.getAnchorMap()
next_page = "https://www.gov.uk/api/organisations?page=1"
output = init_output()
while(next_page is not None):
print("Processing page url: %s" % (next_page), file=sys.stderr)
jsonRes = json_from_url(next_page)
write_records_to(jsonRes, output)
if 'next_page_url' in jsonRes:
next_page = jsonRes['next_page_url']
else:
next_page = None
output.close()
| 2.8125 | 3 |
src/ZPublisher/tests/test_utils.py | arnaud-fontaine/Zope | 0 | 12762632 | ##############################################################################
#
# Copyright (c) 2017 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
class SafeUnicodeTests(unittest.TestCase):
    """Tests for ZPublisher.utils.safe_unicode coercion of str/bytes to text."""
    def _makeOne(self, value):
        from ZPublisher.utils import safe_unicode
        return safe_unicode(value)
    def test_ascii(self):
        self.assertEqual(self._makeOne('foo'), 'foo')
        self.assertEqual(self._makeOne(b'foo'), 'foo')
    def test_latin_1(self):
        # A lone Latin-1 byte is not valid UTF-8: replaced with U+FFFD.
        self.assertEqual(self._makeOne(b'fo\xf6'), 'fo\ufffd')
    def test_unicode(self):
        self.assertEqual(self._makeOne('foö'), 'foö')
    def test_utf_8(self):
        self.assertEqual(self._makeOne('test\xc2\xae'), 'test\xc2\xae')
        self.assertEqual(self._makeOne(b'test\xc2\xae'), 'test\xae')
class NoUpdatePropertyManager:
    """PropertyManager without _updateProperty method.

    This is a simplified version of the original PropertyManager,
    with only the methods we need.
    """
    _properties = ()

    def _setPropValue(self, id, value):
        # Lists are stored as immutable tuples, mirroring PropertyManager.
        if type(value) == list:
            value = tuple(value)
        setattr(self, id, value)

    def _setProperty(self, id, value, type='string'):
        self._properties += ({'id': id, 'type': type},)
        self._setPropValue(id, value)

    def hasProperty(self, id):
        # Returns 1/0 (not True/False) to match the original API.
        return 1 if any(prop['id'] == id for prop in self._properties) else 0

    def getProperty(self, id, d=None):
        return getattr(self, id) if self.hasProperty(id) else d

    def getPropertyType(self, id):
        for meta in self._properties:
            if meta['id'] == id:
                return meta.get('type', 'string')
        return None

    def _propertyMap(self):
        return self._properties

    def propertyMap(self):
        return tuple(entry.copy() for entry in self._propertyMap())
class NoPropertiesManager(NoUpdatePropertyManager):
    """PropertyManager with _updateProperty method but without _properties."""
    # _properties = None makes property iteration fail, which is the failure
    # mode the fix_properties tests need to tolerate.
    _properties = None
    def _updateProperty(self, id, value):
        self._setPropValue(id, value)
class FixPropertiesTests(unittest.TestCase):
    """Tests for ZPublisher.utils.fix_properties.

    The u* property types (ulines, utokens, utext, ustring) are migrated to
    their plain equivalents, and bytes inside lines-style values are decoded
    to text; objects without _updateProperty or _properties must not break it.
    """
    def _makeOne(self):
        from OFS.PropertyManager import PropertyManager
        return PropertyManager()
    def test_lines(self):
        from ZPublisher.utils import fix_properties
        obj = self._makeOne()
        obj._setProperty("mixed", ["text and", b"bytes"], "lines")
        self.assertEqual(obj.getProperty("mixed"), ("text and", b"bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "lines")
        fix_properties(obj)
        self.assertEqual(obj.getProperty("mixed"), ("text and", "bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "lines")
    def test_ulines(self):
        from ZPublisher.utils import fix_properties
        obj = self._makeOne()
        obj._setProperty("mixed", ["text and", b"bytes"], "ulines")
        self.assertEqual(obj.getProperty("mixed"), ("text and", b"bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "ulines")
        fix_properties(obj)
        self.assertEqual(obj.getProperty("mixed"), ("text and", "bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "lines")
    def test_utokens(self):
        from ZPublisher.utils import fix_properties
        obj = self._makeOne()
        obj._setProperty("mixed", ["text", "and", b"bytes"], "utokens")
        self.assertEqual(obj.getProperty("mixed"), ("text", "and", b"bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "utokens")
        fix_properties(obj)
        self.assertEqual(obj.getProperty("mixed"), ("text", "and", "bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "tokens")
    def test_utext(self):
        from ZPublisher.utils import fix_properties
        obj = self._makeOne()
        obj._setProperty("prop1", "multiple\nlines", "utext")
        self.assertEqual(obj.getProperty("prop1"), "multiple\nlines")
        self.assertEqual(obj.getPropertyType("prop1"), "utext")
        fix_properties(obj)
        self.assertEqual(obj.getProperty("prop1"), "multiple\nlines")
        self.assertEqual(obj.getPropertyType("prop1"), "text")
    def test_ustring(self):
        from ZPublisher.utils import fix_properties
        obj = self._makeOne()
        obj._setProperty("prop1", "single line", "ustring")
        self.assertEqual(obj.getProperty("prop1"), "single line")
        self.assertEqual(obj.getPropertyType("prop1"), "ustring")
        fix_properties(obj)
        self.assertEqual(obj.getProperty("prop1"), "single line")
        self.assertEqual(obj.getPropertyType("prop1"), "string")
    def test_no_update(self):
        # Test that an object without _updateProperty method does not trip up
        # our code.
        from ZPublisher.utils import fix_properties
        obj = NoUpdatePropertyManager()
        obj._setProperty("mixed", ["text and", b"bytes"], "lines")
        self.assertEqual(obj.getProperty("mixed"), ("text and", b"bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "lines")
        # This should not raise an error.
        fix_properties(obj)
        # The properties should have remained the same.
        self.assertEqual(obj.getProperty("mixed"), ("text and", b"bytes"))
        self.assertEqual(obj.getPropertyType("mixed"), "lines")
    def test_no_properties(self):
        # Test that an object with a failing propertyMap method,
        # due to _properties=None, does not trip up our code.
        from ZPublisher.utils import fix_properties
        obj = NoPropertiesManager()
        # This should not raise an error.
        fix_properties(obj)
| 2.046875 | 2 |
srsran_controller/uu_events/system_information_block_6.py | matan1008/srsran-controller | 0 | 12762633 | <gh_stars>0
from srsran_controller.common.pyshark import items_in_tree
SIB6_NAME = 'System Information Block 6'
def parse_fdd_carrier_freq_utras(sib6):
    """Extract the list of UTRA-FDD carrier dicts from a decoded SIB6 element."""
    def _decode(element):
        return {
            'carrier_freq': int(element.carrierFreq),
            'cell_reselection_priority': int(element.cellReselectionPriority),
            # The signalled threshX and q_RxLevMin values are scaled here
            # (x2, and x2+1 respectively) before being reported.
            'thresh_x_high': int(element.threshX_High) * 2,
            'thresh_x_low': int(element.threshX_Low) * 2,
            'q_rx_lev_min': int(element.utra_q_RxLevMin) * 2 + 1,
            'p_max_utra': int(element.p_MaxUTRA),
            'q_qual_min': int(element.q_QualMin),
        }
    return [
        _decode(element)
        for element in items_in_tree(sib6, 'carrierFreqListUTRA_FDD', 'CarrierFreqUTRA_FDD_element')
    ]
def create(pkt):
    """Build a SIB6 event dict from a captured MAC-LTE packet.

    Returns None (implicitly) when the packet does not carry a sib6 element;
    the broad except covers the many ways a pyshark field can be absent.
    """
    try:
        c1 = pkt['mac-lte'].lte_rrc.BCCH_DL_SCH_Message_element.message_tree.c1_tree
        sys_info_element = c1.systemInformation_element.criticalExtensions_tree.systemInformation_r8_element
        # Pick the first SIB in the message that carries a sib6 element;
        # IndexError from [0] is caught below when none is present.
        sib6 = [
            sib
            for sib
            in items_in_tree(sys_info_element, 'sib_TypeAndInfo', 'sib_TypeAndInfo_item_tree')
            if sib.has_field('sib6_element')
        ][0].sib6_element
        return {
            'event': SIB6_NAME,
            'data': {
                'utra_fdd_carriers': parse_fdd_carrier_freq_utras(sib6),
                't_reselection_utra': int(sib6.t_ReselectionUTRA),
            },
        }
    except (KeyError, AttributeError, IndexError):
        pass
| 2.171875 | 2 |
app/blueprints/question.py | rubberduckdebuggingtwitch/pond | 0 | 12762634 | from flask import Blueprint
from app import db
# Flask blueprint grouping the question routes.
question = Blueprint('question', __name__)
@question.route('/question/create/<newquestion>')
def ask(newquestion):
| 2.5 | 2 |
test_case.py | DUCK251/castingapi | 0 | 12762635 | import random
from random import randint
from app import app
from models import (
db,
Actor,
Movie,
Role,
ETHNICITY_TYPE,
HAIR_COLOR_TYPE,
BODY_TYPE,
GENDER_TYPE,
EYE_COLOR_TYPE
)
# Fixture contact details; the phone list intentionally contains a duplicate.
PHONES = [
    '+1-202-555-0169',
    '+1-202-555-0125',
    '+1-202-555-0133',
    '+1-202-555-0143',
    '+1-202-555-0163',
    '+1-202-555-0159',
    '+1-202-555-0166',
    '+1-202-555-0169',
    '+1-202-555-0172',
    '+1-202-555-0102',
    '+1-202-555-0183',
    '+1-202-555-0137',
]
# Every address was redacted to the same placeholder; build by repetition.
EMAILS = ['<EMAIL>'] * 12
def insert_movie(title, release_date, company, description):
    """Persist one Movie row with the given fields."""
    Movie(
        title=title,
        release_date=release_date,
        company=company,
        description=description,
    ).insert()
def insert_actor(name, age, gender, location):
    """Persist one Actor row, filling the remaining fields with random picks."""
    actor = Actor(name=name, age=age, gender=gender, location=location)
    # One random.choice per attribute, in the same order as before so the
    # RNG stream is consumed identically.
    for attr, pool in (
        ('passport', [True, False]),
        ('driver_license', [True, False]),
        ('ethnicity', ETHNICITY_TYPE),
        ('hair_color', HAIR_COLOR_TYPE),
        ('eye_color', EYE_COLOR_TYPE),
        ('body_type', BODY_TYPE),
    ):
        setattr(actor, attr, random.choice(pool))
    actor.height = randint(160, 200)
    actor.phone = random.choice(PHONES)
    actor.email = random.choice(EMAILS)
    actor.insert()
def insert_role(movie_id, name, gender, min_age, max_age):
    """Persist one Role row linking a movie to its casting constraints."""
    Role(
        movie_id=movie_id,
        name=name,
        gender=gender,
        min_age=min_age,
        max_age=max_age,
    ).insert()
MOVIES = [
[
'Blancanieves', '2021-05-30', 'Nix Films',
('A twist on the Snow White fairy tale that'
' is set in 1920s Seville and centered on a female bullfighter.')
],
[
'Aufschneider', '2021-01-01', 'Nix Films',
('About a pathologist with a complicated life.'
' His problems with himself, his colleagues and'
' patients who come down to him, dead or alive.')
],
[
'Edge of Darkness', '2020-12-30', 'BBC Films',
('As homicide detective <NAME> investigates'
' the murder of his activist daughter, he uncovers'
' a corporate cover-up and government conspiracy that'
' attracts an agent tasked with cleaning up the evidence.')
],
[
'A Crime', '2022-10-21', 'BBC Films',
("Vincent's life is on hold until he finds his wife's killer."
" Alice, his neighbor, is convinced she can make him happy. "
"She decides to invent a culprit, so that Vincent can find "
"revenge and leave the past behind. But there is no ideal "
"culprit and no perfect crime.")
],
[
'Diabolique', '2022-05-05', 'ABC Productions',
("The wife and mistress of the sadistic dean of an "
"exclusive prep school conspire to murder him.")
],
[
'<NAME>', '2021-08-10', 'ABC Productions',
("Everything changes for 15-year-old Mia when her "
"mum brings home a new boyfriend.")
],
[
"Manderlay", "2022-07-15", "ABC Productions",
"A story of slavery, set in the southern U.S. in the 1930s."
],
[
"Precious", "2021-05-15", "ABC Productions",
("In New York City's Harlem circa 1987, an overweight, abused,"
" illiterate teen who is pregnant with her second child is "
"invited to enroll in an alternative school in hopes that"
" her life can head in a new direction.")
],
[
"The Last Temptation of Christ", "2023-10-01", "<NAME>",
("The life of Jesus Christ, his journey through life as he faces"
" the struggles all humans do, and his final "
"temptation on the cross.")
],
[
"Palace Beach Hotel", "2023-01-01", "<NAME>",
("Three young soldiers who participated in a military operation"
" that went wrong, and where one of their comrades had been "
"killed before their eyes, are placed in a luxury hotel to "
"prevent a scandal. Despite the help of a young military "
"psychiatrist, the young trio denies any trauma suffered, "
"but they seem to hold a very different secret truth.")
],
[
"Nordwand", "2021-03-28", "<NAME>",
("Based on a true story, North Face is a survival drama film"
" about a competition to climb the most dangerous rock face"
" in the Alps. Set in 1936, as Nazi propaganda urges the "
"nation's Alpinists to conquer the unclimbed north face of"
" the Swiss massif - the Eiger - two reluctant German "
"climbers begin their daring ascent.")
],
[
"Das Zeugenhaus", "2020-12-20", "<NAME>",
("Witnesses about to testify at the Nuremberg War Trials "
"needed a safe place to wait. All under one roof, each "
"with their own secrets. And the countess assigned to "
"take care of them. What was her secret?")
],
[
"<NAME>", "2021-04-12", "Met film",
("A look at the life of philosopher and political "
"theorist <NAME>, who reported for 'The New "
"Yorker' on the trial of the Nazi leader Adolf "
"Eichmann in Jerusalem.")
],
[
"<NAME>", "2021-04-13", "Met film",
("A woman inexplicably finds herself cut off from"
" all human contact when an invisible, unyielding"
" wall suddenly surrounds the countryside. Accompanied"
" by her loyal dog Lynx, she becomes immersed in a world"
" untouched by civilization and ruled by the laws of nature.")
],
[
"Winnetou", "2022-02-02", "Met film",
("When violent conflict breaks out between greedy railroaders"
" and a tribe of Mescalero Apaches, only two men, destined "
"to be blood brothers, can prevent all-out war: chief's son"
" Winnetou and German engineer <NAME>.")
],
[
"Am Limit", "2023-09-09", "Met film",
("Daredevil mountain climbers on their attempt to"
" break yet another speed climbing record.")
],
[
"<NAME>, Anna!", "2024-06-06", "Netflix",
"Anna life stroy"
],
[
"Momentversagen", "2024-07-17", "Netflix",
("In a trendy restaurant, public prosecutor <NAME>"
" and his colleagues toast his promotion. On the way home,"
" he meets a quarrelling junkie couple in a park. Manuel "
"wants to help the woman, who is beaten and strangled, "
"and intervenes.")
],
[
"Fake Movie", "2024-08-28", "Matrix film",
"Just Fake one"
],
[
"Fun Movie", "2025-01-01", "Matrix film",
"Just Fun one"
]
]
ACTORS = [
['<NAME>', 24, 'female', 'LA'],
['<NAME>', 62, 'male', 'CA'],
['<NAME>', 33, 'female', 'LA'],
['<NAME>', 40, 'female', 'CA'],
['<NAME>', 52, 'male', 'MA'],
['<NAME>', 15, 'male', 'MA'],
['<NAME>', 22, 'female', 'MA'],
['<NAME>', 38, 'male', 'KA'],
['<NAME>', 40, 'female', 'KA'],
['<NAME>', 30, 'male', 'KA']
]
ROLES = [
[1, 'kimmich', 'male', 25, 30],
[1, 'revan', 'male', 30, 35],
[2, 'jack', 'male', 15, 25],
[2, 'mich', 'female', 10, 15],
[3, 'kim', 'female', 20, 25],
[3, 'lee', 'female', 20, 25],
[4, 'park', 'male', 50, 60],
[4, 'park', 'male', 70, 80],
]
# Seed the database with the fixture rows defined above.
for title, release_date, company, description in MOVIES:
    insert_movie(title, release_date, company, description)
for name, age, gender, location in ACTORS:
    insert_actor(name, age, gender, location)
for movie_id, role_name, role_gender, min_age, max_age in ROLES:
    insert_role(movie_id, role_name, role_gender, min_age, max_age)
| 2.90625 | 3 |
psychic/utils.py | wmvanvliet/psychic | 0 | 12762636 | import logging
import numpy as np
from .dataset import DataSet
from .markers import markers_to_events
def sliding_window_indices(window_size, window_step, sig_len):
    '''Returns indices for a sliding window with shape [nwindows x window_size]'''
    # Number of whole windows that fit when stepping window_step at a time.
    nwindows = int(np.floor((sig_len - window_size + window_step) /
                            float(window_step)))
    # (A stray debug ``print(nwindows)`` was removed here.)
    starts = np.arange(nwindows).reshape(nwindows, 1) * window_step
    return starts + np.arange(window_size)

def sliding_window(signal, window_size, window_step, win_func=None):
    '''Apply a sliding window to a 1D signal. Returns [#windows x window_size].

    If *win_func* is given it must have ``window_size`` elements and is
    multiplied into every window.
    '''
    signal = np.asarray(signal)
    if signal.ndim != 1:
        raise ValueError('Sliding window works on 1D arrays only!')
    if win_func is not None:
        if win_func.size != window_size:
            raise ValueError('window_size (%d) does not match win_func.size (%d)' % (
                window_size, win_func.size))
    indices = sliding_window_indices(window_size, window_step, signal.shape[0])
    windows = signal.take(indices=indices)
    if win_func is not None:
        windows = windows * win_func  # broadcasting matches from last dim
    return windows

def stft(signal, nfft, stepsize):
    '''Calculate the short-time Fourier transform (STFT) with a Hanning window.
    Returns [windows x FFT coefficients]'''
    wins = sliding_window(signal, nfft, stepsize, win_func=np.hanning(nfft))
    return np.fft.rfft(wins, axis=1)

def spectrogram(signal, nfft, stepsize):
    '''
    Calculate a spectrogram using STFT.
    Returns [windows x frequencies], in units related to power.
    Equivalent to power spectral density.
    '''
    spec = stft(signal, nfft, stepsize)
    # convert to power. The abs() is the magnitude of a complex number
    spec = np.abs(spec) ** 2 / nfft
    # compensate for the negative frequencies dropped by rfft
    spec[:, 1:-1] *= 2
    # correct for the energy lost to the Hanning window
    spec /= np.mean(np.abs(np.hanning(nfft)) ** 2)
    # compensate for overlapping windows
    nwins = spec.shape[0]
    overlap = stepsize / float(nfft)
    spec *= (1 + (nwins - 1) * overlap) / nwins
    return spec
def get_samplerate(d, axis=1):
    '''
    Derive the sample rate from the timestamps stored in ``d.ids`` (when the
    last axis is requested) or in ``d.feat_lab[axis]`` otherwise. The rate is
    the rounded reciprocal of the median difference between consecutive
    time stamps.

    Parameters
    ----------
    d : :class:`psychic.DataSet`
        The data to estimate the sample rate of. Must contain time stamps
        in ``d.ids``
    axis : int (default 1)
        The axis along which time samples are stored.

    Returns
    -------
    sample_rate : float
        The estimated samplerate.
    '''
    assert axis < d.data.ndim, 'Invalid axis specified'
    if axis == d.data.ndim - 1:
        stamps = np.asarray(d.ids[0], dtype=float)
    else:
        stamps = np.array([float(x) for x in d.feat_lab[axis]])
    return np.round(1. / np.median(np.diff(stamps)))
def find_segments(events, event_indices, start_mark, end_mark):
    '''Helper to find matching start/end markers in an event array.

    Returns a list of (start_index, end_index) pairs; unmatched start
    markers are logged as a warning, an end without a start asserts.
    '''
    events = np.asarray(events)
    event_indices = np.asarray(event_indices)
    assert events.size == event_indices.size
    relevant = (events == start_mark) | (events == end_mark)
    sel_events, sel_ids = events[relevant], event_indices[relevant]
    open_starts, segments = [], []
    for ev, idx in zip(sel_events, sel_ids):
        if ev == start_mark:
            open_starts.append(idx)
        else:
            assert open_starts != [], 'Missing start marker'
            segments.append((open_starts.pop(), idx))
    if open_starts:
        logging.getLogger('psychic.utils.find_segments').warning(
            'Did not end start marker(s) at %s' % repr(open_starts))
    return segments
def cut_segments(d, marker_tuples, offsets=(0, 0)):
    '''
    Cut a dataset into segments using (start_marker, end_marker) tuples.

    Parameters
    ----------
    d : :class:`psychic.DataSet`
        Continuous data to cut into segments.
    marker_tuples : list of tuples
        A list of (start_marker, end_marker) marker codes delimiting each
        type of segment.
    offsets : sequence of two ints (default (0, 0))
        Offsets added to the start and end index of every segment.
        (Changed from a mutable list default to a tuple; it is only
        unpacked, never mutated, so callers are unaffected.)

    Returns
    -------
    data : list of :class:`psychic.DataSet`
        A list with datasets.
    '''
    start_off, end_off = offsets
    e, ei, _ = markers_to_events(d.labels.flat)
    # Collect every (start, end) pair for every requested marker tuple,
    # then return the segments in chronological order.
    segments = []
    for (sm, em) in marker_tuples:
        segments.extend(find_segments(e, ei, sm, em))
    segments.sort()
    return [d[s + start_off:e + end_off] for (s, e) in segments]
def wolpaw_bitr(N, P):
assert 0 <= P <= 1
assert 2 <= N
result = np.log2(N)
if P > 0:
result += P * np.log2(P)
if P < 1:
result += (1 - P) * np.log2((1 - P)/(N - 1.))
return result
def split_in_bins(d, order, n, legend=lambda i, b: 'slice %d' % i, ascending=True):
    """Split the instances of *d* into *n* equal-size bins sorted by *order*.

    Returns ``(bins, dataset)`` where *bins* is a list of index arrays and
    the returned DataSet carries an [n x ninstances] boolean label matrix
    marking bin membership, with class labels produced by *legend*.
    """
    idx = np.argsort(order)
    if not ascending:
        idx = idx[::-1]
    bin_size = int(len(order) / float(n))
    bins = [idx[i * bin_size:(i + 1) * bin_size] for i in range(n)]
    # Bug fix: the np.bool alias was removed in NumPy 1.24; the builtin
    # bool is the supported spelling for a boolean dtype.
    labels = np.zeros((n, d.ninstances), dtype=bool)
    for i, b in enumerate(bins):
        labels[i, b] = True
    cl_lab = [legend(i, bins[i]) for i in range(n)]
    return (bins, DataSet(labels=labels, cl_lab=cl_lab, default=d))
| 2.703125 | 3 |
boa3_test/tests/compiler_tests/test_class.py | DanPopa46/neo3-boa | 0 | 12762637 | from boa3.neo.cryptography import hash160
from boa3.neo.vm.type.String import String
from boa3_test.tests.boa_test import BoaTest
from boa3_test.tests.test_classes.testengine import TestEngine
class TestClass(BoaTest):
    """Smart-contract tests for Python class support in neo3-boa."""
    # Folder (relative to the test resources root) holding the contracts.
    default_folder: str = 'test_sc/class_test'
    def test_notification_get_variables(self):
        path = self.get_contract_path('NotificationGetVariables.py')
        output, manifest = self.compile_and_save(path)
        script = hash160(output)
        engine = TestEngine()
        # Without arguments no notification is emitted and defaults come back.
        result = self.run_smart_contract(engine, path, 'script_hash', [],
                                         expected_result_type=bytes)
        self.assertEqual(len(engine.notifications), 0)
        self.assertEqual(bytes(20), result)
        result = self.run_smart_contract(engine, path, 'event_name', [])
        self.assertEqual(len(engine.notifications), 0)
        self.assertEqual('', result)
        result = self.run_smart_contract(engine, path, 'state', [])
        self.assertEqual(len(engine.notifications), 0)
        self.assertEqual([], result)
        result = self.run_smart_contract(engine, path, 'script_hash', [1])
        self.assertEqual(len(engine.notifications), 1)
        self.assertEqual(script, result)
        engine.reset_engine()
        result = self.run_smart_contract(engine, path, 'event_name', [1])
        self.assertEqual(len(engine.notifications), 1)
        self.assertEqual('notify', result)
        engine.reset_engine()
        result = self.run_smart_contract(engine, path, 'state', [1])
        self.assertEqual(len(engine.notifications), 1)
        self.assertEqual([1], result)
        engine.reset_engine()
        result = self.run_smart_contract(engine, path, 'state', ['1'])
        self.assertEqual(len(engine.notifications), 1)
        self.assertEqual(['1'], result)
    def test_notification_set_variables(self):
        path = self.get_contract_path('NotificationSetVariables.py')
        output, manifest = self.compile_and_save(path)
        script = hash160(output)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'script_hash', script,
                                         expected_result_type=bytes)
        self.assertEqual(script, result)
        result = self.run_smart_contract(engine, path, 'event_name', 'unit test')
        self.assertEqual('unit test', result)
        result = self.run_smart_contract(engine, path, 'state', (1, 2, 3))
        self.assertEqual([1, 2, 3], result)
    def test_contract_constructor(self):
        path = self.get_contract_path('ContractConstructor.py')
        output, manifest = self.compile_and_save(path)
        engine = TestEngine()
        result = self.run_smart_contract(engine, path, 'new_contract')
        self.assertEqual(5, len(result))
        # The engine may return bytes fields as str; normalise before comparing.
        if isinstance(result[2], str):
            result[2] = String(result[2]).to_bytes()
        if isinstance(result[3], str):
            result[3] = String(result[3]).to_bytes()
        self.assertEqual(0, result[0])
        self.assertEqual(0, result[1])
        self.assertEqual(bytes(20), result[2])
        self.assertEqual(bytes(), result[3])
        self.assertEqual({}, result[4])
| 2.140625 | 2 |
auctions/migrations/0002_auctionlisting_bids_comments_watchlist.py | Architkapoor13/Auctions | 5 | 12762638 | # Generated by Django 3.0.8 on 2020-09-13 18:33
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: adds the AuctionListing,
    # Watchlist, Comments and Bids tables. Avoid hand-editing beyond what
    # makemigrations would regenerate.
    dependencies = [
        ('auctions', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='AuctionListing',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('description', models.CharField(max_length=500)),
                ('category', models.CharField(max_length=64)),
                ('startingbid', models.FloatField()),
                ('piclink', models.CharField(default='https://encrypted-tbn0.gstatic.com/images?q=tbn%3AANd9GcRqEWgS0uxxEYJ0PsOb2OgwyWvC0Gjp8NUdPw&usqp=CAU', max_length=200)),
                ('currentbid', models.FloatField()),
                ('isactive', models.BooleanField(default=True)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='user_auctions', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Watchlist',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='item_in_watchlist', to='auctions.AuctionListing')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='watchlist_of_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Comments',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=200)),
                ('created_on', models.DateTimeField(auto_now_add=True)),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments_on_title', to='auctions.AuctionListing')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='comments_by_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='Bids',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('bid', models.FloatField()),
                ('item', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bid_on_title', to='auctions.AuctionListing')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='bids_by_user', to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 1.671875 | 2 |
src/debugpy/_vendored/pydevd/tests_python/test_smart_step_into_bytecode.py | r3m0t/debugpy | 695 | 12762639 | <gh_stars>100-1000
import sys
try:
from _pydevd_bundle import pydevd_bytecode_utils
except ImportError:
pass
import pytest
pytestmark = pytest.mark.skipif(sys.version_info[0] < 3, reason='Only available for Python 3.')
@pytest.fixture(autouse=True, scope='function')
def enable_strict():
    """Force STRICT_MODE on for each test; restored on teardown."""
    # In tests enable strict mode (in regular operation it'll be False and will just ignore
    # bytecodes we still don't handle as if it didn't change the stack).
    pydevd_bytecode_utils.STRICT_MODE = True
    yield
    pydevd_bytecode_utils.STRICT_MODE = False
def check(found, expected):
    """Assert that *found* variants match *expected* pairwise.

    An expected ``name`` may be a collection of acceptable names. Offsets
    are not compared for equality (they differ across Python versions);
    they only have to be strictly increasing.
    """
    assert len(found) == len(expected), '%s != %s' % (found, expected)
    prev_offset = -1
    for got, want in zip(found, expected):
        try:
            if isinstance(want.name, (list, tuple, set)):
                assert got.name in want.name
            else:
                assert got.name == want.name
            assert got.is_visited == want.is_visited
            assert got.line == want.line
            assert got.call_order == want.call_order
        except AssertionError as exc:
            raise AssertionError('%s\nError with: %s - %s' % (exc, got, want))
        assert got.offset > prev_offset
        prev_offset = got.offset
def collect_smart_step_into_variants(*args, **kwargs):
    """Run calculate_smart_step_into_variants; on failure, rerun with DEBUG on.

    The retry reproduces the failure with verbose output (and returns its
    result if it happens to succeed); DEBUG is always restored afterwards.
    """
    try:
        return pydevd_bytecode_utils.calculate_smart_step_into_variants(*args, **kwargs)
    except:
        previous_debug = pydevd_bytecode_utils.DEBUG
        pydevd_bytecode_utils.DEBUG = True
        try:
            return pydevd_bytecode_utils.calculate_smart_step_into_variants(*args, **kwargs)
        finally:
            pydevd_bytecode_utils.DEBUG = previous_debug
def check_names_from_func_str(func_str, expected):
    """exec() *func_str* (which must define ``function``) and verify the
    smart-step-into variant names/lines computed from its bytecode."""
    locs = {}
    exec(func_str, globals(), locs)
    function = locs['function']
    # Minimal stand-in for a real frame: only f_code and f_lasti are read.
    class Frame:
        f_code = function.__code__
        f_lasti = 0
    found = collect_smart_step_into_variants(
        Frame, 0, 99999, base=function.__code__.co_firstlineno)
    check_name_and_line(found, expected)
def test_smart_step_into_bytecode_info():
    # NOTE: the inner `function` is analysed at the bytecode level; the
    # expected Variant lines/offsets below depend on its exact source, so
    # do not reformat the inner function.
    from _pydevd_bundle.pydevd_bytecode_utils import Variant
    def function():
        def some(arg):
            pass
        def call(arg):
            pass
        yield sys._getframe()
        call(some(call(some())))
    generator = iter(function())
    frame = next(generator)
    found = collect_smart_step_into_variants(
        frame, 0, 99999, base=function.__code__.co_firstlineno)
    check(found, [
        Variant(name=('_getframe', 'sys'), is_visited=True, line=8, offset=20, call_order=1),
        Variant(name='some', is_visited=False, line=9, offset=34, call_order=1),
        Variant(name='call', is_visited=False, line=9, offset=36, call_order=1),
        Variant(name='some', is_visited=False, line=9, offset=38, call_order=2),
        Variant(name='call', is_visited=False, line=9, offset=40, call_order=2),
    ])
def check_name_and_line(found, expected):
    """Compare variants against expected (name, line) pairs, flattening
    child variants into '<child> (in <parent>)' entries."""
    actual = set()
    for variant in found:
        if variant.children_variants:
            for child in variant.children_variants:
                actual.add((child.name + (' (in %s)' % variant.name), child.line))
        else:
            actual.add((variant.name, variant.line))
    if actual != set(expected):
        raise AssertionError('Found: %s' % (sorted(actual, key=lambda tup: tuple(reversed(tup))),))
def test_smart_step_into_bytecode_info_002():
def function():
yield sys._getframe()
completions = foo.bar(
Something(param1, param2=xxx.yyy),
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_003():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {}
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_004():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {1: 1} # BUILD_MAP
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_005():
def function():
yield sys._getframe()
bbb = foo.bar(
Something(param1, param2=xxx.yyy), {1: 1, 2:2} # BUILD_CONST_KEY_MAP
)
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_006():
def function():
yield sys._getframe()
foo.bar(
Something(),
{
1: 1,
2:[
x for x
in call()
]
}
)
call2()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('bar', 2), ('Something', 3), ('call', 8), ('call2', 12)])
def test_smart_step_into_bytecode_info_007():
def function():
yield sys._getframe()
a[0]
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2)])
def test_smart_step_into_bytecode_info_008():
def function():
yield sys._getframe()
call(
[1, 2, 3])
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_009():
def function():
yield sys._getframe()
[1, 2, 3][0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2), ('__getitem__().__call__', 2)])
def test_smart_step_into_bytecode_info_011():
def function():
yield sys._getframe()
[1, 2, 3][0]()()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 2), ('__getitem__().__call__', 2)])
def test_smart_step_into_bytecode_info_012():
def function():
yield sys._getframe()
(lambda a:a)(1)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('<lambda>', 2)])
def test_smart_step_into_bytecode_info_013():
def function():
yield sys._getframe()
(lambda a:a,)[0](1)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__().__call__', 2), ('__getitem__', 2)])
def test_smart_step_into_bytecode_info_014():
def function():
yield sys._getframe()
try:
raise RuntimeError()
except Exception:
call2()
finally:
call3()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('RuntimeError', 3), ('call2', 5), ('call3', 7)])
def test_smart_step_into_bytecode_info_015():
def function():
yield sys._getframe()
with call():
call2()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2), ('call2', 3)])
def test_smart_step_into_bytecode_info_016():
def function():
yield sys._getframe()
call2(
1,
2,
a=3,
*args,
**kwargs
)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call2', 2)])
def test_smart_step_into_bytecode_info_017():
def function():
yield sys._getframe()
call([
x for x in y
if x == call2()
])
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found,
[('_getframe', 1), ('call', 2), ('__eq__ (in <listcomp>)', 4), ('call2 (in <listcomp>)', 4)]
)
def test_smart_step_into_bytecode_info_018():
def function():
yield sys._getframe()
class Foo(object):
def __init__(self):
pass
f = Foo()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('Foo', 8)])
def test_smart_step_into_bytecode_info_019():
def function():
yield sys._getframe()
class Foo(object):
def __init__(self):
pass
f = Foo()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('Foo', 8)])
def test_smart_step_into_bytecode_info_020():
def function():
yield sys._getframe()
for a in call():
if a != 1:
a()
break
elif a != 2:
b()
break
else:
continue
else:
raise RuntimeError()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('call', 2), ('__ne__', 3), ('a', 4), ('__ne__', 6), ('b', 7), ('RuntimeError', 12)])
def test_smart_step_into_bytecode_info_021():
def function():
yield sys._getframe()
a, b = b, a
a, b, c = c, a, b
a, b, c, d = d, c, a, b
a()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('a', 5)])
def test_smart_step_into_bytecode_info_022():
def function():
yield sys._getframe()
a(
*{1, 2},
**{
1:('1' + '2'),
2: tuple(
x for x in c()
if x == d())
}
)
b()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('a', 2), ('tuple', 6), ('c', 7), ('__eq__ (in <genexpr>)', 8), ('d (in <genexpr>)', 8), ('b', 11)])
def test_smart_step_into_bytecode_info_023():
def function():
yield sys._getframe()
tuple(
x for x in
c()
if x == d()
)
tuple(
x for x in
c()
if x == d()
)
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1), ('tuple', 2), ('c', 4), ('__eq__ (in <genexpr>)', 5), ('d (in <genexpr>)', 5), ('tuple', 7), ('c', 9), ('__eq__ (in <genexpr>)', 10), ('d (in <genexpr>)', 10)])
def test_smart_step_into_bytecode_info_024():
func = '''def function():
yield sys._getframe()
call(a ** b)
call(a * b)
call(a @ b)
call(a / b)
call(a // b)
call(a % b)
call(a + b)
call(a - b)
call(a >> b)
call(a << b)
call(a & b)
call(a | b)
call(a ^ b)
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [
('_getframe', 1),
('__pow__', 2),
('call', 2),
('__mul__', 3),
('call', 3),
('__matmul__', 4),
('call', 4),
('__div__', 5),
('call', 5),
('__floordiv__', 6),
('call', 6),
('__mod__', 7),
('call', 7),
('__add__', 8),
('call', 8),
('__sub__', 9),
('call', 9),
('__rshift__', 10),
('call', 10),
('__lshift__', 11),
('call', 11),
('__and__', 12),
('call', 12),
('__or__', 13),
('call', 13),
('__xor__', 14),
('call', 14)],
)
def test_smart_step_into_bytecode_info_025():
func = '''def function():
yield sys._getframe()
a **= b
a *= b
a @= b
a /= b
a //= b
a %= b
a += b
a -= b
a >>= b
a <<= b
a &= b
a |= b
a ^= b
call()
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 15)])
@pytest.mark.skipif(sys.version_info[0:2] < (3, 8), reason='Walrus operator only available for Python 3.8 onwards.')
def test_smart_step_into_bytecode_info_026():
func = '''def function():
yield sys._getframe()
call((a:=1))
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_027():
def function():
yield sys._getframe()
def call():
pass
a = [1, call]
a[:1] = []
x = a[0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 8), ('__getitem__().__call__', 8)])
def test_smart_step_into_bytecode_info_028():
def function():
yield sys._getframe()
def call():
pass
a = [1, call]
a[:1] += []
x = a[0]()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__getitem__', 7), ('__getitem__', 8), ('__getitem__().__call__', 8)])
def test_smart_step_into_bytecode_info_029():
def function():
yield sys._getframe()
call((+b) + (-b) - (not b) * (~b))
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('__add__', 3), ('__mul__', 3), ('__sub__', 3), ('call', 3)])
def test_smart_step_into_bytecode_info_030():
def function():
yield sys._getframe()
call({a for a in b})
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 3)])
def test_smart_step_into_bytecode_info_031():
def function():
yield sys._getframe()
call({a: b for a in b})
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 3)])
def test_smart_step_into_bytecode_info_032():
def function():
yield sys._getframe()
del a[:2]
call()
generator = iter(function())
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 1), ('call', 4)])
def test_smart_step_into_bytecode_info_033():
    # The call producing the raised exception value must appear as a variant.
    check_names_from_func_str('''def function():
    yield sys._getframe()
    raise call()
''', [('_getframe', 1), ('call', 3)])
@pytest.mark.skipif(sys.version_info[0:2] < (3, 6), reason='Async only available for Python 3.6 onwards.')
def test_smart_step_into_bytecode_info_034():
    # await expressions and 'async for' iterables must all show up as variants.
    check_names_from_func_str('''async def function():
    await a()
    async for b in c():
        await d()
''', [('a', 1), ('c', 2), ('d', 3)])
def test_smart_step_into_bytecode_info_035():
check_names_from_func_str('''def function():
assert 0, 'Foo'
''', [('AssertionError', 1)])
def test_smart_step_into_bytecode_info_036():
check_names_from_func_str('''def function(a):
global some_name
some_name = a
some_name()
''', [('some_name', 3)])
def test_smart_step_into_bytecode_info_037():
func = '''def function():
some_name = 10
def another():
nonlocal some_name
some_name = a
some_name()
return another
'''
locs = {}
exec(func, globals(), locs)
function = locs['function']()
class Frame:
f_code = function.__code__
f_lasti = 0
found = collect_smart_step_into_variants(
Frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('some_name', 3)])
def test_smart_step_into_bytecode_info_038():
check_names_from_func_str('''def function():
try:
call()
finally:
call2()
''', [('call', 2), ('call2', 4)])
def test_smart_step_into_bytecode_info_039():
check_names_from_func_str('''def function():
try:
call()
except:
return call2()
finally:
return call3()
''', [('call', 2), ('call2', 4), ('call3', 6)])
def test_smart_step_into_bytecode_info_040():
check_names_from_func_str('''def function():
a.call = foo()
a.call()
''', [('foo', 1), ('call', 2)])
def test_smart_step_into_bytecode_info_041():
check_names_from_func_str('''def function():
foo = 10
del foo
foo = method
foo()
''', [('foo', 4)])
def test_smart_step_into_bytecode_info_042():
check_names_from_func_str('''
foo = 10
def function():
global foo
foo()
''', [('foo', 2)])
def test_smart_step_into_bytecode_info_043():
def function(call):
def another_function():
yield sys._getframe()
call()
for frame in another_function():
yield frame
generator = iter(function(lambda: None))
frame = next(generator)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('_getframe', 3), ('call', 5)])
def test_smart_step_into_bytecode_info_044():
check_names_from_func_str('''
def function(args):
call, *c = args
call(*c)
''', [('call', 2)])
def test_smart_step_into_bytecode_info_045():
check_names_from_func_str('''
def function():
x.foo = 10
del x.foo
x.foo = lambda:None
x.foo()
''', [('foo', 4)])
def test_smart_step_into_bytecode_info_046():
check_names_from_func_str('''
a = 10
def function(args):
global a
del a
a()
''', [('a', 3)])
def test_smart_step_into_bytecode_info_047():
check_names_from_func_str('''
def function():
call(a, b=1, *c, **kw)
''', [('call', 1)])
def test_smart_step_into_bytecode_info_048():
check_names_from_func_str('''
def function(fn):
fn = call(fn)
def pa():
fn()
return pa()
''', [('call', 1), ('pa', 6)])
def test_smart_step_into_bytecode_info_049():
def function(foo):
class SomeClass(object):
implementation = foo
implementation()
f = sys._getframe()
return SomeClass.f
frame = function(object)
found = collect_smart_step_into_variants(
frame, 0, 99999, base=function.__code__.co_firstlineno)
check_name_and_line(found, [('implementation', 5), ('_getframe', 6)])
def test_smart_step_into_bytecode_info_050():
check_names_from_func_str('''
def function():
('a' 'b').index('x')
''', [('index', 1)])
def test_smart_step_into_bytecode_info_051():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call((f'a{v()!r}' f'b{v2()}'))
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_052():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call({*v(), *v2()})
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_053():
check_names_from_func_str('''
def function():
v = 1
v2 = 2
call({**v(), **v2()})
''', [('call', 3), ('v', 3), ('v2', 3)])
def test_smart_step_into_bytecode_info_054():
check_names_from_func_str('''
def function():
import a
from a import b
call()
''', [('call', 3)])
def test_smart_step_into_bytecode_info_055():
check_names_from_func_str('''
async def function():
async with lock() as foo:
await foo()
''', [('lock', 1), ('foo', 2)])
def test_smart_step_into_bytecode_info_056():
check_names_from_func_str('''
def function(mask_path):
wc = some_func(
parsed_content,
np.array(
Image.open(mask_path)
)
)
''', [('some_func', 1), ('array', 3), ('open', 4)])
def test_smart_step_into_bytecode_info_057():
check_names_from_func_str('''
def function(mask_path):
wc = some_func(
parsed_content,
np.array(
my.pack.Image.open(mask_path)
)
)
''', [('some_func', 1), ('array', 3), ('open', 4)])
def test_get_smart_step_into_variant_from_frame_offset():
    """Lookup must return the variant with the greatest offset <= frame offset.

    Offsets before the first variant yield None; offsets between two variants
    resolve to the earlier one; offsets past the last resolve to the last.
    """
    from _pydevd_bundle.pydevd_bytecode_utils import Variant
    found = [
        Variant(name='_getframe', is_visited=True, line=8, offset=20, call_order=1),
        Variant(name='some', is_visited=False, line=9, offset=34, call_order=1),
        Variant(name='call', is_visited=False, line=9, offset=36, call_order=1),
        Variant(name='some', is_visited=False, line=9, offset=38, call_order=2),
        Variant(name='call', is_visited=False, line=9, offset=40, call_order=2),
    ]
    # Before the first variant: no match.
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(19, found) is None
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(20, found).offset == 20
    # 33 falls between 20 and 34, so it still resolves to the variant at 20.
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(33, found).offset == 20
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(34, found).offset == 34
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(35, found).offset == 34
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(36, found).offset == 36
    # Past the last variant: clamps to the last one.
    assert pydevd_bytecode_utils.get_smart_step_into_variant_from_frame_offset(44, found).offset == 40
def test_smart_step_into_bytecode_info_00eq():
    """Rich comparisons must be reported under their dunder names.

    On Python < 3.9 the 'is' comparison is also reported as a variant;
    from 3.9 it is not — presumably because 'is' compiles to different
    bytecode there (TODO confirm against the opcode tables).
    """
    from _pydevd_bundle.pydevd_bytecode_utils import Variant
    def function():
        a = 1
        b = 1
        if a == b:
            pass
        if a != b:
            pass
        if a > b:
            pass
        if a >= b:
            pass
        if a < b:
            pass
        if a <= b:
            pass
        if a is b:
            pass
        yield sys._getframe()
    generator = iter(function())
    frame = next(generator)
    found = collect_smart_step_into_variants(
        frame, 0, 99999, base=function.__code__.co_firstlineno)
    if sys.version_info[:2] < (3, 9):
        check(found, [
            Variant(name='__eq__', is_visited=True, line=3, offset=18, call_order=1),
            Variant(name='__ne__', is_visited=True, line=5, offset=33, call_order=1),
            Variant(name='__gt__', is_visited=True, line=7, offset=48, call_order=1),
            Variant(name='__ge__', is_visited=True, line=9, offset=63, call_order=1),
            Variant(name='__lt__', is_visited=True, line=11, offset=78, call_order=1),
            Variant(name='__le__', is_visited=True, line=13, offset=93, call_order=1),
            Variant(name='is', is_visited=True, line=15, offset=108, call_order=1),
            Variant(name=('_getframe', 'sys'), is_visited=True, line=18, offset=123, call_order=1),
        ])
    else:
        check(found, [
            Variant(name='__eq__', is_visited=True, line=3, offset=18, call_order=1),
            Variant(name='__ne__', is_visited=True, line=5, offset=33, call_order=1),
            Variant(name='__gt__', is_visited=True, line=7, offset=48, call_order=1),
            Variant(name='__ge__', is_visited=True, line=9, offset=63, call_order=1),
            Variant(name='__lt__', is_visited=True, line=11, offset=78, call_order=1),
            Variant(name='__le__', is_visited=True, line=13, offset=93, call_order=1),
            Variant(name=('_getframe', 'sys'), is_visited=True, line=18, offset=123, call_order=1),
        ])
def _test_find_bytecode(root_dir='C:\\bin\\Python310\\Lib\\site-packages\\'):
    """Manual helper (leading underscore: not collected by pytest).

    Disassembles every .py file under *root_dir* and raises when the Python
    3.10 'COPY_DICT_WITHOUT_KEYS' opcode is found, i.e. locates real-world
    code exercising the new match-statement bytecode.

    Fixes: bare except narrowed to the errors open/read can actually raise;
    dead commented-out counter removed; the scan root is now a parameter
    (default unchanged).
    """
    import glob
    import dis
    from io import StringIO
    for filename in glob.iglob(root_dir + '**/*.py', recursive=True):
        print(filename)
        with open(filename, 'r', encoding='utf-8') as stream:
            try:
                contents = stream.read()
            except (OSError, UnicodeDecodeError):
                # Unreadable or badly-encoded file: report and keep scanning.
                sys.stderr.write('Unable to read file: %s' % (filename,))
                continue
        code_obj = compile(contents, filename, 'exec')
        s = StringIO()
        dis.dis(code_obj, file=s)
        # https://docs.python.org/3.10/library/dis.html has references to the new opcodes added.
        if 'COPY_DICT_WITHOUT_KEYS' in s.getvalue():
            dis.dis(code_obj)  # Re-dump to stdout for manual inspection.
            raise AssertionError('Found bytecode in: %s' % filename)
| 1.8125 | 2 |
leetcode_python/Array/fair-candy-swap.py | yennanliu/Python_basics | 18 | 12762640 | # V0
# V1
# https://www.jiuzhang.com/solution/fair-candy-swap/#tag-highlight-lang-python
class Solution:
    """Fair candy swap via two pointers over the sorted arrays."""

    def fairCandySwap(self, A, B):
        """Return [a, b] such that exchanging a (from A) with b (from B)
        equalizes the two sums, or [] when no such pair exists.

        Note: sorts A and B in place, like the original implementation.
        """
        total_a, total_b = sum(A), sum(B)
        A.sort()
        B.sort()
        # For a balancing swap, A[i] - B[j] must equal this delta.
        delta = total_a - (total_a + total_b) / 2
        result = []
        i = j = 0
        while i < len(A) and j < len(B):
            diff = A[i] - B[j]
            if diff == delta:
                result = [A[i], B[j]]
                break
            if diff > delta:
                j += 1
            else:
                i += 1
        return result
# V1'
# https://blog.csdn.net/fuxuemingzhu/article/details/82013077
class Solution(object):
    def fairCandySwap(self, A, B):
        """Find [a, b] whose exchange equalizes the sums of A and B.

        :type A: List[int]
        :type B: List[int]
        :rtype: List[int]

        Returns None when no valid pair exists (same as the original's
        implicit fall-through). Fix: use integer arithmetic so the result
        contains ints rather than floats; an odd combined total can never be
        split evenly, so it is rejected up front.
        """
        sum_A, sum_B, set_B = sum(A), sum(B), set(B)
        total = sum_A + sum_B
        if total % 2:
            # Odd total: the two halves can never be made equal.
            return None
        target = total // 2
        for a in A:
            b = target - (sum_A - a)
            # 1 <= b <= 100000 mirrors the problem's value constraints.
            if 1 <= b <= 100000 and b in set_B:
                return [a, b]
# V2
# Time: O(m + n)
# Space: O(m + n)
class Solution(object):
    # Time: O(m + n)
    # Space: O(m + n)
    def fairCandySwap(self, A, B):
        """Return [a, b] with a from A and b from B such that swapping them
        equalizes both sums, or [] when no such pair exists.

        :type A: List[int]
        :type B: List[int]
        :rtype: List[int]

        Fix: when sum(A) - sum(B) is odd no swap can balance the sums, but
        floor division previously produced a spurious candidate (e.g.
        A=[1], B=[2, 4] returned [1, 4], which does not balance anything);
        that case is now rejected explicitly.
        """
        gap = sum(A) - sum(B)
        if gap % 2:
            return []
        diff = gap // 2
        setA = set(A)
        for b in set(B):
            if diff + b in setA:
                return [diff + b, b]
        return []
se3cnn/point/self_interaction.py | mariogeiger/se3cnn | 170 | 12762641 | <filename>se3cnn/point/self_interaction.py
# pylint: disable=arguments-differ, no-member, missing-docstring, invalid-name, line-too-long
from functools import reduce
import torch
from se3cnn.point.kernel import Kernel
from se3cnn.point.radial import ConstantRadialModel
class SortSphericalSignals(torch.nn.Module):
    """Permutes feature channels so representation blocks are ordered by l.

    Builds a fixed permutation matrix from Rs = [(multiplicity, l), ...]
    once, then applies it to the channel dimension in forward.
    """

    def __init__(self, Rs):
        super().__init__()
        # (l, start_index, width) for every (mul, l) entry, in input order.
        blocks = []
        offset = 0
        for mul, l in Rs:
            width = mul * (2 * l + 1)
            blocks.append((l, offset, width))
            offset += width

        # Destination rows are filled by ascending l (ties by source index).
        perm = torch.zeros(offset, offset)
        dst = 0
        for _l, src, width in sorted(blocks):
            perm[dst:dst + width, src:src + width] = torch.eye(width)
            dst += width

        self.register_buffer('mixing_matrix', perm)

    def forward(self, x):
        """
        :param x: tensor [batch, feature, ...]
        """
        flat = x.flatten(2)
        out = torch.einsum('ij,zja->zia', (self.mixing_matrix, flat)).contiguous()
        return out.view(*x.size())
class ConcatenateSphericalSignals(torch.nn.Module):
    """Concatenates several spherical signals, then sorts the result by l."""

    def __init__(self, *Rs):
        super().__init__()
        # Flatten the per-signal representation lists into one combined list.
        combined_Rs = [pair for rs in Rs for pair in rs]
        self.sort = SortSphericalSignals(combined_Rs)

    def forward(self, *signals):
        stacked = torch.cat(signals, dim=1)
        return self.sort(stacked)
class SelfInteraction(torch.nn.Module):
    """Per-point feature mixing layer.

    The Kernel is evaluated at the zero vector with ConstantRadialModel —
    presumably reducing it to a plain linear map per point; confirm against
    Kernel's definition.
    """
    def __init__(self, Rs_in, Rs_out):
        super().__init__()
        self.kernel = Kernel(Rs_in, Rs_out, ConstantRadialModel)
    def forward(self, features):
        """
        :param features: tensor [..., channel]
        :return: tensor [..., channel]
        """
        # Flatten all leading dimensions so the kernel sees a flat batch.
        *size, n = features.size()
        features = features.view(-1, n)
        # Kernel evaluated at the origin, once per flattened batch element.
        k = self.kernel(features.new_zeros(features.size(0), 3))
        features = torch.einsum("zij,zj->zi", (k, features))
        # Restore the original leading dimensions.
        features = features.view(*size, -1)
        return features
| 1.921875 | 2 |
pseudo/generators/csharp_generator.py | alefranz/pseudo | 0 | 12762642 | <reponame>alefranz/pseudo<gh_stars>0
from pseudo.code_generator import CodeGenerator, switch
from pseudo.middlewares import DeclarationMiddleware, NameMiddleware, TupleMiddleware
from pseudo.pseudo_tree import Node, local
# Mapping from pseudo-language boolean operators to their C# spellings.
OPS = {'not': '!', 'and': '&&', 'or': '||'}
class CSharpGenerator(CodeGenerator):
'''CSharp code generator'''
indent = 4
use_spaces = True
middlewares = [TupleMiddleware(all=False),
DeclarationMiddleware,
NameMiddleware(
normal_name='camel_case',
method_name='pascal_case',
function_name='pascal_case')]
types = {
'Int': 'int',
'Float': 'float',
'Boolean': 'bool',
'String': 'string',
'List': 'List<{0}>',
'Dictionary': 'Dictionary<{0}, {1}>',
'Set': 'HashSet<{0}>',
'Tuple': lambda x: 'Tuple<{0}>'.format(', '.join(x)),
'Array': '{0}[]',
# fixed-size buffers in c# are not widely used
# they require a struct and an unsafe annotation
# we can a unsafe-fixed-size-buffer option to config
'Void': 'void',
'Regexp': 'Regex',
'RegexpMatch': 'Match'
}
templates = dict(
module = '''
using System;
%<dependencies:lines>
%<custom_exceptions:lines>
%<tuple_definitions:lines>
%<#class_definitions>
public class Program
{
%<constants:lines>
%<#function_definitions>
public static void Main(string[] args)
{
%<main:semi>
}
}''',
function_definition = '''
static %<@return_type> %<name>(%<#params>)
{
%<block:semi>
}''',
method_definition = '''
%<.is_public> %<@return_type> %<name>(%<#params>)
{
%<block:semi>
}''',
method_definition_is_public = ('public', 'private'),
class_definition = '''
public class %<name>%<.base>
{
%<attrs:lines>
%<.constructor>
%<methods:line_join>
}''',
class_definition_base = ('%<#base>', ''),
class_definition_constructor = ('%<constructor>', ''),
class_attr = "%<.is_public>%<@pseudo_type> %<name:camel_case 'lower'>;",
class_attr_is_public = ('public ', 'private '),
immutable_class_attr = '''
private readonly %<@pseudo_type> %<name>;
public %<@pseudo_type> %<name:camel_case 'title'> { get { return %<name>; } }''',
anonymous_function = "%<#anon_params> =>%<#anon_block>",
constructor = '''
public %<this>(%<#params>)
{
%<block:semi>
}''',
dependency = 'using %<name>;',
local = '%<name>',
typename = '%<name>',
int = '%<value>',
float = '%<value>',
string = '%<#safe_double>',
boolean = '%<value>',
null = 'null',
simple_initializer = "new %<name>(%<args:join ', '>)",
list = "new[] {%<elements:join ', '>}",
dictionary = "new %<@pseudo_type> { %<pairs:join ', '> }",
pair = "{%<key>, %<value>}",
attr = "%<object>.%<attr>",
new_instance = "new %<class_name>(%<args:join ', '>)",
# assignment = '%<#z>',
assignment = switch('first_mention',
true = 'var %<target> = %<value>', # in v0.3 add config/use var only for generic types
_otherwise = '%<target> = %<value>'
),
tuple = switch(lambda e: len(e.elements) <= 2,
true = "Tuple.Create(%<elements:join ', '>)",
_otherwise = '''
Tuple.Create(
%<elements:c_lines>
)'''),
array = "new[] { %<elements:join ', '> }",
char = "%<#char>",
binary_op = '%<#binary_left> %<#op> %<#binary_right>',
unary_op = '%<#op>%<value>',
comparison = '%<#comparison>',
static_call = "%<receiver>.%<message>(%<args:join ', '>)",
call = "%<function>(%<args:join ', '>)",
method_call = "%<receiver>.%<message>(%<args:join ', '>)",
this_method_call = "this.%<message:camel_case 'title'>(%<args:join ', '>)",
this = 'this',
instance_variable = 'this.%<name>',
throw_statement = 'throw new %<exception>(%<value>)',
if_statement = '''
if (%<test>)
{
%<block:semi>
}
%<.otherwise>''',
if_statement_otherwise = ('%<otherwise>', ''),
elseif_statement = '''
else if (%<test>)
{
%<block:semi>
}
%<.otherwise>''',
elseif_statement_otherwise = ('%<otherwise>', ''),
else_statement = '''
else
{
%<block:semi>
}''',
while_statement = '''
while (%<test>)
{
%<block:semi>
}''',
try_statement = '''
try
{
%<block:semi>
}
%<handlers:lines>''',
exception_handler = '''
catch (%<.exception> %<instance>)
{
%<block:semi>
}''',
exception_handler_exception = ('%<exception>', 'Exception'),
for_each_statement = '''
for %<iterator> in %<sequence>:
%<#block>''',
for_each_with_index_statement = '''
for %<index>, %<iterator> in %<.sequence>:
%<#block>''',
for_each_with_index_statement_sequence = ('%<#index_sequence>', ''),
for_each_in_zip_statement = '''
for %<iterators:join ', '> in zip(%<sequences:join ', '>):
%<#block>''',
implicit_return = 'return %<value>',
explicit_return = 'return %<value>',
index = switch(lambda s: isinstance(s.sequence.pseudo_type, list) and s.sequence.pseudo_type[0] == 'Tuple',
true = '%<sequence>.Item%<#tuple_index>',
_otherwise = '%<sequence>[%<index>]'
),
interpolation = "string.Format(\"%<args:join ''>\", %<#placeholders>)",
interpolation_placeholder = "{%<index>}",
interpolation_literal = "%<value>",
index_assignment = '%<sequence>[%<index>] = %<value>',
constant = '%<constant> = %<init>',
for_statement = switch(lambda f: f.iterators.type,
for_iterator_with_index = '''
for (int %<iterators.index> = 0; %<iterators.index> < %<sequences.sequence>.Length; %<iterators.index> ++)
{
var %<iterators.iterator> = %<sequences.sequence>[%<iterators.index>];
%<block:semi>
}''',
for_iterator_zip = '''
for (int _index = 0; _index < %<#first_sequence>.Length; _index ++)
{
%<#zip_iterators>
%<block:semi>
}''',
for_iterator_with_items = '''
foreach(var _item in %<sequences.sequence>)
{
var %<iterators.key> = _item.key;
var %<iterators.value> = _item.value;
%<block:semi>
}''',
_otherwise = '''
foreach(%<iterators> in %<sequences>)
{
%<block:semi>
}'''
),
for_range_statement = '''
for (int %<index> = %<.first>; %<index> != %<last>; %<index> += %<.step>)
{
%<block:semi>
}''',
for_range_statement_first = ('%<first>', '0'),
for_range_statement_step = ('%<step>', '1'),
for_iterator = 'var %<iterator>',
for_iterator_zip = "var %<iterators:join ', '>",
for_iterator_with_index = 'int %<index>, var %<iterator>',
for_iterator_with_items = '%<key>, %<value>',
for_sequence = '%<sequence>',
custom_exception = '''
public class %<name> : Exception
{
public %<name>(string message)
: base(message)
{
}
}''',
standard_iterable_call = '''
%<sequences>
.Where(%<iterators.iterator> => %<test:first>)
.Select(%<iterators.iterator> => %<block:first>)
.ToList()''',
aug_assignment = '%<target> %<op>= %<value>',
block = '%<block:semi>',
regex = '@"%<value>'
)
    def params(self, node, indent):
        """Render a typed C# parameter list like 'int x, string y'.

        The type of the j-th parameter is taken from node.pseudo_type[j + 1]
        (index 0 presumably tags the callable itself — verify upstream).
        """
        return ', '.join(
            '%s %s' % (
                self.render_type(node.pseudo_type[j + 1]),
                self._generate_node(k)) for j, k in enumerate(node.params) )
    def anon_params(self, node, indent):
        """Render lambda parameters: '' for none, 'x' for one, '(x, y)' for more."""
        if len(node.params) == 0:
            return ''
        else:
            # Parentheses are only required for more than one parameter.
            l, r = ('(', ')') if len(node.params) > 1 else ('', '')
            return '%s%s%s' % (l, ', '.join(param if isinstance(param, str) else self._generate_node(param) for param in node.params), r)
def anon_block(self, node, indent):
# print(indent);input(node.params[0].y)
if indent < 2:
indent = 2 # anon cant be before method lvl
if len(node.block) == 1:
if node.block[0].type == 'implicit_return' or node.block[0].type == 'explicit_return':
e = node.block[0].value
else:
e = node.block[0]
b = self._generate_node(e)
return ' ' + b
else:
b = ';\n'.join(self.offset(indent + 1) + self._generate_node(e, indent + 1) for e in node.block) + ';\n'
return ' {\n%s%s}' % (b, self.offset(indent))
def class_definitions(self, node, depth):
result = '\n'.join(self._generate_node(k) for k in node.definitions if k.type == 'class_definition')
if result:
return result + '\n'
else:
return ''
def function_definitions(self, node, depth):
result = '\n'.join(self.offset(1) + self._generate_node(f, 1) for f in node.definitions if f.type == 'function_definition')
if result:
return result + '\n'
else:
return ''
def base(self, node, depth):
if node.base:
return ' : %s' % node.base
else:
return ''
    def first_sequence(self, node, depth):
        """Render the first sequence of a zip-style for loop (its Length drives the loop)."""
        return self._generate_node(node.sequences.sequences[0])
    def zip_iterators(self, node, depth):
        """Emit one 'var <name> = <sequence>[_index];' line per zipped iterator.

        Only lines after the first get the indentation offset, because the
        template already positions the first line.
        """
        return '\n'.join(
            '%svar %s = %s;' % (
                self.offset(depth) if j else '',
                q.name,
                self._generate_node(
                    Node('index',
                        sequence=node.sequences.sequences[j],
                        index=local('_index', 'Int'),
                        pseudo_type=node.sequences.sequences[j].pseudo_type[1])))
            for j, q
            in enumerate(node.iterators.iterators))
    def tuple_index(self, node, depth):
        """C# tuple members are 1-based (Item1, Item2, ...), hence the + 1."""
        return str(node.index.value + 1)
    def op(self, node, depth):
        """Translate pseudo boolean operators via OPS; pass all others through."""
        return OPS.get(node.op, node.op)
def char(self, node, depth):
if node.value == "'":
return "'\\''"
else:
return "'%s'" % node.value
    # args starting from 0
    def comparison(self, node, depth):
        """Render a comparison as 'left op right'.

        The special case folds 'x + c <op> n' (int literals on both sides)
        into 'x <op> n - c' — apparently to normalize argv-index checks (see
        the commented-out attr/ARGV conditions). NOTE(review): it mutates the
        AST node in place, so rendering the same node twice would shift the
        constant again — confirm nodes are rendered only once.
        """
        if node.left.type != 'binary_op' or node.left.op != '+' or node.left.right.type != 'int' or node.right.type != 'int':# 'attr' or node.left.object.type != 'local' or node.left.object.name != 'ARGV':
            pass
        else:
            node.right.value -= node.left.right.value
            node.left = node.left.left
        return '%s %s %s' % (self.binary_left(node, depth), node.op, self.binary_right(node, depth))
    def z(self, node, depth):
        # NOTE(review): leftover debugging hook -- prints the node's `y`
        # payload and then BLOCKS on input() before emitting a placeholder
        # token. Should probably be removed or gated before release; confirm
        # nothing downstream relies on the literal '!!!' output.
        print(node.y)
        input()
        return '!!!'
def placeholders(self, node, depth):
return ', '.join(self._generate_node(child.value) for child in node.args[1::2])
| 2.390625 | 2 |
tests/test_motif_shortened.py | ymoon06/htsinfer | 0 | 12762643 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 2 01:28:58 2020
"""
import unittest
from htsinfer.read_motif_v6 import find_overlaps
class Test_Inputs(unittest.TestCase):
    """Argument validation for find_overlaps: arity, types and value ranges."""
    def test_call(self):
        """Only 3 or 4 positional arguments are accepted."""
        # 0 arguments, not acceptable
        with self.assertRaises(TypeError):
            find_overlaps()
        # 1 argument, not acceptable
        with self.assertRaises(TypeError):
            find_overlaps("")
        # 2 arguments, not acceptable
        with self.assertRaises(TypeError):
            find_overlaps("", "")
        # 3 or 4 arguments, acceptable
        find_overlaps("A", "A", 1)
        find_overlaps("A", "A", 1, True)
        # 5 arguments, not acceptable
        with self.assertRaises(TypeError):
            find_overlaps("", "", 0, True, 0)
    def test_positional_arguments(self):
        """Positional arguments must be (str, str, int, bool)."""
        # Check that argument 1 is of type string
        with self.assertRaises(TypeError):
            find_overlaps(0, "", 1, True)
        # Check that argument 2 is of type string
        with self.assertRaises(TypeError):
            find_overlaps("", 0, 1, True)
        # Check that argument 3 is of type int
        with self.assertRaises(TypeError):
            find_overlaps("", "", "", True)
        # Check that argument 4 is of type bool
        with self.assertRaises(TypeError):
            find_overlaps("", "", 1, "")
    def test_arguments_types(self):
        """Keyword arguments must be (str, str, int, bool)."""
        # Check that motif is of type string
        with self.assertRaises(TypeError):
            find_overlaps(motif=0, read="", min_overlap=1, full_contain=False)
        # Check that read is of type string
        with self.assertRaises(TypeError):
            find_overlaps(motif="", read=0, min_overlap=1, full_contain=False)
        # Check that min_overlap is of type int
        with self.assertRaises(TypeError):
            find_overlaps(motif="", read="",
                          min_overlap="", full_contain=False)
        # Check that full_contain is of type bool
        # (comment previously named a non-existent `use_n` parameter)
        with self.assertRaises(TypeError):
            find_overlaps(motif="", read="", min_overlap=1, full_contain=6)
    def test_argument_range(self):
        """Value-range checks raise ValueError."""
        # Check that motif is not accepted if it is an empty string
        with self.assertRaises(ValueError):
            find_overlaps(motif="", read="A",
                          min_overlap=1, full_contain=False)
        # Check that read is not accepted if it is an empty string
        with self.assertRaises(ValueError):
            find_overlaps(motif="A", read="",
                          min_overlap=1, full_contain=False)
        # Check that min_overlap is not accepted if smaller than 1
        with self.assertRaises(ValueError):
            find_overlaps(motif="A", read="A",
                          min_overlap=0, full_contain=False)
        # check if read is longer than motif
        with self.assertRaises(ValueError):
            find_overlaps(motif="AAAAAA", read="A",
                          min_overlap=1, full_contain=False)
        # check if motif contains lowercase character
        with self.assertRaises(ValueError):
            find_overlaps(motif="a", read="AAAAAA",
                          min_overlap=1, full_contain=False)
        # check if read contains lowercase character
        with self.assertRaises(ValueError):
            find_overlaps(motif="A", read="Aa",
                          min_overlap=1, full_contain=False)
    def test_return_value(self):
        """The return value is a list."""
        rv = find_overlaps(motif="G", read="AAAA",
                           min_overlap=1, full_contain=False)
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...))
        self.assertIsInstance(rv, list)
class TestMatchFull(unittest.TestCase):
    """Exact (full) motif matches; assertEqual replaces assertTrue(x == y)
    so failures report the actual and expected values."""
    def test_single_match(self):
        rv = find_overlaps(motif="GGA", read="TACGGGACGAT",
                           min_overlap=1, full_contain=False)
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0], (4, 1))
    def test_single_match_start(self):
        rv = find_overlaps(motif="ACGGG", read="ACGGGACGAT",
                           min_overlap=1, full_contain=False)
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0], (0, 1))
    def test_single_match_end(self):
        rv = find_overlaps(motif="CGA", read="TACGGGACGA",
                           min_overlap=1, full_contain=False)
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0], (7, 1))
    def test_multi_match_not_overlapping(self):
        rv = find_overlaps(motif="CGA", read="TATTCGATTAGCGAAT",
                           min_overlap=1, full_contain=False)
        self.assertEqual(len(rv), 2)
        self.assertEqual(rv[0], (4, 1))
        self.assertEqual(rv[1], (11, 1))
    def test_multi_match_overlapping(self):
        rv = find_overlaps(motif="CGACGA", read="TATTCGACGACGATTAGCGAAT",
                           min_overlap=1, full_contain=False)
        self.assertEqual(len(rv), 2)
        self.assertEqual(rv[0], (4, 1))
        self.assertEqual(rv[1], (7, 1))
    def test_return_value_full(self):
        rv = find_overlaps(motif="AAA", read="AAAA",
                           min_overlap=1, full_contain=True)
        self.assertEqual(rv[0], (0, 1))
class TestMatchPartial(unittest.TestCase):
    """Partial overlaps at read boundaries; scores are matched/motif-length."""
    def test_match_start(self):
        rv = find_overlaps(motif="GTA", read="TACGGGACGA",
                           min_overlap=2, full_contain=False)
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0], (0, 2/3))
    def test_match_end(self):
        rv = find_overlaps(motif="GTAAA", read="TACGGGACGAGT",
                           min_overlap=2, full_contain=False)
        self.assertEqual(len(rv), 1)
        self.assertEqual(rv[0], (10, 2/5))
class TestMatchMixed(unittest.TestCase):
    """Mixed full and partial matches in the same read."""
    def test_multi_match_start(self):
        rv = find_overlaps(motif="GTA", read="TACGGGTAGA",
                           min_overlap=2, full_contain=False)
        self.assertEqual(len(rv), 2)
        self.assertEqual(rv[0], (0, 2/3))
        self.assertEqual(rv[1], (5, 1))
    def test_multi_match_end(self):
        rv = find_overlaps(motif="GTAAA", read="ACGGTAAAAGT",
                           min_overlap=2, full_contain=False)
        self.assertEqual(len(rv), 2)
        self.assertEqual(rv[0], (3, 1))
        self.assertEqual(rv[1], (9, 2/5))
# further to implement. to give stress and see if it works well.
class NoMatch(unittest.TestCase):
    """A motif absent from the read yields an empty result list."""
    def test_noMatch(self):
        rv = find_overlaps(motif="GTA", read="AAAAAAAAAAA",
                           min_overlap=2, full_contain=False)
        self.assertEqual(len(rv), 0)
class TestLongReads(unittest.TestCase):
    # Placeholder for stress tests against long reads loaded from a file.
    def test_file_1(self):
        # TODO: implement -- currently a stub that always passes.
        pass
if __name__ == '__main__':
    # Run the full test suite when this module is executed directly.
    unittest.main()
| 2.78125 | 3 |
SORTING/Easy/Height Checker/Code.py | HassanRahim26/LEETCODE | 3 | 12762644 | # PROBLEM LINK:- https://leetcode.com/problems/height-checker/
class Solution:
    def heightChecker(self, heights: List[int]) -> int:
        """Count indices where heights differs from its sorted order.

        LeetCode 1051: ``expected`` is heights sorted non-decreasingly; the
        answer is the number of positions that do not already match it.
        Replaces the manual copy/sort/counter loop with sorted + zip + sum.
        """
        expected = sorted(heights)
        return sum(1 for current, want in zip(heights, expected)
                   if current != want)
| 3.734375 | 4 |
start.py | DennyDai/angr-management | 474 | 12762645 | <filename>start.py
#!/usr/bin/env python3
from angrmanagement.__main__ import main
if __name__ == '__main__':
    # Delegate to angr-management's packaged entry point when run as a script.
    main()
| 1.609375 | 2 |
ExerciciosComStrings/13.py | TheCarvalho/atividades-wikipython | 0 | 12762646 |
# todo 13. Jogo da palavra embaralhada. Desenvolva um jogo em que o usuário tenha que adivinhar uma palavra que será mostrada com as letras embaralhadas. O programa terá uma lista de palavras lidas de um arquivo texto e escolherá uma aleatoriamente. O jogador terá seis tentativas para adivinhar a palavra. Ao final a palavra deve ser mostrada na tela, informando se o usuário ganhou ou perdeu o jogo.
from random import shuffle, choice
from os import system
from cores import *  # provides the `cor` color-code dict used below

# Pick a random secret word from the word file (split() already yields str,
# so the old list(map(str, ...)) wrapper was redundant).
with open("palavras.txt", "r") as file:
    allText = file.read()
words = allText.split()
palavra = choice(words)

# Shuffle a copy of the word's letters into the scrambled prompt.
# (Replaces the manual index loop + string += accumulation.)
lista = list(palavra)
shuffle(lista)  # NOTE(review): shuffle may leave short words unchanged -- confirm acceptable
nova = ''.join(lista)

tentativa = 6
system('cls')  # NOTE(review): 'cls' is Windows-only; confirm target platform
while True:
    print(f'Palavra embaralhada > {cor["amarelo"]}{nova}{cor["limpar"]} <\n')
    guess = str(input(f'{tentativa} tentativas restantes: ')).lower().strip()
    system('cls')
    if guess == palavra:
        # Correct guess: announce the win and stop.
        print(f'{cor["verde"]}Parabéns, vc venceu!!{cor["limpar"]}')
        print(f'A palavra era {palavra}')
        break
    else:
        tentativa -= 1
        if tentativa == 0:
            # Out of attempts: reveal the word and stop.
            print(f'{cor["vermelho"]}GAME-OVER{cor["limpar"]}')
            print(f'A palavra era {palavra}')
            break
| 4.25 | 4 |
Wall-Do/downloader.py | ananyo141/Wall-Do | 0 | 12762647 | """
This module contains all the required backend stuff (logic)
required to handle and download from the wallpaper website.
URL signature looks like:
https://wall.alphacoders.com/search.php?search={searchKey}&page={PageNo}
The website may internally store some popular keywords like 'spiderman'
in collections and serve them with collection ids, need to look out for those
variations.
"""
import os, sys, logging, time
import threading, requests, bs4
from logger import mainlogger
from exceptions import (InvalidDownloadNum, MaxRetriesCrossed,
SearchReturnedNone)
# get module logger
downloadLogger = logging.getLogger('main.downloader')
"""
A Wallpaper Downloader Class for https://wall.alphacoders.com
"""
class AlphaDownloader:
    """
    A threaded wallpaper downloader for https://wall.alphacoders.com

    Scrapes search-result pages for image links and downloads the images in
    batches of worker threads.  Per-run counters live on the instance;
    session-wide totals are accumulated as class attributes (shared by all
    instances).
    """
    # Search URL template; the site may redirect popular keywords to a
    # collection id, which is captured in self._queryStrServed on first fetch.
    queryStr = \
        'https://wall.alphacoders.com/search.php?search=%(searchKey)s&page=%(pageNo)d'
    # Browser-like User-Agent so the site serves normal pages.
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) '
        'AppleWebKit/537.36 (KHTML, like Gecko) '
        'Chrome/72.0.3626.28 Safari/537.36'
    }
    # Category prefixes the site prepends to some image names; stripped
    # when building local filenames.
    prefixes = ('Movie ', 'Video ', 'Comics ', 'TV Show ')
    # Chunk size (bytes) used when streaming an image to disk.
    chunksize = 10000000
    # For current session (totals across all runs).
    totalSize = 0
    totalDownloads = 0
    printFormat = ("Current Run :\n"
        "Images Downloaded : %(numDownloaded)d, Time taken: %(lastDownloadTime)d secs\n"
        "Number of Pages : %(numPages)d, Downloaded: %(downloadSize).3f MB\n\n"
        "Session Details:\n"
        "Total Images : %(totalDownloads)d, Total Size: %(totalSize).3f MB\n")
    def __init__(self, trace=False):
        """Initialize per-instance state and a persistent HTTP session."""
        self.imageMetaDict = dict()     # name -> link of downloaded images
        self.trace = trace              # print progress to stdout when True
        self.mutex = threading.Lock()   # guards counters shared by workers
        self._queryStrServed = None     # query string served by the website
        self.downloadSession = requests.Session()
        self.downloadSession.headers.update(self.headers)
    def startDownload(self,
                      searchKey,
                      numImages,
                      downloadDir = os.curdir,
                      maxretries = 2,
                      imgPerThread = 5):
        """
        Top-level entry point: download numImages wallpapers matching
        searchKey into downloadDir.

        Raises InvalidDownloadNum for a non-positive request and
        MaxRetriesCrossed when the requested count could not be reached
        within maxretries passes.
        """
        if numImages <= 0:
            raise InvalidDownloadNum
        # Make sure download dir exists
        os.makedirs(downloadDir, exist_ok=True)
        downloadLogger.info(f'{downloadDir = }')
        self.downloadDir = downloadDir
        # Per-run state
        self.searchKey = searchKey
        self.numImages = numImages
        self.numPages = 0
        self.numDownloaded = 0
        self.downloadSize = 0
        self.lastDownloadTime = None
        MaxRetries = maxretries
        start = time.time()
        self._queryStrServed = None  # query string returned by website
                                     # (may be a collection id)
        retries = 0
        # Individual link failures only log; this loop compensates by
        # re-running until enough images are on disk or retries run out.
        while self.numDownloaded < self.numImages and retries < MaxRetries:
            self._runDownload(imgPerThread)
            retries += 1
        self.lastDownloadTime = time.time() - start
        self.totalDownloads += self.numDownloaded
        self.sessionDict = dict(
            numDownloaded = self.numDownloaded,
            lastDownloadTime = self.lastDownloadTime,
            numPages = self.numPages,
            downloadSize = self.bytesToMiB(self.downloadSize),
            totalDownloads = self.totalDownloads,
            totalSize = self.bytesToMiB(self.totalSize),
        )
        if self.trace:
            print('\n', ' Stats: '.center(50, '*'))
            print(self.printFormat % self.sessionDict)
        if retries >= MaxRetries and self.numDownloaded < self.numImages:
            raise MaxRetriesCrossed("Max Retries; check log for error details")
    def _downloadSq(self, imgList):
        """Worker-thread target: sequentially download a batch of links."""
        for imgname, imglink in imgList:
            self.downloadImage(imglink, imgname)
    def _runDownload(self, ImgPerThread=5):
        """
        Threaded download logic: fetch links page by page and hand them to
        worker threads in batches of ImgPerThread.

        Assumes every link works; does not verify that the number actually
        downloaded satisfies the request.  Not to be invoked directly -- use
        the wrapper method startDownload().
        """
        threads = []
        imgArg = []          # current batch of (name, link) tuples
        finished = False
        imgLinksFetched = 0
        while not finished:
            self.numPages += 1
            for imgTuple in self.fetchLinks(self.searchKey, self.numPages):
                if imgLinksFetched >= self.numImages:
                    finished = True
                else:
                    imgArg.append(imgTuple)
                # Dispatch a batch when it is full, or flush the remainder
                # once the requested count has been reached.
                if len(imgArg) == ImgPerThread \
                   or (finished and imgArg):
                    downloadLogger.info(f'{len(imgArg) = }')
                    downloadLogger.debug(f'{imgLinksFetched = }')
                    downloadLogger.debug(f'{self.numPages = }')
                    thread = threading.Thread(target=self._downloadSq, args=(imgArg,))
                    threads.append(thread)
                    thread.start()
                    imgArg = []
                imgLinksFetched += 1
                if finished: break  # requested number of links reached
        for thread in threads: thread.join()
    def downloadImage(self, link, name=''):
        """Download a single image link and update the shared counters."""
        # Use the trailing id of the image link (e.g. '1149.jpg') to make
        # the image filename truly unique.
        imgfilename = os.path.join(self.downloadDir,
                                   name + '_' + os.path.basename(link))
        # Abort if the filename already exists (e.g. left by a prior retry).
        if os.path.exists(imgfilename):
            downloadLogger.warning(f'{imgfilename} exists; possible bug')
            return
        try:
            image = self.downloadSession.get(link)
            image.raise_for_status()
        except Exception as exc:
            # Best effort: log and skip; startDownload()'s retry loop
            # compensates for missed images.
            downloadLogger.error(f'Error saving image: {link}\n{str(exc)}')
            return
        # Save the downloaded image (encode the filename so os-specific
        # restrictions are delegated to the underlying platform).
        with open(imgfilename.encode(), 'wb') as imgfile:
            for chunk in image.iter_content(self.chunksize):
                imgfile.write(chunk)
        with self.mutex:
            imgSize = os.path.getsize(imgfilename)
            self.downloadSize += imgSize
            self.totalSize += imgSize
            self.numDownloaded += 1
            self.imageMetaDict[name] = link
            if self.trace:
                print(f'Downloaded: {name}...')
        self.imgfilename = imgfilename  # last saved file, kept for subclasses
    def restoreMetadata(self, imageMetaDict, imgPerThread=5):
        """Re-download images from a previously saved name -> link mapping."""
        imgList = list(imageMetaDict.items())
        threads = []
        while imgList:
            imgArg, imgList = imgList[:imgPerThread], imgList[imgPerThread:]
            thread = threading.Thread(target=self._downloadSq, args=(imgArg,))
            thread.start()
            threads.append(thread)
        for thread in threads: thread.join()
        # BUGFIX: the old code called msgb.showinfo() here, but `msgb` is
        # never imported in this module, so every restore ended in a
        # NameError.  This backend class should not raise GUI dialogs;
        # log (and optionally trace) instead.
        downloadLogger.info('Previous session was successfully restored')
        if self.trace:
            print('Previous session was successfully restored')
    def fetchLinks(self, searchKey, start=1, stop=None, step=1):
        """
        Yield (imageName, imageLink) for result pages start..stop (exclusive).

        Optional: stop: if not given, scrape links for the start page only.
        step: default 1; can travel backwards if given a negative value.
        """
        if stop is None:  # generate links for given page only
            stop = start + 1
        downloadLogger.info(f'{start = }, {stop = }, {step = }')
        for pageNum in range(start, stop, step):
            # Construct page url: first pass uses the base query, later
            # passes reuse the query string the site served.
            pageInfoDict = dict(searchKey=searchKey, pageNo=pageNum)
            pageUrl = self._queryStrServed + f'&page={pageNum}' \
                      if self._queryStrServed \
                      else self.queryStr % pageInfoDict
            downloadLogger.info(f'{pageUrl = }')
            # fetch page
            try:
                pageResponse = self.downloadSession.get(pageUrl)
                pageResponse.raise_for_status()
                downloadLogger.info(f'{pageResponse.status_code = }')
            except Exception as exc:
                downloadLogger.error(f'Error Downloading Page: {pageNum}\n{str(exc)}')
                continue
            # parse and get the image links
            mainPageSoup = bs4.BeautifulSoup(pageResponse.text, 'lxml')
            # Capture the query string the site actually serves (may be a
            # collection id for selected keywords).
            if self._queryStrServed is None:
                try:
                    pageUrl = mainPageSoup.select('div.page_container')[0].get('data-url')
                except IndexError:
                    raise SearchReturnedNone("Target Not found") from None
                self._queryStrServed = pageUrl
                downloadLogger.debug(f'{pageUrl = }')
            # get the image elements with class='img-responsive'
            imageTags = mainPageSoup.select('img.img-responsive')
            downloadLogger.debug(f'{len(imageTags) = }')
            # generate imagename, imagelink for every image found
            for imageTag in imageTags:
                # BUGFIX: str.rstrip()/str.lstrip() take a *character set*,
                # not a suffix/prefix, so the old code chopped arbitrary
                # leading/trailing letters off image names.  Remove the
                # literal suffix/prefix instead.
                imageName = imageTag.get('alt')
                suffix = ' HD Wallpaper | Background Image'
                if imageName.endswith(suffix):
                    imageName = imageName[:-len(suffix)]
                imageName = imageName[:50]
                # strip unnecessary category prefixes (if present)
                for prefix in self.prefixes:
                    if imageName.startswith(prefix):
                        imageName = imageName[len(prefix):]
                        break
                imageLink = imageTag.get('src').replace('thumbbig-', '')
                yield imageName, imageLink
    @staticmethod
    def bytesToMiB(sizeInBy):
        " Return size in bytes converted to MiB "
        return sizeInBy / (1024 * 1024)
| 2.953125 | 3 |
pykeops/torch/kernel_product/__init__.py | mdiazmel/keops | 695 | 12762648 | <reponame>mdiazmel/keops<gh_stars>100-1000
import warnings

# DeprecationWarnings are ignored by default; re-enable them so importers of
# this legacy module actually see the notice below.
# NOTE(review): simplefilter("default") resets the warning filter globally --
# confirm this side effect on importing applications is intended.
warnings.simplefilter("default")
warnings.warn(
    "[pyKeOps]: the kernel_product syntax is deprecated. Please consider using the LazyTensor helper instead.",
    DeprecationWarning,
)

# Re-export the legacy public API (kept after the warning, so the warning
# fires before any submodule import side effects).
from .kernels import Kernel, kernel_product, kernel_formulas
from .formula import Formula
| 1.179688 | 1 |
screensaver/src/PhotoScreensaver.py | FoxyRabbit67/enigma2-plugins | 0 | 12762649 | from enigma import ePicLoad, eTimer, eWindowAnimationSet, eFloatAnimation, eLinearInterpolator, eWindowAnimationManager, ePixmap, eActionMap, getDesktop
from Components.config import config
from Components.ActionMap import ActionMap
from Components.GUIComponent import GUIComponent
from Components.Pixmap import Pixmap
from Screens.Screen import Screen
from Tools.Directories import fileExists
from Tools.Log import Log
from twisted.web.client import downloadPage
class MyPixmap(Pixmap):
    """Pixmap that installs the screensaver's slow-fade show/hide animation
    and width-based scaling as soon as its GUI instance is created."""
    def postWidgetCreate(self, instance):
        Pixmap.postWidgetCreate(self, instance)
        self.setupAnimation()

    def setupAnimation(self):
        widget = self.instance
        if not widget:
            return
        widget.setShowHideAnimation(PhotoScreensaver.ANIMATION_KEY_FADE)
        widget.setScale(ePixmap.SCALE_TYPE_WIDTH)
class PhotoScreensaver(Screen):
    """Fullscreen photo screensaver: shows after an inactivity delay, cycles
    random photos downloaded from unsplash, hides on any key press."""
    # Skin: a single scalable pixmap covering the whole display.
    skin = """<screen name="Screensaver" title="Screensaver" position="center,center" size="fill_parent,fill_parent" backgroundColor="#000000">
        <widget name="wallpaper" position="0,0" size="fill_parent,fill_parent" zPosition="1"/>
    </screen>"""

    # Download target for the next photo.
    TEMPFILE = "/tmp/wallpaper"
    # Key identifying the fade animation set registered in _setupAnimation().
    ANIMATION_KEY_FADE = "wallpaper_slow_fade"

    def __init__(self, session):
        Screen.__init__(self, session)
        self["actions"] = ActionMap(["OkCancelActions"], {
            "ok": self._onOk,
            "cancel": self.close},
            - 2)
        # Bind ALL key presses at highest priority so any key hides the saver.
        self.highPrioActionSlot = eActionMap.getInstance().bindAction('', -0x7FFFFFFF, self._onKeypress) #highest prio
        self._pixmap = MyPixmap()
        self["wallpaper"] = self._pixmap
        self._setupAnimation()
        # picload setup: decode photos to the full desktop resolution.
        size = getDesktop(0).size()
        width, height = size.width(), size.height()
        self._picload = ePicLoad()
        self.__picload_conn = self._picload.PictureData.connect(self._onPixmapReady)
        self._picload.setPara((width, height, width, height, False, 1, '#ff000000'))
        self._nextPixmap = None
        # _timer: time between photo changes while visible.
        self._timer = eTimer()
        self.__timer_conn = self._timer.timeout.connect(self._onTimeout)
        # _inactivityTimer: delay before the saver appears after a key press.
        self._inactivityTimer = eTimer()
        self.__inactivityTimer_conn = self._inactivityTimer.timeout.connect(self._onInactivityTimeout)
        self._immediateShow = True
        self._isEnabled = False
        self._isInitial = True
        self.onShow.append(self._onShow)
        self.onClose.append(self._onClose)
        # Rebuild the animation whenever the user changes the fade speed.
        config.plugins.screensaver.photo.speed.addNotifier(self._setupAnimation, initial_call = False)
    def _onShow(self):
        # On first show, display a photo immediately; afterwards wait for the
        # photo-retention timer.
        self._immediateShow = self._isInitial
        if not self._immediateShow:
            self._restartTimer()
        self._check()
    def _onClose(self):
        # Detach the config notifier so the closed screen is not kept alive.
        config.plugins.screensaver.photo.speed.removeNotifier(self._setupAnimation)
    def _setupAnimation(self, *args):
        # Register a linear alpha fade (0 -> 1) whose duration comes from the
        # user-configured speed (seconds -> ms).
        animset = eWindowAnimationSet.create()
        animset.setKey(PhotoScreensaver.ANIMATION_KEY_FADE)
        animset.setName("Slow wallpaper fade")
        animset.setInternal(True)
        interpolator = eLinearInterpolator.create()
        duration = int(config.plugins.screensaver.photo.speed.value) * 1000
        animset.setAlpha(eFloatAnimation.create(duration, 0.0, 1.0, False, interpolator))
        eWindowAnimationManager.setAnimationSet(animset)
        self._pixmap.setupAnimation()
    def _check(self):
        # Decode a photo already on disk, otherwise start a fresh download.
        if fileExists(self.TEMPFILE):
            self._onFileReady()
        else:
            self._loadNext()
    def isEnabled(self):
        return self._isEnabled
    def setEnabled(self, enabled):
        """Enable/disable the screensaver; enabling also (re)arms the
        inactivity timer, disabling stops all timers."""
        Log.i("%s" %(enabled,))
        if enabled == self._isEnabled:
            return
        self._isEnabled = enabled
        if self._isEnabled:
            self._onKeypress()
            self._check()
        else:
            self._reset()
    enabled = property(isEnabled, setEnabled)
    def _reset(self):
        # Drop any pending photo and stop both timers.
        self._nextPixmap = None
        self._timer.stop()
        self._inactivityTimer.stop()
    def _onKeypress(self, *args):
        # Any key: hide the saver and restart the inactivity countdown.
        self.hide()
        self._reset()
        if self._isEnabled:
            self._inactivityTimer.startLongTimer(int(config.plugins.screensaver.delay.value))
        # NOTE(review): returning 0 presumably lets the key event propagate to
        # normal processing -- confirm against eActionMap semantics.
        return 0
    def _onInactivityTimeout(self):
        self.show()
    def _onOk(self):
        pass
    def _loadNext(self):
        # Fetch a random photo asynchronously into TEMPFILE.
        Log.i("Getting next photo")
        url = "https://source.unsplash.com/random/1920x1080"
        self._d = downloadPage(url, self.TEMPFILE).addCallbacks(self._onFileReady, self._failed)
    def _onFileReady(self, *args):
        self._picload.startDecode(self.TEMPFILE)
    def _failed(self, *args):
        Log.w(args)
    def _onPixmapReady(self, picInfo=None):
        # Decoded pixmap is available; show it now on the very first pass,
        # otherwise keep it until the retention timer fires.
        Log.d(picInfo)
        if not self._isEnabled:
            self._reset()
            return
        self._picInfo = picInfo
        self._nextPixmap = self._picload.getData()
        if self._immediateShow:
            self._immediateShow = False
            self._onTimeout()
    def _restartTimer(self):
        self._timer.startLongTimer(int(config.plugins.screensaver.photo.retention.value))
    def _showNext(self):
        # Display the pending pixmap, if any; returns True on success.
        if not self._isEnabled:
            self._reset()
            return
        if self._nextPixmap:
            self._isInitial = False
            self._pixmap.setPixmap(self._nextPixmap)
            self._nextPixmap = None
            self._restartTimer()
            return True
        return False
    def _onTimeout(self):
        # Retention timer fired: show the pending photo and prefetch the next
        # one; if nothing was pending, show immediately once decoding finishes.
        if self._showNext():
            self._loadNext()
            self._restartTimer()
        else:
            self._immediateShow = True
| 1.90625 | 2 |
mitmproxy/exceptions.py | dolfly/mitmproxy | 0 | 12762650 | <filename>mitmproxy/exceptions.py
"""
We try to be very hygienic regarding the exceptions we throw:
Every Exception mitmproxy raises shall be a subclass of ProxyException.
See also: http://lucumr.pocoo.org/2014/10/16/on-error-handling/
"""
from __future__ import (absolute_import, print_function, division)
class ProxyException(Exception):
    """
    Base class for all exceptions thrown by mitmproxy.

    Every project-specific error below derives from this type, so callers
    can catch a single root class.
    """
    def __init__(self, message=None):
        # message is optional; Exception.__init__ stores it in self.args.
        # Two-argument super() form kept for Python 2 compatibility
        # (the module imports from __future__).
        super(ProxyException, self).__init__(message)
class ProtocolException(ProxyException):
    """Base class for protocol-layer errors (see the TLS, SOCKS5 and HTTP
    subclasses below)."""
    pass
class TlsProtocolException(ProtocolException):
    """Protocol error specific to TLS handling."""
    pass
class ClientHandshakeException(TlsProtocolException):
    """TLS handshake failure with a client.

    The offending server is kept on the ``server`` attribute so handlers can
    report which host the handshake concerned.
    """
    def __init__(self, message, server):
        super(ClientHandshakeException, self).__init__(message)
        self.server = server
class Socks5ProtocolException(ProtocolException):
    """Protocol error specific to the SOCKS5 proxy mode."""
    pass
class HttpProtocolException(ProtocolException):
    """Protocol error specific to HTTP handling."""
    pass
class ServerException(ProxyException):
    # Presumably raised for errors in the proxy server itself (as opposed to
    # protocol errors) -- confirm against raising sites.
    pass
class ContentViewException(ProxyException):
    # Presumably raised by content-view (body rendering) code -- confirm
    # against raising sites.
    pass
class ReplayException(ProxyException):
    # Presumably raised by flow-replay code -- confirm against raising sites.
    pass
class ScriptException(ProxyException):
    # Presumably raised when a user script fails to load or run -- confirm
    # against raising sites.
    pass
| 2.328125 | 2 |