| blob_id (string, len 40) | directory_id (string, len 40) | path (string, len 2-616) | content_id (string, len 40) | detected_licenses (list, len 0-69) | license_type (string, 2 classes) | repo_name (string, len 5-118) | snapshot_id (string, len 40) | revision_id (string, len 40) | branch_name (string, len 4-63) | visit_date (timestamp[us]) | revision_date (timestamp[us]) | committer_date (timestamp[us]) | github_id (int64, 2.91k-686M, nullable) | star_events_count (int64, 0-209k) | fork_events_count (int64, 0-110k) | gha_license_id (string, 23 classes) | gha_event_created_at (timestamp[us]) | gha_created_at (timestamp[us]) | gha_language (string, 213 classes) | src_encoding (string, 30 classes) | language (string, 1 class) | is_vendor (bool) | is_generated (bool) | length_bytes (int64, 2-10.3M) | extension (string, 246 classes) | content (string, len 2-10.3M) | authors (list, len 1) | author_id (string, len 0-212) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6f9fd7fd9d9ed579db5194cea206a47fd8a0b308
|
9e5f71cec02ae4cb58a58b6fc33b75b5e2555022
|
/GO4StructuralPatterns/BridgePattern/MessageSenderBase.py
|
7b5325621725584d73454a674e6752607ae4f17a
|
[] |
no_license
|
sumeshsg/GO4DesignPattern
|
a764335412e22be9d945e321e67c1b9712bf71a2
|
c2d3625ae03aeb0816191a148d9db24e24b78c76
|
refs/heads/master
| 2022-09-07T03:18:17.217719
| 2020-05-29T06:40:24
| 2020-05-29T06:40:24
| 250,414,539
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 155
|
py
|
from abc import abstractmethod
class MessageSenderBase(object):
@abstractmethod
def send_message(self, title, details, importance):
pass
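# Illustrative concrete implementor for the Bridge pattern (a sketch, not part of the original repo):
# class EmailMessageSender(MessageSenderBase):
#     def send_message(self, title, details, importance):
#         print("EMAIL [{0}] {1}: {2}".format(importance, title, details))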
|
[
"sumeshsg@nbnco.com.au"
] |
sumeshsg@nbnco.com.au
|
f4291ee93deaf810818660a525dacb66b3a2eb7c
|
6227d63532f2d657ef66d90709a3a1f484e9784b
|
/oviqpr/wsgi.py
|
02a76e64db03ba8feffdd1981fb6c14b65e4ad1b
|
[] |
no_license
|
vassily-la/oviq
|
2edff4e61e5ac8cb94b462e2ed5c5bec2c5b014a
|
65a86ca5cddd0180e8309a7659eaab6a35a5c785
|
refs/heads/master
| 2021-04-28T03:05:02.908010
| 2018-02-22T17:14:32
| 2018-02-22T17:14:32
| 122,131,126
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 390
|
py
|
"""
WSGI config for oviqpr project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "oviqpr.settings")
application = get_wsgi_application()
|
[
"vasil.deville@gmail.com"
] |
vasil.deville@gmail.com
|
019e3fc12aa54ec0097b26e82f09f1a9ae633952
|
712c7fb5c4931abb337e29574d1223e7fa1ec2c9
|
/optimizely/helpers/types.py
|
a28aca67a3b6ba528a149e4a11cf0a05f082f39e
|
[
"Apache-2.0",
"BSD-3-Clause"
] |
permissive
|
optimizely/python-sdk
|
008144c79e701f9db3968b5dfe862fb39fe8dcca
|
bf000e737f391270f9adec97606646ce4761ecd8
|
refs/heads/master
| 2023-07-19T09:05:51.287871
| 2023-07-18T17:24:17
| 2023-07-18T17:24:17
| 64,881,795
| 34
| 40
|
Apache-2.0
| 2023-08-23T17:47:52
| 2016-08-03T21:50:18
|
Python
|
UTF-8
|
Python
| false
| false
| 2,869
|
py
|
# Copyright 2022, Optimizely
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import Optional, Any
from sys import version_info
if version_info < (3, 8):
from typing_extensions import TypedDict
else:
from typing import TypedDict # type: ignore
# Intermediate types for type checking deserialized datafile json before actual class instantiation.
# These aren't used for anything other than type signatures
class BaseEntity(TypedDict):
pass
class BaseDict(BaseEntity):
"""Base type for parsed datafile json, before instantiation of class objects."""
id: str
key: str
class EventDict(BaseDict):
"""Event dict from parsed datafile json."""
experimentIds: list[str]
class AttributeDict(BaseDict):
"""Attribute dict from parsed datafile json."""
pass
class TrafficAllocation(BaseEntity):
"""Traffic Allocation dict from parsed datafile json."""
endOfRange: int
entityId: str
class VariableDict(BaseDict):
"""Variable dict from parsed datafile json."""
value: str
type: str
defaultValue: str
subType: str
class VariationDict(BaseDict):
"""Variation dict from parsed datafile json."""
variables: list[VariableDict]
featureEnabled: Optional[bool]
class ExperimentDict(BaseDict):
"""Experiment dict from parsed datafile json."""
status: str
forcedVariations: dict[str, str]
variations: list[VariationDict]
layerId: str
audienceIds: list[str]
audienceConditions: list[str | list[str]]
trafficAllocation: list[TrafficAllocation]
class RolloutDict(BaseEntity):
"""Rollout dict from parsed datafile json."""
id: str
experiments: list[ExperimentDict]
class FeatureFlagDict(BaseDict):
"""Feature flag dict from parsed datafile json."""
rolloutId: str
variables: list[VariableDict]
experimentIds: list[str]
class GroupDict(BaseEntity):
"""Group dict from parsed datafile json."""
id: str
policy: str
experiments: list[ExperimentDict]
trafficAllocation: list[TrafficAllocation]
class AudienceDict(BaseEntity):
"""Audience dict from parsed datafile json."""
id: str
name: str
conditions: list[Any] | str
class IntegrationDict(BaseEntity):
"""Integration dict from parsed datafile json."""
key: str
host: str
publicKey: str
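# Illustrative example (not part of the SDK): a parsed-datafile literal matching EventDict
# would look like: {"id": "123", "key": "purchase", "experimentIds": ["exp_1"]}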
|
[
"noreply@github.com"
] |
optimizely.noreply@github.com
|
beecac358761373299eadb88bb99a044d335fbed
|
e2840689c879e82854322cd7a0ef094244b83b2b
|
/main.py
|
6ac589e596e2787d6ba78b01a9e5781efecdb0d9
|
[] |
no_license
|
349/harmoniser
|
cc740d5c73d9109160adb5949c339403c958f608
|
0bad0dcffe971c0657771817e484424d443d365a
|
refs/heads/master
| 2022-10-28T16:29:38.715906
| 2020-06-08T22:30:19
| 2020-06-08T22:30:19
| 270,847,104
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210
|
py
|
melody_file = open("melody.txt", "rt") #open to read melody
melody = melody_file.read() #string melody from melody
melody_file.close()
print(melody)
for x in melody:
    pass
#make an array with each note
|
[
"james.sheridan@outlook.com"
] |
james.sheridan@outlook.com
|
5103cc928788d30e67890665c674dd02fe179634
|
b30468e308671c8ae93d421d76e675713689fbbc
|
/mineral_catalog/minerals/views.py
|
26afdd1b8946c1b80d2d47e17ea71e13f95db390
|
[] |
no_license
|
lfisher1998/Python-Techdegree-Project-6
|
cb6ba0219fc340f590ec931462eb12ca600a6375
|
f6ff8a34734cd01023bdf85dc0a16d7f027225e8
|
refs/heads/master
| 2020-05-05T04:49:59.517885
| 2019-04-05T19:23:36
| 2019-04-05T19:23:36
| 179,726,506
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 469
|
py
|
from collections import OrderedDict
from django.db.models.functions import Lower
from django.http import Http404
from django.shortcuts import render
from .models import Mineral
def mineral_list(request):
minerals = Mineral.objects.all()
return render(request, 'minerals/index.html', {'minerals': minerals})
def mineral_detail(request, pk):
mineral = Mineral.objects.get(pk=pk)
return render(request, 'minerals/detail.html', {'mineral': mineral})
|
[
"noreply@github.com"
] |
lfisher1998.noreply@github.com
|
246dcfe77d429a182463f159f158577ef8353990
|
1f8f423532070a9b49bad7b15af6cd12d0d3e5d0
|
/app/src/applications/user/view/AvlanUserListView.py
|
2395120f5b89bcf128e497a6623991d513adb7db
|
[] |
no_license
|
r2r-dev/avlan-docker
|
357ce99db914660a0ffdb2022ee139f213e5514b
|
1fe6b045bc9d03cbc81b69431e98af45958358d0
|
refs/heads/master
| 2021-06-12T00:54:19.901286
| 2017-01-29T22:22:32
| 2017-01-29T22:22:32
| 68,092,562
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 418
|
py
|
from src.applications.base.view.AvlanBaseView import AvlanBaseView
class AvlanUserListView(AvlanBaseView):
__template = 'webroot/html/AvlanUserListTemplate.tmpl'
def __init__(self, translation=None):
AvlanBaseView.__init__(
self,
self.__template,
translation,
)
self.title = "User List"
self._users = {}
self._users_settings = {}
|
[
"artur.stachecki@gmail.com"
] |
artur.stachecki@gmail.com
|
480778148664d4907f34cb73cc9d20b41eeac656
|
4667bbbc941b743547fb8d39c1e1058ad233c446
|
/5.py
|
c282829913411e79df53cb1cef63a31a602b653d
|
[] |
no_license
|
shamilmhussain/QR-code
|
6e3e0d8beb79807e50c5e63b8b273cd9c1c9ba32
|
453f16d799b9b3c7908914e853b351630d956724
|
refs/heads/master
| 2020-06-14T10:44:16.720076
| 2019-07-03T05:23:02
| 2019-07-03T05:23:02
| 194,984,459
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,236
|
py
|
from __future__ import print_function
import pyzbar.pyzbar as pyzbar
import numpy as np
import cv2
def decode(im):
# Find barcodes and QR codes
decodedObjects = pyzbar.decode(im)
# Print results
for obj in decodedObjects:
print('Type : ', obj.type)
print('Data : ', obj.data, '\n')
return decodedObjects
# Display barcode and QR code location
def display(im, decodedObjects):
# Loop over all decoded objects
for decodedObject in decodedObjects:
points = decodedObject.polygon
# If the points do not form a quad, find convex hull
if len(points) > 4:
hull = cv2.convexHull(np.array([point for point in points], dtype=np.float32))
hull = list(map(tuple, np.squeeze(hull)))
else:
            hull = points
# Number of points in the convex hull
n = len(hull)
        # Draw the convex hull
for j in range(0, n):
cv2.line(im, hull[j], hull[(j + 1) % n], (255, 0, 0), 3)
# Display results
cv2.imshow("Results", im);
cv2.waitKey(0);
# Main
if __name__ == '__main__':
# Read image
im = cv2.imread('avatar.jpg')
decodedObjects = decode(im)
display(im, decodedObjects)
|
[
"shamilmhussain@gmail.com"
] |
shamilmhussain@gmail.com
|
d523d4aaff289eae4c93219f44ef1d4ca8dc999a
|
4c3509b7948f756d5cba90490384143d83924644
|
/chapter8/library_checkout/tests/test_checkout_mass_message.py
|
e9bec218737bae4ed0fbbdf9c30bf662101917ba
|
[] |
no_license
|
n37r06u3/odoo12-development-essentials
|
3049221c4df568326f5c71c4f02aa658216a5b2e
|
456ea192c1c0320bbce2d6c030bef4f364a0bf88
|
refs/heads/master
| 2020-04-18T01:01:41.114763
| 2019-01-23T02:56:42
| 2019-01-23T02:56:42
| 167,102,350
| 6
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,353
|
py
|
from odoo.tests.common import TransactionCase
from odoo import exceptions
class TestWizard(TransactionCase):
def setUp(self, *args, **kwargs):
super(TestWizard, self).setUp(*args, **kwargs)
# Setup test data
admin_user = self.env.ref('base.user_admin')
self.Checkout = self.env['library.checkout'].sudo(admin_user)
self.Wizard = self.env['library.checkout.massmessage'].sudo(admin_user)
a_member = self.env['library.member'].create({'name': 'John'})
self.checkout0 = self.Checkout.create({
'member_id': a_member.id})
def test_button_send(self):
"""Send button should create messages on Checkouts"""
# Add test code
msgs_before = len(self.checkout0.message_ids)
Wizard0 = self.Wizard.with_context(active_ids=self.checkout0.ids)
wizard0 = Wizard0.create({'message_body': 'Hello'})
wizard0.button_send()
msgs_after = len(self.checkout0.message_ids)
self.assertEqual(
msgs_after,
msgs_before+1,
            'Expected one additional message in the Checkout.')
def test_button_send_empty_body(self):
"Send button errors on empty body message"
wizard0 = self.Wizard.create({})
with self.assertRaises(exceptions.UserError) as e:
wizard0.button_send()
|
[
"n37r06u3@gmail.com"
] |
n37r06u3@gmail.com
|
63e526bd1682671945b0d368a5e61108f4cc7ba4
|
055ed076c6b1d3ee294797c92dc9533776571f54
|
/hash_table.py
|
0f58d2d5574064831df9b135798223c4d1b70936
|
[] |
no_license
|
imartin2433/Western-Governors-University-Parcel-Service
|
b7609b1a89d92b3954ef9f4640b354f4a631191f
|
3c41972cab7332330eba434b1a46a114390b3bb7
|
refs/heads/main
| 2023-07-24T05:21:23.352596
| 2021-08-16T15:01:40
| 2021-08-16T15:01:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,371
|
py
|
# HashTable class using chaining.
class ChainingHashTable:
"""
    A hash table that resolves collisions by chaining.
Attributes
----------
initial_capacity : int
the initial size of the hashtable
Methods
-------
insert:
Inserts elements into the hashtable
search:
Search for elements in the hashtable
remove:
        Removes elements from the hashtable
"""
def __init__(self, initial_capacity=40):
"""
        Constructs all the necessary attributes for the hash table object.
Has a runtime of O(n).
Parameters
----------
initial_capacity : int
The initial size of the hashtable
"""
# initialize the hash table with empty bucket list entries.
self.table = []
for i in range(initial_capacity):
self.table.append([])
def insert(self, key, item):
"""
Inserts elements into the hashtable.
The runtime is O(1).
Parameters
----------
key : int
            The value used to look up the element in the future
item : Object
The element that will be in the hashtable
Returns
-------
True : boolean
If the element was successfully inserted it return True
"""
# does both insert and update
        # get the bucket list where this item will go
bucket = hash(key) % len(self.table)
bucket_list = self.table[bucket]
# update key if it is already in the bucket
for kv in bucket_list:
if kv[0] == key:
kv[1] = item
return True
# if not, insert the item to the end of the bucket list.
key_value = [key, item]
bucket_list.append(key_value)
return True
# Searches for an item with matching key in the hash table.
# Returns the item if found, or None if not found.
def search(self, key):
"""
        Searches for an item with a matching key in the hash table.
Has a runtime of O(1).
Parameters
----------
key : int
            The value used to look up an element.
Returns
-------
item : Object
Object associated with key entered
"""
# get the bucket list where this key would be.
bucket = hash(key) % len(self.table)
bucket_list = self.table[bucket]
# search for the key in the bucket list
for key_value in bucket_list:
# find the item's index and return the item that is in the bucket list.
if key_value[0] == key:
item = key_value[1]
return item
return None
# Removes an item with matching key from the hash table.
def remove(self, key):
"""
Removes items from the hashtable.
Runtime is O(1).
Parameters
----------
key : int
The key of the item that needs to be removed.
"""
# get the bucket list where this item will be removed from.
bucket = hash(key) % len(self.table)
bucket_list = self.table[bucket]
# remove the item from the bucket list if it is present.
        for kv in bucket_list:
            if kv[0] == key:
                bucket_list.remove(kv)
                return
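# Usage sketch (not part of the original module): exercises insert, update, search and remove.
if __name__ == "__main__":
    demo_table = ChainingHashTable()
    demo_table.insert(1, "first parcel")    # new key -> appended to its bucket
    demo_table.insert(1, "updated parcel")  # existing key -> value updated in place
    print(demo_table.search(1))             # -> "updated parcel"
    demo_table.remove(1)
    print(demo_table.search(1))             # -> None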
|
[
"ishmael.martin@icloud.com"
] |
ishmael.martin@icloud.com
|
ae9c23f5fdb98de82ae8cbf6a8e4ee62419a45d6
|
493a36f1f8606c7ddce8fc7fe49ce4409faf80be
|
/.history/B073040023/client_20210614185044.py
|
dc83e9c3495b85ade6ac751b06199f40df2ea143
|
[] |
no_license
|
ZhangRRz/computer_network
|
f7c3b82e62920bc0881dff923895da8ae60fa653
|
077848a2191fdfe2516798829644c32eaeded11e
|
refs/heads/main
| 2023-05-28T02:18:09.902165
| 2021-06-15T06:28:59
| 2021-06-15T06:28:59
| 376,568,344
| 0
| 0
| null | 2021-06-13T14:48:36
| 2021-06-13T14:48:36
| null |
UTF-8
|
Python
| false
| false
| 4,772
|
py
|
import socket
import threading
import tcppacket
import struct
# socket.socket() will create a TCP socket (default)
# socket.socket(socket.AF_INET, socket.SOCK_STREAM) to explicitly define a TCP socket
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM) # explicitly define a UDP socket
udp_host = '127.0.0.1' # Host IP
udp_port = 12345 # specified port to connect
def init_new_calc_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "calc 2 ^ 10"
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port) # Sending message to UDP server
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
def init_new_videoreq_req(i):
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
msg = "video 1".encode('utf-8')
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port)) # Sending message to UDP server
recvdata = b''
ack_seq = 0
seq = 0
counter = 0
while True:
data, address = sock.recvfrom(512*1024)
s = struct.calcsize('!HHLLBBHHH')
raw = struct.unpack('!HHLLBBHHH', data[:s])
print("receive packet from ", address,
"with header", raw)
if(raw[2] == ack_seq and raw[7] == 0):
recvdata += data[s:]
if(raw[5] % 2):
# fin_falg
fin_flag = 1
else:
fin_flag = 0
ack_seq += 1
counter += 1
else:
print("Receive ERROR packet from ", address)
fin_flag = 1
counter = 3
# --------------------------------------------
# send ACK
if(counter == 3):
tcp = tcppacket.TCPPacket(
data=str("ACK").encode('utf-8'),
seq=seq, ack_seq=ack_seq,
flags_ack=1,
flags_fin=fin_flag)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address,
"with ack seq: ", ack_seq, " and seq: ", seq)
sock.sendto(tcp.raw, address)
if(not fin_flag):
counter = 0
seq += 1
# --------------------------------------------
print(fin_flag)
if(fin_flag):
break
savename = str(i+1)+"received.mp4"
f = open(savename, "wb")
f.write(recvdata)
f.close()
def init_new_dns_req(i):
# ---------------------
sock = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
oldmsg = msg = "dns google.com"
msg = msg.encode('utf-8')
tcp = tcppacket.TCPPacket(data=msg)
tcp.assemble_tcp_feilds()
sock.sendto(tcp.raw, (udp_host, udp_port))
# print("UDP target IP:", udp_host)
# print("UDP target Port:", udp_port)
while True:
data, address = sock.recvfrom(512*1024)
sock.connect(address)
s = struct.calcsize('!HHLLBBH')
unpackdata = struct.unpack('!HHLLBBH', data[:s])
msg = data[s:].decode('utf-8')
print(oldmsg,"is", msg)
if(unpackdata[5] % 2):
# fin_falg
fin_falg = 1
else:
fin_falg = 0
tcp = tcppacket.TCPPacket(
data="ACK".encode('utf-8'),
flags_ack=1,
flags_fin=fin_falg)
tcp.assemble_tcp_feilds()
print("ACK send to (IP,port):", address)
sock.sendto(tcp.raw, address)
if(fin_falg):
break
# ----------------------
# def init_new
threads = []
for i in range(1):
print("Demo calculation function")
threads.append(threading.Thread(target = init_new_calc_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_dns_req, args = (i,)))
threads[-1].start()
for i in range(1):
threads.append(threading.Thread(target = init_new_videoreq_req, args = (i,)))
threads[-1].start()
|
[
"tom95011@gmail.com"
] |
tom95011@gmail.com
|
02d549172e472542fc2a2d3045217259fbf12efa
|
085530edb6ff64a5838ac675b7282a59928c9851
|
/capstone/flight_control/gps_amd.py
|
a8cbec81fd362a484f7fdf2d844eb3b5fc3a658b
|
[] |
no_license
|
leadtheway01/UVa
|
1a836c275e704779df52e12f62cdbd0b8f9cb9f7
|
c016766b17db1a36d86244de31ec3d161eed612f
|
refs/heads/master
| 2021-01-20T18:39:50.190870
| 2017-05-11T02:45:53
| 2017-05-11T02:45:53
| 90,928,106
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,315
|
py
|
#!/usr/bin/env python
from dronekit import connect, VehicleMode, LocationGlobalRelative
import time
"""
Take off
"""
def takeoff(vehicle, aTargetAltitude):
print "Taking off!"
vehicle.simple_takeoff(aTargetAltitude)
while True:
print "Altitude: ", vehicle.location.global_relative_frame.alt
if vehicle.location.global_relative_frame.alt >= aTargetAltitude*0.95:
print "Reached target altitude: ", aTargetAltitude
break
time.sleep(1)
"""
GPS based flight
"""
def gpsFlight(vehicle, latitude, longitude, altitude=None, groundSpeed=None):
print "Going towrad target location... ",
point = LocationGlobalRelative(latitude, longitude, altitude)
vehicle.simple_goto(point, groundSpeed)
    while True:
        # Re-read the vehicle position each iteration so the loop can actually terminate
        current_lat = vehicle.location.global_relative_frame.lat
        current_lon = vehicle.location.global_relative_frame.lon
        current_alt = vehicle.location.global_relative_frame.alt
        latitudeDifference = abs(abs(current_lat) - abs(latitude))
        longitudeDifference = abs(abs(current_lon) - abs(longitude))
        print "Latitude: ", current_lat
        print "Longitude: ", current_lon
        #print "Altitude: ", current_alt
        if latitudeDifference <= 0.01 and longitudeDifference <= 0.01 and current_alt >= altitude*0.95:
            print "Reached destination"
            break
        time.sleep(1)
|
[
"wdk7bj@virginia.edu"
] |
wdk7bj@virginia.edu
|
1133f50906d41acd6c70af39e60e5b22bb2d2764
|
01e7069eaaab8f907502c97a26f2e1c8bae7aade
|
/file_queries.py
|
a64ddd36ad941f43b625dfbd095769c51d2276e4
|
[] |
no_license
|
avakutin/ground_scripts
|
38c03b2dd9a2170094a8e91a2518bb1cf251bbce
|
749a36c570e00140abebd8c0427c7fefbc419598
|
refs/heads/master
| 2021-01-12T13:47:12.028254
| 2016-11-29T01:45:12
| 2016-11-29T01:45:12
| 69,127,274
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,294
|
py
|
import requests
import json
import string
import utils
class FileQueries:
def __init__(self, hostname):
self.hostname = hostname
def ls(self, path):
# path_list = self.split_path(path)
node_name = path.replace("/", ">")
i = string.rfind(node_name, ">")
if i == 0:
parent = None
dirname = node_name
else:
parent = node_name[:i]
dirname = node_name[i+1:]
# Check that supplied path refers to a directory
if self.is_file(node_name, parent=False):
print "Path '{}' is not a directory".format(path)
return
dir_node_version = utils.get_latest_node_version(self.hostname, node_name)
adj_path = self.hostname + "/nodes/adjacent/{0}/{1}-to-".format(dir_node_version["id"], \
node_name)
adj_list = requests.get(adj_path).json()
output = [path]
for node_id in adj_list:
node = requests.get(self.hostname + "/nodes/versions/{}".format(node_id)).json()
name = node["nodeId"][6:]
node_version = utils.get_latest_node_version(self.hostname, name)
metadata = utils.get_node_version_metadata(node_version)
name = name.replace(">", "/")
if metadata.get("file") != None:
output.append((name, "File"))
else:
output.append((name, "Directory"))
return output
def create_directory(self, path):
# Create Node for directory
node_name = path.replace("/", ">")
i = string.rfind(node_name, ">")
if i == 0:
if len(path) < 2:
parent = None
else:
parent = node_name[0]
dirname = node_name
else:
parent = node_name[:i]
dirname = node_name[i+1:]
# Check that supplied parent is a directory
if parent and self.is_file(node_name):
print "Parent '{}' is not a directory".format(parent.replace(">", "/"))
return
req_path = self.hostname + "/nodes/{}".format(node_name)
dir_node = requests.post(req_path).json()
# Create NodeVersion for the directory
dir_node_version = utils.create_node_version(self.hostname, dir_node["id"])
if parent:
# Create edge between parent directory NodeVersion and new NodeVersion
self.create_edge_to_parent_dir(parent, node_name, dir_node_version["id"])
def create_file(self, filepath, metadata):
"""
Creates a Node and NodeVersion for a file located at *filepath*
containing *metadata*
"""
node_name = filepath.replace("/", ">")
i = string.rfind(node_name, ">")
if i == 0:
parent = node_name[0]
else:
parent = node_name[:i]
filename = node_name[i+1:]
# Check that supplied parent is a directory
if self.is_file(node_name):
print "Parent '{}' is not a directory".format(parent.replace(">", "/"))
return
# Create Node for file
path = self.hostname + "/nodes/{}".format(node_name)
file_node = requests.post(path).json()
# Create Tags for each item of metadata
tag_map = {}
tag_map["file"] = {
"key": "file",
"value": "file",
"type": "string"
}
for label, value in metadata.items():
tag_map[label] = {
"key": label,
"value": value,
"type": "string"
}
file_node_version = utils.create_node_version(self.hostname, file_node["id"], tag_map=tag_map)
# Create edge between the file and its parent directory
self.create_edge_to_parent_dir(parent, node_name, file_node_version["id"])
return file_node_version
def get_file(self, filepath):
node_name = filepath.replace("/", ">")
i = string.rfind(node_name, ">")
if i == 0:
parent = node_name[0]
else:
parent = node_name[:i]
filename = node_name[i+1:]
if not self.is_file(node_name, parent=False):
print "{} is not a file".format(filepath)
return
file_info = [node_name.replace(">", "/")]
file_node_version = utils.get_latest_node_version(self.hostname, node_name)
file_info.append(utils.get_node_version_metadata(file_node_version))
return file_info
def split_path(self, path):
"""
Splits the input path into a list, adding "root" to the list
"""
path_split = string.split(path, "/")
path_list = ["root"]
path_list += [name for name in path_split if name != '']
return path_list
def create_edge_to_parent_dir(self, parent, new_node_name, new_node_version_id):
"""
Creates an Edge and EdgeVersion between a new NodeVersion with id
*new_node_version_id* and name *new_node_name* and its parent directory,
as specified by *parent*
"""
parent_dir_node_version = utils.get_latest_node_version(self.hostname, parent)
edge_path = self.hostname + "/edges/{0}-to-{1}".format(parent, new_node_name)
edge = requests.post(edge_path).json()
edge_id = edge["id"]
fromId = parent_dir_node_version["id"]
toId = new_node_version_id
utils.create_edge_version(self.hostname, edge_id, fromId, toId)
def is_file(self, path, parent=True):
"""
If parent is true, returns whether the parent of the supplied
directory/file is a directory. If parent is false, returns whether
the entire path refers to a directory
"""
i = string.rfind(path, ">")
if parent:
if i == 0:
name = path[0]
else:
name = path[:i]
else:
name = path
# If root directory
if i == 0 and len(path) < 2:
return False
parent_node_version = utils.get_latest_node_version(self.hostname, name)
metadata = utils.get_node_version_metadata(parent_node_version)
return metadata.get("file") != None
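# Usage sketch (assumes the backing API server is reachable at the given hostname; not part of the original script):
# fq = FileQueries("http://localhost:9000")
# fq.create_directory("/data")
# fq.create_file("/data/model.csv", {"owner": "alice"})
# print fq.ls("/data")
# print fq.get_file("/data/model.csv")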
|
[
"alinavak@berkeley.edu"
] |
alinavak@berkeley.edu
|
b4633c3cf26d5c187ffcfec668318ef6eef731a8
|
380f8425f31088c61e1eb02690ce33b62c503040
|
/pset6/readability-in-progress/readability.py
|
c71a9116cb32985cfbb6ff27b85757bc8e6ae2f9
|
[] |
no_license
|
thalita89/HarvardProblems-set6
|
c4ce2ba86bf18ca1bb5931ea87c6612633c22c52
|
2b97e4d72a28bc2857623ad3d4eab470ffc9323d
|
refs/heads/master
| 2023-04-08T01:43:30.418846
| 2021-04-17T17:23:37
| 2021-04-17T17:23:37
| 358,929,350
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,202
|
py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 25 17:12:13 2021
@author: thalitaramires
"""
#4) Readability:
#Problem: https://cs50.harvard.edu/x/2021/psets/6/readability/
#First step:
#ask for the string
#Second step
#process the string data
#Third step
#run the algorithm
# 0.0588 * L - 0.296 * S - 15.8, where:
#L is the average number of letters per 100 words in the text, and
#S is the average number of sentences per 100 words in the text
#5 sentences,
#119 words, and
#639 letters or digits
#L = Letters ÷ Words × 100 = 639 ÷ 119 × 100 ≈ 537
#S = Sentences ÷ Words × 100 = 5 ÷ 119 × 100 ≈ 4.20
#CLI = 0.0588 * 537 - 0.296 * 4.20 - 15.8 ≈ 14.5
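#A compact restatement of the formula above as a helper (a sketch, not used by the code below):
#def coleman_liau_index(letters, words, sentences):
#    return 0.0588 * (letters / words * 100) - 0.296 * (sentences / words * 100) - 15.8
#coleman_liau_index(639, 119, 5)  # ~ 14.5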
#ord()
#chr()
# Hello world, thalita here! Good bye.
#1-text
def readability():
    return input('Give me some text:')
readability = readability()
#2-sentence
import spacy
def breakSentencesFunction(arg):
nlp = spacy.load('en')
doc = nlp(readability)
return doc.sents
split = breakSentencesFunction(readability)
print(split)
#len-sentence
def sentencesFunction(arg):
sentence = breakSentencesFunction(arg)
listArray = list(sentence)
return len(listArray)
s = sentencesFunction(readability)
print('The sentence(s) is(are): ', s)
#3-count words
def wordsFunction():
words = 0
sentence = breakSentencesFunction(readability)
for w in sentence:
words += len([token for token in w])
return words
w = wordsFunction()
print(w)
alphanum = readability
alphanum = [char for char in alphanum if char.isalnum()]
alphanum = ' '.join(alphanum)
print(alphanum)
l = len(alphanum)
print(l)
print(l)
print(w)
print(s)
#final
def letterFunction(l , w):
return l / w * 100
letter = letterFunction(l , w)
def sentenceFuncion(s, w):
return s / w * 100
sentence = sentenceFuncion(s, w)
def colemanFuncion(letter, sentence):
index = 0.0588 * float(letter) - 0.296 * float(sentence) - 15.8
print(index)
if index >= 16:
return 'Grade 16+'
elif index < 1:
return 'Before Grade 1'
else:
return index
colemanLiau = colemanFuncion(letter, sentence)
print(colemanLiau)
|
[
"thalitaramires89@gmail.com"
] |
thalitaramires89@gmail.com
|
d5046c3c0b613b372fc250a121575cad4d03bc38
|
89d230ad44d17b18897da507725b0a10c32960d8
|
/Gen2_0_PP/Contest/weaponsProblem.py
|
4262d5dcd74a8bb81dd8ecc8b8b5afcbc6146ab4
|
[] |
no_license
|
KB-perByte/CodePedia
|
aeeae87b56cf0ff6e02200cfd6b34da42a007338
|
287e7a3ce981bbf594436cdc06dde23a02b53bb0
|
refs/heads/master
| 2021-06-19T07:32:53.849871
| 2021-01-23T16:17:27
| 2021-01-23T16:17:27
| 163,250,017
| 0
| 1
| null | 2020-03-21T14:39:36
| 2018-12-27T05:13:55
|
JavaScript
|
UTF-8
|
Python
| false
| false
| 2,014
|
py
|
'''
Daenerys has N types of weapons. There are Ai weapons of type i (1 <= i <= N). She wants to distribute these weapons among K soldiers, in such a way that:
Each soldier gets an equal number of weapons.
All the weapons a soldier gets must be of the same type.
Because she wants to make all of them as powerful as possible, she wants to give out as many weapons as she can. Help Daenerys find the maximum number of weapons a soldier can get.
Input Format
The first line consists of two space-separated integers N and K.
The second line consists of N space-separated integers A1, A2, A3, ..., An, as described above.
Constraints
1 <= N <= 100000
1 <= Ai, K <= 1,000,000,000
Output Format
Output a single integer denoting the maximum number of weapons a soldier can get.
Sample Input 0
3 2
3 1 4
Sample Output 0
3
Explanation 0
She can give 3 weapons of type 1 to the first soldier and 3 weapons of type 3 to the second soldier.
'''
def binarySearch(array, l, r, toSearch): #not so needed
while l <= r:
mid = l + (r - l)//2
if array[mid] == toSearch:
return mid
elif array[mid] < toSearch:
l = mid + 1
else:
r = mid - 1
return -1
def checkDistribution(lst, mid , k):
s = 0
for i in range(len(lst)):
s+=lst[i]//mid
print('val of s',s)
print('val of k',k)
return s>=k
def makimumWeapons(lst,k):
l = min(lst)
h = max(lst)
while h >= l:
mid = l+(h-l)//2
print("value of l and h", l ,h)
if checkDistribution(lst, mid, k):
if not checkDistribution(lst, mid+1, k):
return mid
else:
l = mid + 1
else:
h = mid - 1
return 0
import sys
def get_ints(): return list(map(int, sys.stdin.readline().strip().split()))
input1 = list(map(int,input().split()))
#input2 = list(map(int,input().split()))
input2 = get_ints()
print(makimumWeapons(input2, input1[1]))
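# Worked example (a sketch, not part of the original submission): for the sample input
# "3 2" / "3 1 4", makimumWeapons([3, 1, 4], 2) binary-searches the answer: at mid=3,
# checkDistribution gives 3//3 + 1//3 + 4//3 = 2 >= 2 soldiers, while mid=4 serves only
# 1 soldier, so 3 is returned, matching Sample Output 0.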
|
[
"paul.sagar@yahoo.com"
] |
paul.sagar@yahoo.com
|
cc3e8e736099a578fdb1575de3070766cc9c13fb
|
e66770daf4d1679c735cfab1ac24dd1f5107bd83
|
/Chapter06/Ch06_Code/GUI_multiple_threads_starting_a_thread.py
|
a33887dfb3fb91f64788072ca95a614df27e233a
|
[] |
no_license
|
CodedQuen/Python-GUI-Programming-Cookbook
|
c038eb6cec4945ff4f2b09e1551f9db712dd2502
|
f02b0f9916fb8272edc7ed4704eecce53ae0231c
|
refs/heads/master
| 2022-05-27T19:35:35.004455
| 2020-05-05T01:00:51
| 2020-05-05T01:00:51
| 261,329,651
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,778
|
py
|
'''
May 2017
@author: Burkhard A. Meier
'''
#======================
# imports
#======================
import tkinter as tk
from tkinter import ttk
from tkinter import scrolledtext
from tkinter import Menu
from tkinter import messagebox as msg
from tkinter import Spinbox
from time import sleep
import Ch04_Code.ToolTip as tt
from threading import Thread
GLOBAL_CONST = 42
#=====================================================
class OOP():
def __init__(self): # Initializer method
# Create instance
self.win = tk.Tk()
# Add a title
self.win.title("Python GUI")
self.create_widgets()
def method_in_a_thread(self):
print('Hi, how are you?')
for idx in range(10):
sleep(5)
self.scrol.insert(tk.INSERT, str(idx) + '\n')
# Running methods in Threads
def create_thread(self):
self.run_thread = Thread(target=self.method_in_a_thread)
self.run_thread.start() # start the thread
print(self.run_thread)
# Button callback
def click_me(self):
self.action.configure(text='Hello ' + self.name.get())
self.create_thread()
# Spinbox callback
def _spin(self):
value = self.spin.get()
self.scrol.insert(tk.INSERT, value + '\n')
# GUI Callback
def checkCallback(self, *ignored_args):
# only enable one checkbutton
if self.chVarUn.get(): self.check3.configure(state='disabled')
else: self.check3.configure(state='normal')
if self.chVarEn.get(): self.check2.configure(state='disabled')
else: self.check2.configure(state='normal')
# Radiobutton Callback
def radCall(self):
radSel = self.radVar.get()
if radSel == 0: self.mighty2.configure(text='Blue')
elif radSel == 1: self.mighty2.configure(text='Gold')
elif radSel == 2: self.mighty2.configure(text='Red')
# update progressbar in callback loop
def run_progressbar(self):
self.progress_bar["maximum"] = 100
for i in range(101):
sleep(0.05)
self.progress_bar["value"] = i # increment progressbar
self.progress_bar.update() # have to call update() in loop
self.progress_bar["value"] = 0 # reset/clear progressbar
def start_progressbar(self):
self.progress_bar.start()
def stop_progressbar(self):
self.progress_bar.stop()
def progressbar_stop_after(self, wait_ms=1000):
self.win.after(wait_ms, self.progress_bar.stop)
def usingGlobal(self):
global GLOBAL_CONST
GLOBAL_CONST = 777
# Exit GUI cleanly
def _quit(self):
self.win.quit()
self.win.destroy()
exit()
#####################################################################################
def create_widgets(self):
tabControl = ttk.Notebook(self.win) # Create Tab Control
tab1 = ttk.Frame(tabControl) # Create a tab
tabControl.add(tab1, text='Tab 1') # Add the tab
tab2 = ttk.Frame(tabControl) # Add a second tab
tabControl.add(tab2, text='Tab 2') # Make second tab visible
tabControl.pack(expand=1, fill="both") # Pack to make visible
# LabelFrame using tab1 as the parent
mighty = ttk.LabelFrame(tab1, text=' Mighty Python ')
mighty.grid(column=0, row=0, padx=8, pady=4)
# Modify adding a Label using mighty as the parent instead of win
a_label = ttk.Label(mighty, text="Enter a name:")
a_label.grid(column=0, row=0, sticky='W')
# Adding a Textbox Entry widget
self.name = tk.StringVar()
self.name_entered = ttk.Entry(mighty, width=24, textvariable=self.name)
self.name_entered.grid(column=0, row=1, sticky='W')
# Adding a Button
self.action = ttk.Button(mighty, text="Click Me!", command=self.click_me)
self.action.grid(column=2, row=1)
ttk.Label(mighty, text="Choose a number:").grid(column=1, row=0)
number = tk.StringVar()
self.number_chosen = ttk.Combobox(mighty, width=14, textvariable=number, state='readonly')
self.number_chosen['values'] = (1, 2, 4, 42, 100)
self.number_chosen.grid(column=1, row=1)
self.number_chosen.current(0)
# Adding a Spinbox widget
self.spin = Spinbox(mighty, values=(1, 2, 4, 42, 100), width=5, bd=9, command=self._spin) # using range
self.spin.grid(column=0, row=2, sticky='W') # align left
# Using a scrolled Text control
scrol_w = 40; scrol_h = 10 # increase sizes
self.scrol = scrolledtext.ScrolledText(mighty, width=scrol_w, height=scrol_h, wrap=tk.WORD)
self.scrol.grid(column=0, row=3, sticky='WE', columnspan=3)
for child in mighty.winfo_children(): # add spacing to align widgets within tabs
child.grid_configure(padx=4, pady=2)
#=====================================================================================
# Tab Control 2 ----------------------------------------------------------------------
self.mighty2 = ttk.LabelFrame(tab2, text=' The Snake ')
self.mighty2.grid(column=0, row=0, padx=8, pady=4)
# Creating three checkbuttons
chVarDis = tk.IntVar()
check1 = tk.Checkbutton(self.mighty2, text="Disabled", variable=chVarDis, state='disabled')
check1.select()
check1.grid(column=0, row=0, sticky=tk.W)
        # stored on self so that checkCallback() can reach these widgets and variables
        self.chVarUn = tk.IntVar()
        self.check2 = tk.Checkbutton(self.mighty2, text="UnChecked", variable=self.chVarUn)
        self.check2.deselect()
        self.check2.grid(column=1, row=0, sticky=tk.W)
        self.chVarEn = tk.IntVar()
        self.check3 = tk.Checkbutton(self.mighty2, text="Enabled", variable=self.chVarEn)
        self.check3.deselect()
        self.check3.grid(column=2, row=0, sticky=tk.W)
        # trace the state of the two checkbuttons
        self.chVarUn.trace('w', lambda unused0, unused1, unused2: self.checkCallback())
        self.chVarEn.trace('w', lambda unused0, unused1, unused2: self.checkCallback())
# First, we change our Radiobutton global variables into a list
colors = ["Blue", "Gold", "Red"]
# create three Radiobuttons using one variable
self.radVar = tk.IntVar()
# Next we are selecting a non-existing index value for radVar
self.radVar.set(99)
# Now we are creating all three Radiobutton widgets within one loop
for col in range(3):
curRad = tk.Radiobutton(self.mighty2, text=colors[col], variable=self.radVar,
value=col, command=self.radCall)
curRad.grid(column=col, row=1, sticky=tk.W) # row=6
# And now adding tooltips
tt.create_ToolTip(curRad, 'This is a Radiobutton control')
# Add a Progressbar to Tab 2
self.progress_bar = ttk.Progressbar(tab2, orient='horizontal', length=286, mode='determinate')
self.progress_bar.grid(column=0, row=3, pady=2)
# Create a container to hold buttons
buttons_frame = ttk.LabelFrame(self.mighty2, text=' ProgressBar ')
buttons_frame.grid(column=0, row=2, sticky='W', columnspan=2)
# Add Buttons for Progressbar commands
ttk.Button(buttons_frame, text=" Run Progressbar ", command=self.run_progressbar).grid(column=0, row=0, sticky='W')
ttk.Button(buttons_frame, text=" Start Progressbar ", command=self.start_progressbar).grid(column=0, row=1, sticky='W')
ttk.Button(buttons_frame, text=" Stop immediately ", command=self.stop_progressbar).grid(column=0, row=2, sticky='W')
ttk.Button(buttons_frame, text=" Stop after second ", command=self.progressbar_stop_after).grid(column=0, row=3, sticky='W')
for child in buttons_frame.winfo_children():
child.grid_configure(padx=2, pady=2)
for child in self.mighty2.winfo_children():
child.grid_configure(padx=8, pady=2)
# Creating a Menu Bar
menu_bar = Menu(self.win)
self.win.config(menu=menu_bar)
# Add menu items
file_menu = Menu(menu_bar, tearoff=0)
file_menu.add_command(label="New")
file_menu.add_separator()
file_menu.add_command(label="Exit", command=self._quit)
menu_bar.add_cascade(label="File", menu=file_menu)
# Display a Message Box
def _msgBox():
msg.showinfo('Python Message Info Box', 'A Python GUI created using tkinter:\nThe year is 2017.')
# Add another Menu to the Menu Bar and an item
help_menu = Menu(menu_bar, tearoff=0)
help_menu.add_command(label="About", command=_msgBox) # display messagebox when clicked
menu_bar.add_cascade(label="Help", menu=help_menu)
# Change the main windows icon
self.win.iconbitmap('pyc.ico')
# It is not necessary to create a tk.StringVar()
# strData = tk.StringVar()
strData = self.spin.get()
# call function
self.usingGlobal()
self.name_entered.focus()
# Add Tooltips -----------------------------------------------------
# Add a Tooltip to the Spinbox
tt.create_ToolTip(self.spin, 'This is a Spinbox control')
# Add Tooltips to more widgets
tt.create_ToolTip(self.name_entered, 'This is an Entry control')
tt.create_ToolTip(self.action, 'This is a Button control')
tt.create_ToolTip(self.scrol, 'This is a ScrolledText control')
#======================
# Start GUI
#======================
oop = OOP()
oop.win.mainloop()
|
[
"noreply@github.com"
] |
CodedQuen.noreply@github.com
|
12864b8861704ee4da1a109a726b0489f7f6ad36
|
ea7d0f2ad8d4441ebbead7f7cc8469fdf2f2e207
|
/media.py
|
e7142b39063d7c2af5be436d42225cf73ea94bd4
|
[] |
no_license
|
lmichilot/UdacityFSW_Project1
|
f9d243d7aa50bb44ef305cd7acb1cff02a0b76aa
|
e70fb2346fab007b83747ad698d94d8ebeb68b57
|
refs/heads/master
| 2021-06-25T16:14:33.130345
| 2017-08-25T15:57:49
| 2017-08-25T15:57:49
| 101,202,086
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 609
|
py
|
import webbrowser
class Movie():
"""
This class provides a way to store movie related information.
Attributes:
title: The title of the movie.
storyline: The summary of the movie.
poster_image_url: URL of the movie poster.
trailer_youtube_url: URL of the movie trailer.
director: director of the movie.
"""
def __init__(self, _title, _storyline, _image, _url, _director):
self.title = _title
self.storyline = _storyline
self.poster_image_url = _image
self.trailer_youtube_url = _url
self.director = _director
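# Usage sketch (illustrative values only, not from the original project):
# toy_story = Movie("Toy Story", "A boy's toys come to life",
#                   "https://example.com/poster.jpg",
#                   "https://example.com/trailer", "John Lasseter")
# webbrowser.open(toy_story.trailer_youtube_url)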
|
[
"lmichilot@gmail.com"
] |
lmichilot@gmail.com
|
b715d3f300e76228033f8818f704dd9ab56f4a6b
|
b394241994f896463a95ab291e98957d66987ba2
|
/app.py
|
b02b6e9ccc5ab740082f4254e2eb29bb50a7f3a6
|
[] |
no_license
|
admajaput/Myfirst_Dashplotly
|
fdef83a291fdaafa16ad8f9612c7dcb0079ab6a2
|
e84ddd9bf46c2a7ab41d131b234f3c6f8066a1f9
|
refs/heads/master
| 2020-04-08T04:15:32.393439
| 2018-11-25T08:00:04
| 2018-11-25T08:00:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,660
|
py
|
import dash
import dash_core_components as dcc
import dash_html_components as html
import pandas as pd
import plotly.graph_objs as go
from dash.dependencies import Input,Output
from categoryplot import getPlot, catgory
import numpy as np
from plotly import tools
app = dash.Dash()
app.title = 'Purwadhika Dash Plotly'
mydata = pd.read_csv('data')
dataT = pd.read_csv('jointdata')
data = {
'After_Preparation' : dataT,
'Before_Preparation' : mydata
}
estiFunc = {
'count': len,
'sum': sum,
'mean': np.mean,
'std': np.std
}
color_set = {
'Gender': ['#ff3fd8','#4290ff'],
'Education': ['#32fc7c','#ed2828','#ddff00','#f2e200','#0059a3'],
'NumberChildrenAtHome': ['#0059a3','#f2e200','#ddff00','#3de800','#00c9ed'],
'Occupation': ['#ff8800','#ddff00','#3de800','#00c9ed','#ff3fd8'],
'CountryRegionName':['#32fc7c','#ed2828','#ddff00','#0059a3','#00c9ed','#ff3fd8'],
'Catage' : ['#ff8800','#ddff00','#3de800','#00c9ed','#ff3fd8','#3de800','#00c9ed']
}
app.layout = html.Div(children=[
dcc.Tabs(id='tabs', value='tab1', className='h1firstTab',
style={
'fontFamily': 'system-ui'
},
content_style={
'fontFamily': 'Arial',
'borderLeft': '1px solid #d6d6d6',
'borderRight': '1px solid #d6d6d6',
'borderBottom': '1px solid #d6d6d6',
'padding': '44px'
},
children=[
dcc.Tab(label='Data Set Overview', value='tab1', children=[
html.Div([
html.Table([
html.Tr([
html.Td(html.P('Table : ')),
html.Td([
dcc.Dropdown(
id='dd-table',
options=[{'label':'Dataset Before', 'value':'Before_Preparation'},
{'label':'Dataset After', 'value':'After_Preparation'}],
value='Before_Preparation'
)]
)
])
],style={ 'width': '300px', 'paddingBottom': '20px' }),
html.Div(id='tampil_table')
])
]),
        dcc.Tab(label='Categorical Feature with Target Overview', value='tab2', children=[
html.Div([
html.H1('View Data Pie Plot',className='h1firstTab'),
html.Table([
html.Tr([
html.Td(html.P(['Hue : ',
dcc.Dropdown(
id='ddl-fiture-plot',
options=[{'label': 'Gender', 'value': 'Gender'},
{'label': 'Education', 'value': 'Education'},
{'label': 'Number Children At Home', 'value': 'NumberChildrenAtHome'},
{'label': 'Occupation', 'value': 'Occupation'},
{'label': 'Country Region Name', 'value': 'CountryRegionName'},
{'label': 'Age', 'value': 'catAge'}],
value='Gender'
)
]),style={'width': '900px'}),
html.Td(html.P(['Column : ',
dcc.Dropdown(
id='ddl-target-plot',
options=[{'label': 'Yearly Income', 'value': 'YearlyIncome'},
{'label': 'Average Month Spend', 'value': 'AveMonthSpend'}],
value='YearlyIncome'
)
]),style={'width': '400px'})
]),
],style={ 'width': '1200px', 'paddingBottom': '20px'}),
html.Table(id='tr_bar', children=[])
])
]),
        dcc.Tab(label='Plot Feature', value='tab-3', children=[
html.Div([
html.H1('Categorical Plot Tips Data Set',className='h1firstTab'),
html.Table([
html.Tr([
html.Td([
html.P('Jenis : '),
dcc.Dropdown(
id='ddl-jenis-plot-category',
options=[{'label': 'Bar', 'value': 'bar'},
{'label': 'Violin', 'value': 'violin'},
{'label': 'Box', 'value': 'box'}],
value='bar'
)
]),
html.Td([
html.P('X Axis : '),
dcc.Dropdown(
id='ddl-x-plot-category',
options=[{'label': 'Gender', 'value': 'Gender'},
{'label': 'Education', 'value': 'Education'},
{'label': 'Number Children At Home', 'value': 'NumberChildrenAtHome'},
{'label': 'Occupation', 'value': 'Occupation'},
{'label': 'Country Region Name', 'value': 'CountryRegionName'},
{'label': 'Age', 'value': 'catAge'}],
value='Gender'
)
])
])
], style={ 'width' : '700px', 'margin': '0 auto'}),
dcc.Graph(
id='categoricalPlot',
figure={
'data': []
}
)
])
])
])
],
style={'maxWidth' : '1300px',
'margin' : '0 auto' })
#callback untuk table
@app.callback(
Output('tampil_table','children'),
[Input('dd-table','value')]
)
def tampil_table(table):
dataset = data[table]
return[
html.H1(children = table,className='h1firstTab'),
html.H4('Total Row :'+str(len(dataset))),
html.H4("Total columns : "+str(len(dataset.columns))),
dcc.Graph(
id='table_go',
figure={
'data':[ go.Table(
header=dict(
values=['<b>'+col.capitalize()+'<b>' for col in dataset.columns],
fill = dict(color='#C2D4FF'),
font = dict(size=11),
height= 30,
align = ['center']),
cells=dict(
values=[dataset[col] for col in dataset.columns],
fill= dict(color='#F5F8FF'),
font=dict(size=11),
height= 25,
align = ['right']*5)
)],
'layout':go.Layout(height=500, margin={'l': 40, 'b': 40, 't': 10, 'r': 10})
}
)
]
@app.callback(
Output('tr_bar', 'children'),
[Input('ddl-fiture-plot', 'value'),
Input('ddl-target-plot','value')]
)
def update_graph(fiture,target):
xtable = mydata.groupby(fiture).mean()[target].sort_values(ascending=False).reset_index()
return[
html.Td([
dcc.Graph(
id='table_go',
figure={
'data':[
go.Bar(
x=xtable[fiture],
y=xtable[target],
text=xtable[target],
name='try',
marker=dict(color='blue'),
legendgroup = 'target'
)],
'layout': go.Layout(
xaxis={'title': fiture.capitalize()}, yaxis={'title': target.capitalize()},
margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
width=700,
height=500,
legend={'x': 0, 'y': 1.2}, hovermode='closest',
boxmode='group',violinmode='group',
#plot_bgcolor= 'black', paper_bgcolor= 'black'
)
}
)
],colSpan='2',style={'width': '900px'}),
html.Td([
dcc.Graph(
id='table_go2',
figure={
'data':[ go.Table(
header=dict(
values=['<b>'+col.capitalize()+'<b>' for col in xtable.columns],
fill = dict(color='#C2D4FF'),
font = dict(size=11),
height= 30,
align = ['center']),
cells=dict(
values=[xtable[col] for col in xtable.columns],
fill= dict(color='#F5F8FF'),
font=dict(size=11),
height= 25,
align = ['right']*5)
)],
'layout':go.Layout(width = 300,height=300, margin={'l': 10,'b': 40, 't': 10, 'r': 10})
}
)
],style={'position': 'absolute', 'width': '300px'})
]
@app.callback(
Output('categoricalPlot', 'figure'),
[Input('ddl-jenis-plot-category', 'value'),
Input('ddl-x-plot-category', 'value')])
def update_category_graph(ddljeniscategory, ddlxcategory):
return {
'data': getPlot(ddljeniscategory,ddlxcategory),
'layout': go.Layout(
xaxis={'title': ddlxcategory.capitalize()}, yaxis={'title': 'US$'},
margin={'l': 40, 'b': 40, 't': 10, 'r': 10},
legend={'x': 0, 'y': 1.2}, hovermode='closest',
boxmode='group',violinmode='group'
# plot_bgcolor= 'black', paper_bgcolor= 'black',
)
}
if __name__ == '__main__':
# run server on port 1997
# debug=True for auto restart if code edited
app.run_server(debug=True, port=1907)
|
[
"asmaja113@gmail.com"
] |
asmaja113@gmail.com
|
51624a9d93d6b8f1ec4bc07488f89856c586aa69
|
e8bef0f6dc2bd90f25e4bd2f32668ac31f2f0fe2
|
/old/variable_neutral_line_manipulator/display/result_graph_widget.py
|
ba4d3f93c8c774e04818fda648d5ba6b63d68d50
|
[] |
no_license
|
MINGXUANCALVIN/variableNeutralLineManipulator
|
cdf3db4314029d84e3831ecb02570b0171615273
|
c2356a49c9efd62d4ec6aa2bd83bcce26ede3707
|
refs/heads/master
| 2023-04-27T00:39:42.232858
| 2020-05-30T05:24:28
| 2020-05-30T05:24:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,611
|
py
|
import math
import numpy as np
from PyQt5.QtCore import QSize, Qt
from PyQt5.QtWidgets import QSizePolicy, QVBoxLayout, QWidget
from matplotlib.backends.qt_compat import QtWidgets
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
from .state_management import StateManagement
from .ranges import Range3d, enforceRange
from .plot import *
class ResultGraphWidget(QWidget):
"""
For graphs
"""
def __init__(self, parent=None, width=5, height=4, dpi=100):
super().__init__(parent)
fig = Figure(figsize=(width, height), dpi=dpi)
self.canvas = FigureCanvas(fig)
self.ax = fig.add_subplot(111, projection='3d')
FigureCanvas.updateGeometry(self.canvas) # Not sure whether it is necessary
mainLayout = QVBoxLayout()
mainLayout.addWidget(self.canvas)
self.setLayout(mainLayout)
# sizePolicy = QSizePolicy()
# # sizePolicy.setHeightForWidth(True)
# sizePolicy.setHorizontalPolicy(QSizePolicy.Expanding)
# sizePolicy.setVerticalPolicy(QSizePolicy.MinimumExpanding)
# self.setSizePolicy(sizePolicy)
self.result = None
StateManagement().computeTensionsSink.subscribe(self._updateResult)
StateManagement().graphResizeUpdateSink.subscribe(lambda _: self._updateGraph)
self._updateGraph()
def _updateResult(self, res):
self.result = res
self._updateGraph()
def _updateGraph(self):
if not self.result:
return
self.ax.clear()
range3d = Range3d()
for i, s in enumerate(self.result.states):
cylindricalRadius = max([tms.tendonModel.horizontalDistFromAxis for tms in s.tendonModelStates])
rg = RingPlotGeometry.fromRing(s.ring, cylindricalRadius)
plotRingRF(self.ax, rg, self.result.getTF(i), range=range3d)
enforceRange(self.ax, range3d)
self.ax.set_xlabel('X axis')
self.ax.set_ylabel('Y axis')
self.ax.set_zlabel('Z axis')
self.canvas.draw()
def resizeEvent(self, event):
width = event.size().width()
height = event.size().height()
if width > height:
self.resize(height, height)
elif width < height:
self.resize(width, width)
else:
StateManagement().graphResizeUpdateSrc.on_next(None)
def minimumSizeHint(self):
return QSize(400,400)
|
[
"dickson0717@hotmail.com"
] |
dickson0717@hotmail.com
|
4ed9874c1f88b19a1c4b4f1546c73a2abcebc081
|
194f21e0c0e0d1fbf827ffcbd1a0dbb5b7ddd1cf
|
/sksurgerybard.py
|
41534a870c6e56bf3bdebb972ede6c06783255df
|
[
"BSD-3-Clause"
] |
permissive
|
SciKit-Surgery/scikit-surgerybard
|
b5ccdf7df9b179da41986d41cbe45ce2d154c363
|
3a4ec2f2b22e12af2781238122427dd91231d110
|
refs/heads/master
| 2023-07-07T15:42:36.771451
| 2023-06-27T19:52:24
| 2023-06-27T19:52:24
| 260,141,467
| 3
| 0
|
NOASSERTION
| 2023-06-27T19:52:25
| 2020-04-30T07:22:32
|
Python
|
UTF-8
|
Python
| false
| false
| 177
|
py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
from sksurgerybard.ui.sksurgerybard_command_line import main
if __name__ == "__main__":
sys.exit(main(sys.argv[1:]))
|
[
"mianasbatahmad@eduroam-int-dhcp-97-228-159.ucl.ac.uk"
] |
mianasbatahmad@eduroam-int-dhcp-97-228-159.ucl.ac.uk
|
5cdfb9f36ca69e8ff5fabc7944ba6a1bbc3b4b56
|
14b00b45523dc474b9cb8e3c527f84e322c9e178
|
/scen9.py
|
f1a1525900163d46e25047abe20ddc874e09964d
|
[] |
no_license
|
GilBaggio/python-project
|
d67a1bcb710e5623ce41ba3715c7b7d69459da7c
|
64d47f07fc0688c9fe5ce135a38683b5ff59b23a
|
refs/heads/master
| 2021-01-10T17:43:38.577848
| 2015-12-09T10:46:15
| 2015-12-09T10:46:15
| 47,684,703
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,268
|
py
|
#Highest percentage in all india level
def high_percentage():
try:
#opening and reading the standerdised file
stdfile = open('Standardiseddata.csv','r')
firstline = stdfile.readline()
heading = firstline.split(',')
percentage_index = heading.index('mathpcor')
studentid_index = heading.index('STUID')
state_index = heading.index('state')
percentagelist = []
higher = 0
#iterating statements
for line in stdfile:
data = line.split(',')
            score = float(data[percentage_index])
            if score > higher: # compare the percentages numerically
                higher = score # keep the highest percentage seen so far
                percentagelist = data # keep the row with the highest percentage
except IOError as e:
print "Oops,something went wrong"
print e.errno
print e.strerror
else:
print "Highest_Scorer_ID"," ","Percentage"," ","State_Name\n"
print " ",percentagelist[studentid_index]," ",percentagelist[percentage_index]," ",percentagelist[state_index]
|
[
"tsccbe17@gmail.com"
] |
tsccbe17@gmail.com
|
22b06f917a2e60d9e5443d0a32cf7b4cb27e71c3
|
50f42e142c7b989afc9bc9d9fd53515923aceb56
|
/ML_practice/test_field.py
|
824433785cb92c1abe62e59a015e4140ff9a6c0c
|
[] |
no_license
|
shincling/MyCommon
|
7d02da4408f1ab0acf883845cbb8b8e54e364076
|
ae362fdef8d51c808645f7827a86e43d07db6e0f
|
refs/heads/master
| 2021-01-17T04:10:57.546936
| 2018-11-06T13:17:27
| 2018-11-06T13:17:27
| 45,384,609
| 2
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
def f():
print "Before first yield"
yield 1
print "Before second yield"
yield 2
print "After second yield"
g = f()
# g.next()
# g.next()
print "Before first next"
g.next()
print "Before second next"
g.next()
print "Before third yield"
g.next()
|
[
"shijing609@163.com"
] |
shijing609@163.com
|
b77f6cb4c37844887fe9769139d3b4cf953d6420
|
993ef8924418866f932396a58e3ad0c2a940ddd3
|
/Production/python/PrivateSamples/EMJ_UL18_mMed-1000_mDark-20_ctau-500_unflavored-down_cff.py
|
a689762b309b5edaf406483255496d14a1d0b40b
|
[] |
no_license
|
TreeMaker/TreeMaker
|
48d81f6c95a17828dbb599d29c15137cd6ef009a
|
15dd7fe9e9e6f97d9e52614c900c27d200a6c45f
|
refs/heads/Run2_UL
| 2023-07-07T15:04:56.672709
| 2023-07-03T16:43:17
| 2023-07-03T16:43:17
| 29,192,343
| 16
| 92
| null | 2023-07-03T16:43:28
| 2015-01-13T13:59:30
|
Python
|
UTF-8
|
Python
| false
| false
| 1,981
|
py
|
import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-1.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-10.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-2.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-3.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-4.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-5.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-6.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-7.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-8.root',
'root://cmseos.fnal.gov///store/group/lpcsusyhad/ExoEMJAnalysis2020/Signal.Oct.2021/UL18/step4_MINIAODv2_mMed-1000_mDark-20_ctau-500_unflavored-down_n-500_part-9.root',
] )
|
[
"enochnotsocool@gmail.com"
] |
enochnotsocool@gmail.com
|
d7bc1849f667986a5c952b587f0cea53c5180c85
|
968949635b157804c048a400f3c2459b097d1296
|
/Python/2017_example_MITPS1b.py
|
a61677dfe116c7f0ca02b98ebb753fe054299202
|
[] |
no_license
|
EliBildman/CS1-Projects
|
2e22ad7d0c34facdabef784b43fdca621129bf41
|
d3edc80fd1956e84775478e441b1fae27c3b2a9e
|
refs/heads/master
| 2021-10-23T23:31:39.931753
| 2019-03-20T18:34:59
| 2019-03-20T18:34:59
| 114,024,363
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 536
|
py
|
init_balance = float(input("Outstanding balance: "))
interest = float(input("Annual interest rate (dec): "))
monthly_payment = 0
months = 13
while months > 12:
months = 0
monthly_payment += 10
balance = init_balance
while balance > 0:
balance = round(balance * (1 + (interest / 12)), 2)
balance = round(balance - monthly_payment, 2)
months += 1
print(balance)
if months > 12:
break
print("Monthly payment:", monthly_payment)
print("Number of months needed", months)
|
[
"eli@bildman.com"
] |
eli@bildman.com
|
bb7711a1d9a0542bf0147818f036a11eb8eb630f
|
5f65e12a62b59aea9263f35240c960b7e6009aa5
|
/cb_scripts/game_on.py
|
5cd86411560201702863ecffeab27460f20cfee6
|
[
"MIT"
] |
permissive
|
christopher-burke/python-scripts
|
23e80b8e7f26a74ab68dc7d0ad1a8093d900cf8b
|
f5dceca0bdbe9de6197b26858600b792f6adff8a
|
refs/heads/main
| 2022-05-20T01:36:04.668447
| 2022-04-25T20:31:33
| 2022-04-25T20:31:33
| 6,054,247
| 1
| 1
|
MIT
| 2022-03-16T02:24:45
| 2012-10-03T01:49:53
|
Python
|
UTF-8
|
Python
| false
| false
| 1,233
|
py
|
#!/usr/bin/env python3
"""Game on.
Games won tracker.
"""
from dataclasses import dataclass, asdict
import json
import sys
from datetime import date  # uncommented: `date` is used by the Match dataclass below
@dataclass
class Player:
"""Player dataclass."""
name: str
@dataclass
class Match:
"""Match dataclass."""
game: str
date: date = date.today().__str__()
@dataclass
class Results:
"""Results dataclass."""
match: Match
player: Player
wins: int = 0
losses: int = 0
def load():
"""Load data from json file."""
with open('game_on.json') as json_file:
data = json.load(json_file)
return data
def write(data, *args, **kwargs):
"""Write data to the json file."""
with open('game_on.json', 'w') as json_file:
json.dump(data, json_file)
return True
def main():
"""Game on main funtion."""
pass
if __name__ == "__main__":
    if len(sys.argv) < 2:  # exit when no CLI arguments are given (the original check always exited)
        exit(0)
match = Match('Name') # -g "Name"
p1 = Player('Player 1') # -p1 "Name"
p2 = Player('Player 2') # -p1 "Name"
r1 = Results(match, p1, 2) # -r1 2
r2 = Results(match, p2, 12) # -r2 2
r1.losses = r2.wins
r2.losses = r1.wins
data = {}
data['result'] = [asdict(r1), asdict(r2)]
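    # Hypothetical follow-up (not in the original file): the results could be
    # persisted with the write() helper defined above, e.g.
    # write(data)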
|
[
"christopherjamesburke@gmail.com"
] |
christopherjamesburke@gmail.com
|
4b12cb9e5dcf0c50d61ba29d1ec577502c471c7c
|
a37240b1f7b62067c2e56577dd8710fa647dc779
|
/classifyClient/IMU.py
|
abc1b7ce378846da49842b41a55858191576f425
|
[] |
no_license
|
180D-FW-2020/Team5
|
6e82c57a22d5b0ed06f35e97e9d2723dec9853ac
|
edc0c9e1204f43f29ca2c10d34cf2af0e486a0e4
|
refs/heads/master
| 2023-03-13T16:56:26.604271
| 2021-03-12T01:04:23
| 2021-03-12T01:04:23
| 297,809,977
| 0
| 0
| null | 2021-03-09T21:10:34
| 2020-09-23T00:44:47
|
Python
|
UTF-8
|
Python
| false
| false
| 11,822
|
py
|
import smbus2 as smbus
bus = smbus.SMBus(1)
from LSM9DS0 import *
from LSM9DS1 import *
from LSM6DSL import *
from LIS3MDL import *
import time
BerryIMUversion = 99
def detectIMU():
#Detect which version of BerryIMU is connected using the 'who am i' register
#BerryIMUv1 uses the LSM9DS0
#BerryIMUv2 uses the LSM9DS1
#BerryIMUv3 uses the LSM6DSL and LIS3MDL
global BerryIMUversion
try:
#Check for BerryIMUv1 (LSM9DS0)
#If no LSM9DS0 is connected, there will be an I2C bus error and the program will exit.
#This section of code stops this from happening.
LSM9DS0_WHO_G_response = (bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_WHO_AM_I_G))
LSM9DS0_WHO_XM_response = (bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_WHO_AM_I_XM))
except IOError as e:
print('') #need to do something here, so we just print a space
else:
if (LSM9DS0_WHO_G_response == 0xd4) and (LSM9DS0_WHO_XM_response == 0x49):
print("Found BerryIMUv1 (LSM9DS0)")
BerryIMUversion = 1
try:
#Check for BerryIMUv2 (LSM9DS1)
        #If no LSM9DS1 is connected, there will be an I2C bus error and the program will exit.
#This section of code stops this from happening.
LSM9DS1_WHO_XG_response = (bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_WHO_AM_I_XG))
LSM9DS1_WHO_M_response = (bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_WHO_AM_I_M))
except IOError as f:
print('') #need to do something here, so we just print a space
else:
if (LSM9DS1_WHO_XG_response == 0x68) and (LSM9DS1_WHO_M_response == 0x3d):
print("Found BerryIMUv2 (LSM9DS1)")
BerryIMUversion = 2
try:
#Check for BerryIMUv3 (LSM6DSL and LIS3MDL)
#If no LSM6DSL or LIS3MDL is connected, there will be an I2C bus error and the program will exit.
#This section of code stops this from happening.
LSM6DSL_WHO_AM_I_response = (bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_WHO_AM_I))
LIS3MDL_WHO_AM_I_response = (bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_WHO_AM_I))
except IOError as f:
print('') #need to do something here, so we just print a space
else:
if (LSM6DSL_WHO_AM_I_response == 0x6A) and (LIS3MDL_WHO_AM_I_response == 0x3D):
print("Found BerryIMUv3 (LSM6DSL and LIS3MDL)")
BerryIMUversion = 3
time.sleep(1)
def writeByte(device_address,register,value):
bus.write_byte_data(device_address, register, value)
def readACCx():
acc_l = 0
acc_h = 0
if(BerryIMUversion == 1):
acc_l = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_X_L_A)
acc_h = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_X_H_A)
elif(BerryIMUversion == 2):
acc_l = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_X_L_XL)
acc_h = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_X_H_XL)
elif(BerryIMUversion == 3):
acc_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTX_L_XL)
acc_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTX_H_XL)
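    # combine the low and high register bytes into one 16-bit value and interpret
    # it as a signed (two's-complement) reading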
acc_combined = (acc_l | acc_h <<8)
return acc_combined if acc_combined < 32768 else acc_combined - 65536
def readACCy():
acc_l = 0
acc_h = 0
if(BerryIMUversion == 1):
acc_l = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_Y_L_A)
acc_h = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_Y_H_A)
elif(BerryIMUversion == 2):
acc_l = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_Y_L_XL)
acc_h = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_Y_H_XL)
elif(BerryIMUversion == 3):
acc_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTY_L_XL)
acc_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTY_H_XL)
acc_combined = (acc_l | acc_h <<8)
return acc_combined if acc_combined < 32768 else acc_combined - 65536
def readACCz():
acc_l = 0
acc_h = 0
if(BerryIMUversion == 1):
acc_l = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_Z_L_A)
acc_h = bus.read_byte_data(LSM9DS0_ACC_ADDRESS, LSM9DS0_OUT_Z_H_A)
elif(BerryIMUversion == 2):
acc_l = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_Z_L_XL)
acc_h = bus.read_byte_data(LSM9DS1_ACC_ADDRESS, LSM9DS1_OUT_Z_H_XL)
elif(BerryIMUversion == 3):
acc_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTZ_L_XL)
acc_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTZ_H_XL)
acc_combined = (acc_l | acc_h <<8)
return acc_combined if acc_combined < 32768 else acc_combined - 65536
def readGYRx():
gyr_l = 0
gyr_h = 0
if(BerryIMUversion == 1):
gyr_l = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_X_L_G)
gyr_h = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_X_H_G)
elif(BerryIMUversion == 2):
gyr_l = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_X_L_G)
gyr_h = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_X_H_G)
elif(BerryIMUversion == 3):
gyr_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTX_L_G)
gyr_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTX_H_G)
gyr_combined = (gyr_l | gyr_h <<8)
return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536
def readGYRy():
gyr_l = 0
gyr_h = 0
if(BerryIMUversion == 1):
gyr_l = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_Y_L_G)
gyr_h = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_Y_H_G)
elif(BerryIMUversion == 2):
gyr_l = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_Y_L_G)
gyr_h = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_Y_H_G)
elif(BerryIMUversion == 3):
gyr_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTY_L_G)
gyr_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTY_H_G)
gyr_combined = (gyr_l | gyr_h <<8)
return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536
def readGYRz():
gyr_l = 0
gyr_h = 0
if(BerryIMUversion == 1):
gyr_l = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_Z_L_G)
gyr_h = bus.read_byte_data(LSM9DS0_GYR_ADDRESS, LSM9DS0_OUT_Z_H_G)
elif(BerryIMUversion == 2):
gyr_l = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_Z_L_G)
gyr_h = bus.read_byte_data(LSM9DS1_GYR_ADDRESS, LSM9DS1_OUT_Z_H_G)
elif(BerryIMUversion == 3):
gyr_l = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTZ_L_G)
gyr_h = bus.read_byte_data(LSM6DSL_ADDRESS, LSM6DSL_OUTZ_H_G)
gyr_combined = (gyr_l | gyr_h <<8)
return gyr_combined if gyr_combined < 32768 else gyr_combined - 65536
def readMAGx():
mag_l = 0
mag_h = 0
if(BerryIMUversion == 1):
mag_l = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_X_L_M)
mag_h = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_X_H_M)
elif(BerryIMUversion == 2):
mag_l = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_X_L_M)
mag_h = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_X_H_M)
elif(BerryIMUversion == 3):
mag_l = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_X_L)
mag_h = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_X_H)
mag_combined = (mag_l | mag_h <<8)
return mag_combined if mag_combined < 32768 else mag_combined - 65536
def readMAGy():
mag_l = 0
mag_h = 0
if(BerryIMUversion == 1):
mag_l = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_Y_L_M)
mag_h = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_Y_H_M)
elif(BerryIMUversion == 2):
mag_l = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_Y_L_M)
mag_h = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_Y_H_M)
elif(BerryIMUversion == 3):
mag_l = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_Y_L)
mag_h = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_Y_H)
mag_combined = (mag_l | mag_h <<8)
return mag_combined if mag_combined < 32768 else mag_combined - 65536
def readMAGz():
mag_l = 0
mag_h = 0
if(BerryIMUversion == 1):
mag_l = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_Z_L_M)
mag_h = bus.read_byte_data(LSM9DS0_MAG_ADDRESS, LSM9DS0_OUT_Z_H_M)
elif(BerryIMUversion == 2):
mag_l = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_Z_L_M)
mag_h = bus.read_byte_data(LSM9DS1_MAG_ADDRESS, LSM9DS1_OUT_Z_H_M)
elif(BerryIMUversion == 3):
mag_l = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_Z_L)
mag_h = bus.read_byte_data(LIS3MDL_ADDRESS, LIS3MDL_OUT_Z_H)
mag_combined = (mag_l | mag_h <<8)
return mag_combined if mag_combined < 32768 else mag_combined - 65536
def initIMU():
if(BerryIMUversion == 1): #For BerryIMUv1
#initialise the accelerometer
        writeByte(LSM9DS0_ACC_ADDRESS,LSM9DS0_CTRL_REG1_XM, 0b01100111) #z,y,x axis enabled, continuous update, 100Hz data rate
writeByte(LSM9DS0_ACC_ADDRESS,LSM9DS0_CTRL_REG2_XM, 0b00011000) #+/- 8G full scale
#initialise the magnetometer
writeByte(LSM9DS0_MAG_ADDRESS,LSM9DS0_CTRL_REG5_XM, 0b11110000) #Temp enable, M data rate = 50Hz
writeByte(LSM9DS0_MAG_ADDRESS,LSM9DS0_CTRL_REG6_XM, 0b01100000) #+/- 12gauss
writeByte(LSM9DS0_MAG_ADDRESS,LSM9DS0_CTRL_REG7_XM, 0b00000000) #Continuous-conversion mode
#initialise the gyroscope
writeByte(LSM9DS0_GYR_ADDRESS,LSM9DS0_CTRL_REG1_G, 0b00001111) #Normal power mode, all axes enabled
        writeByte(LSM9DS0_GYR_ADDRESS,LSM9DS0_CTRL_REG4_G, 0b00110000) #Continuous update, 2000 dps full scale
elif(BerryIMUversion == 2): #For BerryIMUv2
#initialise the accelerometer
writeByte(LSM9DS1_ACC_ADDRESS,LSM9DS1_CTRL_REG5_XL,0b00111000) #z, y, x axis enabled for accelerometer
writeByte(LSM9DS1_ACC_ADDRESS,LSM9DS1_CTRL_REG6_XL,0b00111000) #+/- 8g
#initialise the gyroscope
writeByte(LSM9DS1_GYR_ADDRESS,LSM9DS1_CTRL_REG4,0b00111000) #z, y, x axis enabled for gyro
writeByte(LSM9DS1_GYR_ADDRESS,LSM9DS1_CTRL_REG1_G,0b10111000) #Gyro ODR = 476Hz, 2000 dps
writeByte(LSM9DS1_GYR_ADDRESS,LSM9DS1_ORIENT_CFG_G,0b10111000) #Swap orientation
#initialise the magnetometer
        writeByte(LSM9DS1_MAG_ADDRESS,LSM9DS1_CTRL_REG1_M, 0b10011100) #Temp compensation enabled, low power mode, 80Hz ODR
writeByte(LSM9DS1_MAG_ADDRESS,LSM9DS1_CTRL_REG2_M, 0b01000000) #+/- 2gauss
        writeByte(LSM9DS1_MAG_ADDRESS,LSM9DS1_CTRL_REG3_M, 0b00000000) #continuous update
writeByte(LSM9DS1_MAG_ADDRESS,LSM9DS1_CTRL_REG4_M, 0b00000000) #lower power mode for Z axis
elif(BerryIMUversion == 3): #For BerryIMUv3
#initialise the accelerometer
writeByte(LSM6DSL_ADDRESS,LSM6DSL_CTRL1_XL,0b10011111) #ODR 3.33 kHz, +/- 8g , BW = 400hz
writeByte(LSM6DSL_ADDRESS,LSM6DSL_CTRL8_XL,0b11001000) #Low pass filter enabled, BW9, composite filter
writeByte(LSM6DSL_ADDRESS,LSM6DSL_CTRL3_C,0b01000100) #Enable Block Data update, increment during multi byte read
#initialise the gyroscope
writeByte(LSM6DSL_ADDRESS,LSM6DSL_CTRL2_G,0b10011100) #ODR 3.3 kHz, 2000 dps
#initialise the magnetometer
        writeByte(LIS3MDL_ADDRESS,LIS3MDL_CTRL_REG1, 0b11011100) # Temp sensor enabled, high performance, ODR 80 Hz, FAST ODR disabled and self test disabled.
writeByte(LIS3MDL_ADDRESS,LIS3MDL_CTRL_REG2, 0b00100000) # +/- 8 gauss
writeByte(LIS3MDL_ADDRESS,LIS3MDL_CTRL_REG3, 0b00000000) # Continuous-conversion mode
|
[
"jonryangoh@gmail.com"
] |
jonryangoh@gmail.com
|
e50a825fdb662cf8e4ddd282dfbd05d6ad92f388
|
3d00db49663c4694c2a2d83ccb279d16c4f43389
|
/iaas.py
|
7afb0581cdf2e7ec5bbff0ef63e55d2c282ea7e5
|
[] |
no_license
|
harshita978/cloud
|
16cee4cad4073e087bf3526261d6f09c67354e31
|
710872fd0e8e57ea50b01171d9f5b81e8d43cfad
|
refs/heads/master
| 2021-01-20T11:18:16.923515
| 2017-08-24T15:05:20
| 2017-08-24T15:05:20
| 83,948,327
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,010
|
py
|
#!/usr/bin/python2
import commands
import cgi,cgitb
print "Content-type : text/html"
print ""
cgitb.enable()
x=cgi.FieldStorage()
user=x.getvalue('usr')
password=x.getvalue('passwd')
osname=x.getvalue('name')
osram=x.getvalue('ram')
oscpu=x.getvalue('cpu')
por=x.getvalue('port')
#hd=x.getvalue('hd')
#for radio button
n=x.getvalue('radio')
os=x.getvalue('y')
commands.getoutput("systemctl restart httpd")
commands.getoutput("setenforce 0")
commands.getoutput("itables -F")
a=commands.getoutput("cat /var/www/html/users.txt | grep "+user+ " | awk '{print$1}'")
b=commands.getoutput("cat /var/www/html/users.txt | grep "+password+ " | awk '{print$7}'")
#LINUX os--------------------------------------------------------------------
if (os=="1" ) and (a !="") and (b !=""):
#liveboot------------------------------------------------------------
if (n=="1"):
commands.getoutput("sudo qemu-img create -f qcow2 -b /var/lib/libvirt/images/rhel7.1.qcow2 /var/lib/libvirt/images/"+osname+".qcow2")
commands.getoutput("sudo virt-install --name "+osname+" --ram "+osram+" --vcpu "+oscpu+" --disk=/var/lib/libvirt/images/"+osname+".qcow2 --import --noautoconsole --graphics=vnc,listen=0.0.0.0,port="+por+",password="+password)
f1=open('/var/www/html/clienttar/iaasclient.py','w+')
f1.write("#!/usr/bin/python2 \nimport os\nos.system('vncviewer 192.168.43.98:"+por+"')")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/clienttar/iaasclient.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_iaas.tar /var/www/html/clienttar/iaasclient.py")
#commands.getoutput("sudo cd /var/www/html/websockify-master")
commands.getoutput("sudo /var/www/html/websockify-master/./run -D 7000 192.168.43.98:"+por)
print "<html>"
print "<p><a href='http://192.168.43.98/vnc/vnc.html'>Start</a>OS on browser</p>"
print "<p>><a href='http://192.168.43.98/clienttar/"+user+"_iaas.tar' download>Downlode</a> tar for VNC connectivity</p>"
print "<p>Go to your vnc viewer and connect using this ip & potrt[192.168.43.98:"+por+"]</p>"
print "</html>"
#Manually install----------------------------------------------------
elif (n=="2"):
#virt-install --name asd --ram 1024 --vcpu 1 --cdrom /root/Desktop/iso/rhel7.iso --disk path=/var/lib/libvirt/images/asd.qcow2,size=9 --noautoconsole --graphics=vnc,listen=0.0.0.0,port=5909,password=1325
commands.getoutput("sudo virt-install --name "+osname+" --ram "+osram+" --vcpu "+oscpu+" --cdrom /root/Desktop/iso/rhel7.iso --disk path=/var/lib/libvirt/images/"+osname+".qcow2,size=9 --noautoconsole --graphics=vnc,listen=0.0.0.0,port="+por+",password="+password )
f1=open('/var/www/html/clienttar/iaasclient.py','w+')
f1.write("#!/usr/bin/python2 \nimport os\nos.system('vncviewer 192.168.43.98:"+por+"')")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/clienttar/iaasclient.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_iaas.tar /var/www/html/clienttar/iaasclient.py")
commands.getoutput("sudo /var/www/html/websockify-master/./run -D 7000 192.168.43.98:"+por)
print "<html>"
print "<p><a href='http://192.168.43.98/vnc/vnc.html'>Start</a>OS on browser</p>"
print "<p>><a href='http://192.168.43.98/clienttar/"+user+"_iaas.tar' download>Downlode</a> tar for VNC connectivity</p>"
print "<p>Go to your vnc viewer and connect using this ip & potrt[192.168.43.98:"+por+"]</p>"
print "</html>"
#snap---------------------------------------------------------------
elif (n=="3"):
commands.getoutput("sudo qemu-img create -f qcow2 -b /var/lib/libvirt/images/"+osname+".qcow2 /var/lib/libvirt/images/"+osname+"_snap.qcow2")
commands.getoutput("sudo virt-install --name "+osname+" --ram "+osram+" --vcpu "+oscpu+" --disk=/var/lib/libvirt/images/"+osname+"_snap.qcow2 --import --noautoconsole --graphics=vnc,listen=0.0.0.0,port="+por+",password="+password)
f1=open('/var/www/html/clienttar/iaasclient.py','w+')
f1.write("#!/usr/bin/python2 \nimport os\nos.system('vncviewer 192.168.43.98:"+por+"')")
f1.close()
commands.getoutput('sudo chmod 777 /var/www/html/clienttar/iaasclient.py')
commands.getoutput("sudo tar -cvf /var/www/html/clienttar/"+user+"_iaassnap.tar /var/www/html/clienttar/iaasclient.py")
#commands.getoutput("sudo cd /var/www/html/websockify-master")
commands.getoutput("sudo /var/www/html/websockify-master/./run -D 7000 192.168.43.98:"+por)
print "<html>"
print "<p><a href='http://192.168.43.98/vnc/vnc.html'>Start</a>OS on browser</p>"
print "<p>><a href='http://192.168.43.98/clienttar/"+user+"_iaassnap.tar' download>Downlode</a> tar for VNC connectivity</p>"
print "<p>Go to your vnc viewer and connect using this ip & potrt[192.168.43.98:"+por+"]</p>"
print "</html>"
#Windows os------------------------------------------------------------------
elif (os=="1" ) and (a !="") and (b !=""):
pass;
else :
print "<html>"
print "Wrong user name or password"
print "</html>"
|
[
"noreply@github.com"
] |
harshita978.noreply@github.com
|
65c743b47d21b4a5f4bc9d61deec30998b053b8f
|
349dd60aea666ef28e54bab5becc9df5dd55b090
|
/venv/Scripts/pip-script.py
|
0660ac3e8a2c89f82bf43bc472935d95540cf396
|
[] |
no_license
|
Firesean/DotGame
|
22cd44caa72d9150d3f7667b2bf9b9352277f548
|
02763a79de7d32a7dd0f7c1fba43d95ba1595a52
|
refs/heads/master
| 2023-05-26T11:40:10.217601
| 2023-05-12T19:51:13
| 2023-05-12T19:51:13
| 209,174,090
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 411
|
py
|
#!C:\Users\Owner\PycharmProjects\DotGame\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
)
|
[
"46581862+Firesean@users.noreply.github.com"
] |
46581862+Firesean@users.noreply.github.com
|
a7b37f0a32f43b1387f9bb0b53aeb674392056fe
|
729ce64f0270821b12ce8f3cdda25dcd16c7afde
|
/flask_demo/association_analysis/alarm_association.py
|
b0dd6b05670331547e90778501a0182ca057a74f
|
[] |
no_license
|
XLab-Tongji/Correlation_Analysis
|
2ebb1e049cc8043d46fb1bbc37ef8349d9f1fda6
|
c7b7b3118bbd2a53ac835c4457d43f5e58a009e4
|
refs/heads/master
| 2020-06-30T10:15:23.600334
| 2019-08-05T08:56:24
| 2019-08-05T08:56:24
| 200,798,908
| 0
| 0
| null | 2019-08-06T07:23:40
| 2019-08-06T07:23:39
| null |
UTF-8
|
Python
| false
| false
| 4,831
|
py
|
#-*- coding: UTF-8 -*-
from __future__ import division
import random
import math
from operator import itemgetter
def mixdata(alarmtime, timeseries_set, timeseries):
"""
    :param alarmtime: sequence of alarm timestamps, already sorted
    :param timeseries_set: set of time series, one per alarm timestamp
    :param timeseries: time series data covering the whole alarm interval
    :return: mixset is the mixed set, alarm_number the number of alarm samples, random_number the number of random samples
"""
mixset = []
alarm_number = 0
random_number = 0
randomnum = 20
for i in range(len(alarmtime)):
data = timeseries_set[i]
data.append('alarm')
if len(data) > 1 and data not in mixset:
mixset.append(data)
alarm_number += 1
while randomnum > 0:
end = random.randint(5, len(timeseries))
start = end - 5
data = timeseries[start:end]
data.append("random")
randomnum -= 1
if len(data) > 1 and data not in mixset:
mixset.append(data)
random_number += 1
print(mixset)
print(alarm_number)
print(random_number)
return mixset, alarm_number, random_number
def distance(data1,data2):
dis = 0
for i in range(0, len(data1)-1):
dis += (data1[i]-data2[i]) ** 2
dis = math.sqrt(dis)
return dis
def feature_screen(mixset, alarm_number, random_number):
"""
    :param mixset: mixed set of alarm sequences and random sequences
    :param alarm_number: number of alarm sequences
    :param random_number: number of random sequences
    :return: whether the monitored metric is correlated with the alarm
"""
if alarm_number == 0 or random_number == 0:
return False
sum_number = alarm_number + random_number
    # mean
mean = (alarm_number/sum_number) ** 2 + (random_number/sum_number) ** 2
print("mean", mean)
    # standard deviation
stdDev = (alarm_number/sum_number) * (random_number/sum_number) * (1 + 4 * (random_number/sum_number) * (alarm_number / sum_number))
print("stdDev", stdDev)
R = 10
trp = 0
for j in range(len(mixset)):
tempdic = {}
for k in range(len(mixset)):
if j == k:
continue
dis = distance(mixset[j], mixset[k])
tempdic.setdefault(k, dis)
#print(tempdic)
temp_list = sorted(tempdic.items(), key=itemgetter(1), reverse=False)[0:R]
for k in temp_list:
if mixset[j][-1] == mixset[k[0]][-1]:
trp += 1
trp = float(trp / (R*sum_number))
print("sum", sum_number)
print("trp", trp)
check = (abs(trp-mean) / stdDev) * math.sqrt(R*sum_number)
print("check", check)
print("---------------------------")
return trp
def get_GR(alarmseries,nomalseries):
'''
    :param alarmseries: time series of a single alarm
    :param nomalseries: time series of the overall alarm period
:return:
'''
    cutnum = 10  # number of bins to split the value range into
maxvalue = float("-inf")
minvalue = float("inf")
GR = 0
while None in alarmseries:
alarmseries.remove(None)
C1 = len(alarmseries)
if max(alarmseries) > maxvalue:
maxvalue = max(alarmseries)
if min(alarmseries) < minvalue:
minvalue = min(alarmseries)
while None in nomalseries:
nomalseries.remove(None)
C2 = len(nomalseries)
if max(nomalseries) > maxvalue:
maxvalue = max(nomalseries)
if min(nomalseries) < minvalue:
minvalue = min(nomalseries)
value_gap = (maxvalue-minvalue) / cutnum
print(C1)
print(C2)
if C1 == 0 or C2 == 0 or value_gap == 0:
return GR
HD = (C1 / (C1+C2)) * math.log((C1 / (C1+C2)), 2) + (C2 / (C1+C2)) * math.log((C2 / (C1+C2)), 2)
Neg = [0] * (cutnum+1)
Pos = [0] * (cutnum+1)
for value in alarmseries:
temp_count = int((value-minvalue) / value_gap) + 1
if temp_count > cutnum:
temp_count = cutnum
Neg[temp_count] += 1
for value in nomalseries:
temp_count = int((value-minvalue) / value_gap) + 1
if temp_count > cutnum:
temp_count = cutnum
Pos[temp_count] += 1
HDA = 0
HAD = 0
for j in range(1, cutnum + 1):
temp = 0
if Neg[j] != 0 and Pos[j] != 0:
HAD += ((Neg[j]+Pos[j]) / (C1+C2)) * math.log(((Neg[j]+Pos[j]) / (C1+C2)), 2)
temp = (Neg[j] / (Neg[j]+Pos[j])) * math.log((Neg[j] / (Neg[j]+Pos[j])), 2) + (Pos[j] / (Neg[j]+Pos[j])) * math.log((Pos[j] / (Neg[j]+Pos[j])), 2)
elif Neg[j] == 0 and Pos[j] != 0:
HAD += ((Neg[j]+Pos[j]) / (C1+C2)) * math.log(((Neg[j]+Pos[j]) / (C1 + C2)), 2)
elif Pos[j] == 0 and Neg[j] != 0:
HAD += ((Neg[j]+Pos[j]) / (C1+C2)) * math.log(((Neg[j]+Pos[j]) / (C1+C2)), 2)
HDA += ((Neg[j]+Pos[j]) / (C1+C2)) * temp
GR = (HD - HDA) / HAD
return GR
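
# Minimal usage sketch (hypothetical inputs, not from the original project):
#   mixset, n_alarm, n_random = mixdata(alarmtime, timeseries_set, timeseries)
#   trp = feature_screen(mixset, n_alarm, n_random)
#   gr = get_GR(single_alarm_series, overall_series)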
|
[
"547095144@qq.com"
] |
547095144@qq.com
|
f3b6fcf3ff2434a6798b06c31ddd4ea7418aaf06
|
6d065137cb8d0ec08fc0c4b41ffddc2a90e9a949
|
/apps/base_dato/migrations/0001_initial.py
|
41b2a422f0ce2693b943a452224545cbafc37040
|
[] |
no_license
|
Morales1408/Software-engineering
|
2aa6ddc2503aa1be8e7809cf732a9586d63def52
|
29206f9e3fef4c28a9eefd59839091f292c0e87d
|
refs/heads/master
| 2022-11-27T17:15:00.356726
| 2020-07-27T19:21:50
| 2020-07-27T19:21:50
| 282,942,176
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,853
|
py
|
# Generated by Django 3.0.7 on 2020-07-06 16:54
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='chofere',
fields=[
('nombre', models.CharField(default='Ninungo', max_length=30, primary_key=True, serialize=False, verbose_name='Nombre del chofer')),
('fecha_contrato', models.DateField(verbose_name='Fecha de contrato')),
('edad', models.IntegerField(verbose_name='Edad del conductor')),
],
),
migrations.CreateModel(
name='unidade',
fields=[
('numero', models.CharField(default='Unidad', max_length=30, primary_key=True, serialize=False, verbose_name='Número de la unidad')),
('fecha_adquisicion', models.DateField(verbose_name='Fecha de adquisición')),
('chofer', models.ManyToManyField(to='base_dato.chofere', verbose_name='Nombre del conductor designado')),
],
),
migrations.CreateModel(
name='viaje',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('duracion_viaje', models.IntegerField(verbose_name='Duración aproximada del viaje (hrs)')),
('fecha', models.DateField()),
('precio_viaje', models.FloatField(verbose_name='Precio del boleto')),
('asientos', models.IntegerField(verbose_name='Asientos disponibles')),
('hora', models.CharField(max_length=30, verbose_name='Hora a la que sale el viaje')),
('paradero', models.CharField(default='Paradero', max_length=30, verbose_name='Punto de partida')),
('destino', models.CharField(default='Destino', max_length=30, verbose_name='Destino final')),
('numero_unidad', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='base_dato.unidade', verbose_name='Número de la unidad')),
],
),
migrations.CreateModel(
name='usuario',
fields=[
('nombre', models.CharField(default=1, max_length=30, primary_key=True, serialize=False, verbose_name='Nombre del usuario')),
('fecha', models.DateField(verbose_name='Fecha en que viaja')),
('boletos', models.IntegerField(verbose_name='Boletos que compró')),
('hora', models.CharField(max_length=30, verbose_name='Hora a la que viaja')),
('unidad', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='base_dato.unidade', verbose_name='Unidad en la que viaja')),
],
),
]
|
[
"st1809116@upy.edu.mx"
] |
st1809116@upy.edu.mx
|
e954d47805f5d4d7ba09ea9172d9c32d2bb332c4
|
e30469369764d7f1a5bd8b40970c574a84b9b8fd
|
/theReal/theRealOTC/urls.py
|
52427268fda0470d10e8e59b65563c5e2b1728a3
|
[] |
no_license
|
seedatnabeel/OTC
|
f1ba57a714387667a723fa8709ed798ceb2d00a4
|
7fd7d938ded592472c05b51cfdedfcddf5512e93
|
refs/heads/master
| 2021-01-17T17:17:22.578481
| 2016-07-07T14:22:24
| 2016-07-07T14:22:24
| 61,906,244
| 0
| 0
| null | 2016-07-02T06:11:18
| 2016-06-24T19:00:02
|
Python
|
UTF-8
|
Python
| false
| false
| 318
|
py
|
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^symptoms/', include('symptoms.urls')),
url(r'^causes/', include('causes.urls')),
url(r'^meds/', include('meds.urls')),
url(r'^contact/', include('contact.urls')),
]
|
[
"seedatnabeel@gmail.com"
] |
seedatnabeel@gmail.com
|
f8441dfac7d30823607f006ff4e75a1d25cff2e6
|
d5ac351fd1e521fbd9bf5f0883a5a139d7f740e4
|
/forms.py
|
55f8af9c9259943890584f7d6557fd92dd2aed2f
|
[] |
no_license
|
Abhishekkumartrn/Kgp-networking-app-interface
|
05d1877c967c64f7e290933af277b926b8396b07
|
250168d64a1d3c251388af57c96f6131a9afcde3
|
refs/heads/master
| 2022-12-03T04:42:36.188637
| 2020-08-08T08:48:20
| 2020-08-08T08:48:20
| 286,686,263
| 0
| 0
| null | 2020-08-11T08:16:57
| 2020-08-11T08:16:57
| null |
UTF-8
|
Python
| false
| false
| 1,000
|
py
|
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField, BooleanField
from wtforms.validators import DataRequired, Length, Email, EqualTo
class RegForm(FlaskForm):
username = StringField('Username',
validators=[DataRequired(),Length(min=8, max=15)])
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired()])
confirm_password = PasswordField('Confirm Password',
validators=[DataRequired(), EqualTo('password')])
submit = SubmitField('Sign Up')
class LoginForm(FlaskForm):
email = StringField('Email',
validators=[DataRequired(), Email()])
password = PasswordField('Password',
validators=[DataRequired()])
remember = BooleanField('Remember Me')
submit = SubmitField('Login')
|
[
"noreply@github.com"
] |
Abhishekkumartrn.noreply@github.com
|
c98b8344c6499cdf92eead15af7b0f8c6e83b2aa
|
db9ea672e8e21c45691904e3f883db87d4f702f9
|
/sorting/sort.py
|
d84c18610d615221cde64c5b8f35666b0b0231f7
|
[] |
no_license
|
manoj06/Miscellaneous
|
5f24837fdefdbef12ad1effef19de480219fbd01
|
b03f817d5eba67b8da2014bf8a9278b06cc98d7f
|
refs/heads/master
| 2021-05-10T09:33:26.960958
| 2018-01-25T15:14:08
| 2018-01-25T15:14:08
| 118,928,500
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 263
|
py
|
a=[1,5,7,8,9,2]
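# Selection-style sort: an element with exactly len(a)-m larger elements is the
# m-th smallest, so values are appended to `empty` in ascending order.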
empty=[]
m=1
while True:
for i in range(len(a)):
count=0
for k in range(len(a)):
if a[i]<a[k]:
count=count+1
if count==len(a)-m:
if a[i] not in empty:
empty.append(a[i])
m=m+1
if len(empty)==len(a):
print empty
break
|
[
"noreply@github.com"
] |
manoj06.noreply@github.com
|
731ea584c6c77788eb745d2777f2ade7e83e3595
|
baf418eb9fc7756a6eea75fe0d8c9fa18a5c7fb0
|
/works/migrations/0001_initial.py
|
67cc0624bde8f5e4158955d253308050f7d19ddd
|
[] |
no_license
|
ansakoy/dsw
|
37c47dfd6f4010d7c4d25cab758ed1a09645ce91
|
c676a9920205b46259df593ceb62f0d0a8198041
|
refs/heads/master
| 2021-06-14T00:31:42.138547
| 2019-10-09T14:49:55
| 2019-10-09T14:49:55
| 181,659,065
| 0
| 0
| null | 2021-06-10T21:50:38
| 2019-04-16T09:34:18
|
HTML
|
UTF-8
|
Python
| false
| false
| 3,838
|
py
|
# Generated by Django 2.2 on 2019-04-19 11:31
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Genre',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name_ru', models.CharField(max_length=250, null=True)),
('name_hy', models.CharField(max_length=250, null=True)),
('name_en', models.CharField(max_length=250, null=True)),
],
),
migrations.CreateModel(
name='Opus',
fields=[
('opus_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('title_ru', models.CharField(max_length=500)),
('title_hy', models.CharField(max_length=500)),
('title_en', models.CharField(max_length=500)),
('comment_ru', models.CharField(max_length=1000)),
('comment_hy', models.CharField(max_length=1000)),
('comment_en', models.CharField(max_length=1000)),
('year', models.CharField(max_length=4, null=True)),
('genre', models.ForeignKey(null=True, on_delete=django.db.models.deletion.PROTECT, to='works.Genre', verbose_name='Жанр')),
],
),
migrations.CreateModel(
name='Performance',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('performance_url', models.CharField(max_length=1000)),
('perform_date', models.DateField(null=True)),
('location', models.CharField(max_length=500, null=True)),
('opus', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='performances', to='works.Opus', verbose_name='Сочинение')),
],
),
migrations.CreateModel(
name='Person',
fields=[
('person_id', models.CharField(max_length=10, primary_key=True, serialize=False)),
('fname_ru', models.CharField(max_length=250)),
('lname_ru', models.CharField(max_length=250, null=True)),
('fname_hy', models.CharField(max_length=250, null=True)),
('lname_hy', models.CharField(max_length=250, null=True)),
('fname_en', models.CharField(max_length=250, null=True)),
('lname_en', models.CharField(max_length=250, null=True)),
],
),
migrations.CreateModel(
name='Performer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('role', models.CharField(max_length=250)),
('performance', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='performers', to='works.Performance', verbose_name='Исполнение')),
('performer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='works.Person', verbose_name='Исполнитель')),
],
),
migrations.AddField(
model_name='opus',
name='libretto_by',
field=models.ManyToManyField(related_name='libr_works', to='works.Person', verbose_name='Авторы либретто'),
),
migrations.AddField(
model_name='opus',
name='lyrics_by',
field=models.ManyToManyField(related_name='lyr_works', to='works.Person', verbose_name='Авторы текста'),
),
]
|
[
"ansakoy@gmail.com"
] |
ansakoy@gmail.com
|
a8ef2e8c10dd9e7ee0a932102a223060dfdcb6e8
|
6ec9f752e83928b7b906ba7179c6d706dfad5da1
|
/app/gestionPedidos/form.py
|
744ee780d07d15f036178f4a5f90e51bd24a5bd1
|
[] |
no_license
|
daniel-0s/django_pills_course
|
31f8a77f70227b88ceca7ca5c8d929f95207a4ef
|
587465187989600b252de0ac086d49429d8ef7a3
|
refs/heads/master
| 2022-10-06T12:03:02.268239
| 2020-06-09T22:27:55
| 2020-06-09T22:27:55
| 268,869,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 164
|
py
|
from django import forms
class FormularioContacto (forms.Form):
asunto = forms.CharField()
email = forms.EmailField()
mensaje = forms.CharField()
|
[
"daniel@MacBook-Pro.local"
] |
daniel@MacBook-Pro.local
|
b993149959091524ad909c1a0096aa98eb498b9e
|
1f351d7178b430a329bf52a3894738eff6db8e72
|
/producer.py
|
f641ad7d10c01f871c620f39fe4c5fa105772392
|
[
"MIT"
] |
permissive
|
ulfox/kafka-video-stream
|
2e681cede1616bb99840e7bdfe30562abbd45f4d
|
e60b79415517c6e8528f98bf31687bd721006780
|
refs/heads/master
| 2022-02-09T14:27:20.460847
| 2022-02-01T12:43:20
| 2022-02-01T12:43:20
| 219,063,355
| 7
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,165
|
py
|
from sys import argv, exit
from time import sleep
import cv2
from kafka import KafkaProducer
class kafkaVideoStreaming():
def __init__(self, bootstrap_servers, topic, videoFile, client_id, batch_size=65536, frq=0.001):
self.videoFile = videoFile
self.topicKey = str(videoFile)
self.topic = topic
self.batch_size = batch_size
self.client_id = client_id
self.bootstrap_servers = bootstrap_servers
self.frq = frq
def setProducer(self):
self.producer = KafkaProducer(
bootstrap_servers=self.bootstrap_servers,
api_version=(0,10,1),
client_id=self.client_id,
acks=1,
value_serializer=None,
key_serializer=str.encode,
batch_size=self.batch_size,
compression_type='gzip',
linger_ms=0,
buffer_memory=67108864,
max_request_size=1048576,
max_in_flight_requests_per_connection=1,
retries=1,
)
def reportCallback(self, record_metadata):
print("Topic Record Metadata: ", record_metadata.topic)
print("Parition Record Metadata: ", record_metadata.partition)
print("Offset Record Metatada: ", record_metadata.offset)
def errCallback(self, excp):
print('Errback', excp)
def publishFrames(self, payload):
self.producer.send(
topic=self.topic, key=self.topicKey, value=payload
).add_callback(
self.reportCallback
).add_errback(
self.errCallback
)
def run(self):
try:
print("Opening file %s" % self.videoFile)
__VIDEO_FILE = cv2.VideoCapture(self.videoFile)
except:
raise
self.setProducer()
        print(
            "Publishing: {v}\n"
            "\tBatch Size: {b},\n"
            "\tSleep: {f}s\n"
            "\tTarget Topic: {t}\n"
            "\tHost: {h}".format(
                v=self.topicKey,
                b=self.batch_size,
                f=self.frq,
                t=self.topic,
                h=self.bootstrap_servers
            )
        )
self.keep_processing = True
try:
while(__VIDEO_FILE.isOpened()) and self.keep_processing:
readStat, frame = __VIDEO_FILE.read()
                if not readStat:
                    self.keep_processing = False
                    continue  # no valid frame at end of stream; skip encoding
                ret, buffer = cv2.imencode('.jpg', frame)
self.publishFrames(buffer.tostring())
sleep(self.frq)
if self.keep_processing:
print('Finished processing video %s' % self.topicKey)
else:
print("Error while reading %s" % self.topicKey)
__VIDEO_FILE.release()
except KeyboardInterrupt:
__VIDEO_FILE.release()
print("Keyboard interrupt was detected. Exiting...")
if __name__ == "__main__":
videoStream = kafkaVideoStreaming(
bootstrap_servers='localhost:9092',
topic='KafkaVideoStream',
videoFile=argv[1],
client_id='KafkaVideoStreamClient',
)
videoStream.run()
|
[
"christos"
] |
christos
|
f0da7aa51ef368c2762cf0033e027208273b4603
|
41188a72facc51c65d0d58efe127f5e8c8811f5e
|
/0046. Permutations/Solution.py
|
76886c436d83c51c92b29bc0f627d71268d88c1c
|
[
"MIT"
] |
permissive
|
furutuki/LeetCodeSolution
|
74ccebc8335125bbc4cbf1a76eb8d4281802f5b9
|
089d27af04bf81149251787409d1866c7c4390fb
|
refs/heads/master
| 2022-10-31T08:46:15.124759
| 2022-10-25T02:57:54
| 2022-10-25T02:57:54
| 168,449,346
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
from typing import List
class Solution:
def permute(self, nums: List[int]) -> List[List[int]]:
def dfs(num: List[int], cur_res: List[int]):
if not num:
ret.append(cur_res)
return
else:
for i in range(len(num)):
dfs(num[:i] + num[i + 1:], cur_res + [num[i]])
ret = []
dfs(nums, [])
return ret
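
# Example (illustrative): Solution().permute([1, 2, 3]) returns all 3! = 6 orderings:
# [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]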
|
[
"furutuki@foxmail.com"
] |
furutuki@foxmail.com
|
a51cafd8e5a3eaf78fe28d734db7d6fe74c76867
|
ea5403586b2b67cd84c927de649318693236251d
|
/Python/04ControlFlow/03DataTypes/Exercises/01String.py
|
ed3df598cc05f7d67136104d50542943582f227c
|
[] |
no_license
|
Mybro1968/AWS
|
ce71d175a13a85cc71ff5903fce4c2a1c73dc798
|
22362214897167649250dbed9b17065699795e64
|
refs/heads/master
| 2020-03-22T11:27:44.032406
| 2018-07-09T09:07:52
| 2018-07-09T09:07:52
| 139,972,100
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 169
|
py
|
####
#01String.py
# strings 3 words together
####
word1 = "Good"
word2 = "Day"
word3 = "Gary"
sentence = word1 +" " + word2 + " " + word3
print(sentence)
|
[
"noreply@github.com"
] |
Mybro1968.noreply@github.com
|
3e6a3ec1448066c03ebb5cb49c3430eec98d982a
|
c18f8172cdec913b3df3c241c343c1634ff1f26c
|
/Pluralsight/Intermediate/Unit_Testing_with_Python/1_Unit_Testing_Fundamentals/5_Poor_test/test_phonebook.py
|
2a023f48ae679afd622685897b5488c281b5344c
|
[] |
no_license
|
Gwinew/To-Lern-Python-Beginner
|
0b0acb370790b1e4f3832ff8472cebd009e57bae
|
7d7c01482419b4fb67bdcddd3e379cd4ef43ac2e
|
refs/heads/master
| 2021-07-02T06:52:35.347061
| 2020-08-31T17:28:31
| 2020-08-31T17:28:31
| 146,579,656
| 2
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,135
|
py
|
# Poor test case
"""Given a list of names and phone numbers.
Make a Phonebook
Determine if it is consistent:
- no number is a prefix of another
- e.g. Bob 91125426, Anna 97625992
- Emergency 911
- Bob and Emergency are inconsistent
"""
import unittest
from phonebook import PhoneBook
class PhoneBookTest(unittest.TestCase):
def setUp(self)-> None:
self.phonebook = PhoneBook()
def test_lookup_by_name(self): # Test Case name
self.phonebook.add("Bob", '12345') # Arrange
number = self.phonebook.lookup("Bob") # Act
self.assertEqual("12345", number) # Assert
def test_missing_name(self):
with self.assertRaises(KeyError):
self.phonebook.lookup("missing")
    def test_empty_phonebook_is_consistent(self):
self.assertTrue(self.phonebook.is_consistent())
def test_is_consistent(self): # Test Case Name
self.phonebook.add("Bob", '12345') # Act
self.assertTrue(self.phonebook.is_consistent()) # Assert
self.phonebook.add("Anna", '012345') # Act
self.assertTrue(self.phonebook.is_consistent()) # Assert
self.phonebook.add("Sue", '12345') # identical to Bob # Act
self.assertFalse(self.phonebook.is_consistent()) # Assert
self.phonebook.add("Sue", '123') # prefix of Bob # Act
self.assertFalse(self.phonebook.is_consistent()) # Assert
# This test case is poor: test_is_consistent packs many small checks into a
# single test method. A better design is one test per situation:
# - Lookup by name
# - Missing name
# - Consistent when empty
# - Consistent when all numbers are different
# - Inconsistent when there are duplicate numbers
# - Inconsistent when one number is a prefix of another
# The Three Parts of a Test:
# - Arrange: set up the object under test and its collaborators.
# - Act: exercise the unit under test.
# - Assert: make claims about what happened.
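
# A sketch of the suggested split, assuming the same PhoneBook API as above
# (the class and method names here are illustrative, not from the original course):
class PhoneBookConsistencyTest(unittest.TestCase):
    def setUp(self) -> None:
        self.phonebook = PhoneBook()

    def test_inconsistent_when_number_is_prefix_of_another(self):
        self.phonebook.add("Bob", '12345')                 # Arrange
        self.phonebook.add("Sue", '123')                   # Act: '123' is a prefix of Bob's number
        self.assertFalse(self.phonebook.is_consistent())   # Assert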
|
[
"maksymilian.wesolowski@gmail.com"
] |
maksymilian.wesolowski@gmail.com
|
d660489e8f9aaa85168fe6230a528f5fa782d489
|
70fe5d0580424a2362feaec811cc58cf3abde1ca
|
/code/clean_data.py
|
08c4f4d791ae552232ecf466d14612fa6be37642
|
[] |
no_license
|
zhang-yw/avn
|
7d426d325f1030225faf50bda62461428da52166
|
fd0bf6fe2144c02f81322be0f337ca8edfec5138
|
refs/heads/master
| 2022-12-05T14:22:49.741324
| 2020-09-01T14:24:45
| 2020-09-01T14:24:45
| 284,073,064
| 4
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,061
|
py
|
import h5py
import numpy as np
import networkx as nx
def get_pres(locs, rot, p_locs, p_pres):
for i in range(len(p_locs)):
if abs(p_locs[i][0]-locs[0]) < 0.25 and abs(p_locs[i][1]-locs[1]) < 0.25 and abs(p_locs[i][2]*90-rot) < 0.25:
return p_pres[i]
print("Not find")
print(locs)
print(rot)
exit(0)
f = h5py.File("/Users/yw-zhang/Desktop/cvpr_code_for227/data/FloorPlan227.h5", "r")
f_2 = h5py.File("/Users/yw-zhang/Desktop/cvpr_code_for227/data/FloorPlan227_new.h5", "w")
f_3 = h5py.File("/Users/yw-zhang/Desktop/cvpr_code_for227/source_predict_227.h5", "r")
graph = []
location = []
observation = []
resnet_feature = []
rotation = []
fx = -1*np.ones(520, dtype=np.int32)  # int8 would overflow: the remapped indices go up to 303
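# fx maps each original node index to its position among the kept nodes
# (those with rotation != -1); entries left at -1 mark dropped nodes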
count = 0
for i in range(len(list(f['rotation']))):
if f['rotation'][i] != -1:
fx[i] = count
count += 1
graph.append(f['graph'][i])
location.append(f['location'][i])
observation.append(f['observation'][i])
resnet_feature.append(f['resnet_feature'][i])
rotation.append(f['rotation'][i])
for i in range(len(graph)):
for j in range(4):
if graph[i][j] != -1:
graph[i][j] = fx[graph[i][j]]
print(np.asarray(graph).shape)
f_2.create_dataset("graph", data=np.array(graph))
f_2.create_dataset("location", data=np.array(location))
f_2.create_dataset("observation", data=np.array(observation))
f_2.create_dataset("resnet_feature", data=np.array(resnet_feature))
f_2.create_dataset("rotation", data=np.array(rotation))
g = nx.Graph()
path = -1*np.ones((304,304))
for i in range(304):
g.add_node(i)
for i in range(304):
for j in range(4):
if(graph[i][j] != -1):
g.add_edge(i, graph[i][j])
paths = nx.shortest_path(g)
print(g.nodes())
for i in paths.keys():
for j in paths[i].keys():
path[i][j] = len(paths[i][j])
f_2.create_dataset("shortest_path_distance", data=np.array(path))
predict = []
p_locs = np.array(f_3['locs'])
p_pres = np.array(f_3['predicts'])
for i in range(304):
pres = get_pres(location[i], rotation[i], p_locs, p_pres)
predict.append(pres)
f_2.create_dataset("predict_source", data=np.array(predict))
f.close()
f_2.close()
f_3.close()
|
[
"yw-zhang16@mails.tsinghua.edu.cn"
] |
yw-zhang16@mails.tsinghua.edu.cn
|
487914b02da780d1d7776bd247bf83f3fdb7aaad
|
167f475843d1fbd002636000173d81c97a1b2998
|
/Pigwarts/Year3_Getting_Fancy/Chapter4_Lessons/loop5.py
|
344c292daa4c7a0dd480492cd3d45e07cec54b90
|
[] |
no_license
|
edwinevans/HarryPotterTerminalQuest
|
35f188ad69d26ef7ae051725568811210a6a741c
|
ffa9767666148b1668511459b59a6e261e61e5fa
|
refs/heads/master
| 2022-05-12T11:38:14.988095
| 2022-03-19T01:24:14
| 2022-03-19T01:24:14
| 71,944,091
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 313
|
py
|
#
# Keep printing random numbers until you find one less than 0.1
# Then print "Eurika!" and exit
#
# You can review earlier examples but try to create the whole program yourself without
# any hints
#
import random
while True:
number = random.random()
print number
if number < 0.1:
print "Eurika!"
break
|
[
"miaevans@edwin-evanss-macbook-pro.local"
] |
miaevans@edwin-evanss-macbook-pro.local
|
7f333078d9807b56b618427a57b317b03cb1a02b
|
fe93c6797b942510cd511aacdb9986afa4106999
|
/dl24/log.py
|
4a833f47e0ae009b6205d58f4e1802bb0930e603
|
[] |
no_license
|
szgut/queueing
|
0890875cfd20f028ba2ca254e16cf54a9585ec76
|
452c9dafcfad69766bc9f0051479ecc5ccccabd6
|
refs/heads/master
| 2020-06-09T05:00:54.886558
| 2015-04-06T14:43:21
| 2015-04-06T14:43:21
| 31,563,843
| 1
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 682
|
py
|
BLACK = '\033[30m'
RED = '\033[31m'
GREEN = '\033[32m'
YELLOW = '\033[33m'
BLUE = '\033[34m'
MAGENTA = '\033[35m'
CYAN = '\033[36m'
WHITE = '\033[37m'
RESET = '\033[0;0m'
BOLD = '\033[1m'
REVERSE = '\033[2m'
BLACKBG = '\033[40m'
REDBG = '\033[41m'
GREENBG = '\033[42m'
YELLOWBG= '\033[43m'
BLUEBG = '\033[44m'
MAGENTABG='\033[45m'
CYANBG = '\033[46m'
WHITEBG = '\033[47m'
def color(name, string):
return "%s%s%s" % (name, string, RESET)
def log(obj):
print obj
def warn(string):
log(color(BOLD+REDBG, string))
def bad(string):
log(color(RED, string))
def good(string):
log(color(GREEN, string))
def info(string):
log(color(YELLOW, string))
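
# Example usage (illustrative): info("connected") prints "connected" in yellow,
# while warn("low disk space") prints it bold on a red background.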
|
[
"pog992@gmail.com"
] |
pog992@gmail.com
|
fb4a70ccbc105159901a8fc6d5fac8efdb7b4dbd
|
dec884c0f55b830571357e1a4563f93a11adf715
|
/medusa/monitoring/dogstatsd.py
|
a4d9af0c3743bdb2eb00c6f73766648aa45e33bf
|
[
"Apache-2.0"
] |
permissive
|
thelastpickle/cassandra-medusa
|
1a08a2bb13cf36fdde344364ff4757caf6a4119e
|
aa41d5d05c93fa521c85b10b0f65e09041d7b742
|
refs/heads/master
| 2023-09-01T14:08:55.200660
| 2023-09-01T09:33:11
| 2023-09-01T09:33:11
| 219,795,871
| 246
| 130
|
Apache-2.0
| 2023-09-14T09:06:35
| 2019-11-05T16:48:44
|
Python
|
UTF-8
|
Python
| false
| false
| 1,646
|
py
|
# -*- coding: utf-8 -*-
# Copyright 2021-present Shopify. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import medusa.utils
from datadog.dogstatsd import DogStatsd
from medusa.monitoring.abstract import AbstractMonitoring
class DogStatsdMonitoring(AbstractMonitoring):
def __init__(self, config):
super().__init__(config)
self.client = DogStatsd()
def send(self, tags, value):
if len(tags) != 3:
raise AssertionError("Datadog monitoring implementation needs 3 tags: 'name', 'what' and 'backup_name'")
name, what, backup_name = tags
metric = '{name}.{what}'.format(name=name, what=what)
backup_name_tag = 'backup_name:{}'.format(backup_name)
        # The backup_name tag would create a rather high-cardinality metric series if backups are at all frequent.
        # This could be an expensive metric, so backup_name is dropped from the tags sent by default.
if medusa.utils.evaluate_boolean(self.config.send_backup_name_tag):
self.client.gauge(metric, value, tags=[backup_name_tag])
else:
self.client.gauge(metric, value)
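
# Illustrative call (the tag values below are hypothetical): a backup task might report
#   DogStatsdMonitoring(config).send(["medusa-backup", "backup-duration", "backup1"], 42.0)
# which emits the gauge "medusa-backup.backup-duration".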
|
[
"alex.dejanovski@datastax.com"
] |
alex.dejanovski@datastax.com
|
9f1faec8e0731fbad823f5000c61ae7553ec1af1
|
9083d620ec89d3c85f4270fd724010c20799368e
|
/app/admin.py
|
a6361094fdf44cebc131a84ddfb668ce2f22b52a
|
[] |
no_license
|
riyadhswe/CovidHelp
|
e122aa1fefacb985c862e758a3021af4af08712e
|
5e004739ec3facebbccdf0e9e46f96d3c01b2bb6
|
refs/heads/master
| 2023-08-14T04:14:37.458150
| 2021-10-10T05:02:23
| 2021-10-10T05:02:23
| 370,762,838
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 203
|
py
|
from django.contrib import admin
from app.models import *
# Register your models here.
admin.site.register(Division)
admin.site.register(City)
admin.site.register(Hospital)
admin.site.register(Service)
|
[
"riyadhswe@gmail.com"
] |
riyadhswe@gmail.com
|
8555fadef2daaa5488669cc0abbc52955524a398
|
8962da83c3397557ab62637b0ece739a3b5d4534
|
/w3resource/q8.py
|
055fa66595a42f1e9c1aedc76cecf9dd0f5a9114
|
[] |
no_license
|
kylechenoO/812pytest
|
a2a74c2b5fa0c92212c4b398bea7a663a36fced8
|
bb3892c046dfa55db1a46016959d99835da53c78
|
refs/heads/main
| 2023-01-24T13:05:55.331383
| 2020-12-09T13:57:06
| 2020-12-09T13:57:06
| 315,020,418
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 363
|
py
|
#!/opt/anaconda/bin/python
## Question 8: Write a Python program to display the first and last colors from the following list.
## import build-in pkgs
import re
## main run part
inputStr = input('Please input a list of items separated by spaces: ')
inputList = re.split(' ', inputStr)
print('1st item: {}'.format(inputList[0]))
print('last item: {}'.format(inputList[-1]))
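# Example run (illustrative): entering "red green blue" prints
#   1st item: red
#   last item: blue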
|
[
"kyle@hacking-linux.com"
] |
kyle@hacking-linux.com
|
6fda27d6057b3948676d60f07f52a37bf01d75ff
|
69168aa705bf3eef9163f485ff7d3b651de33190
|
/python/convert_csv_gdf.py
|
ea7f112f44a510ce80df77d0c7b489cf490ad864
|
[] |
no_license
|
mrhysjones/rwc-gephi
|
6aa03cda2386431e4ef2f7e49cbfd04b89273878
|
ee0f6e16ee28c86e4466ac10e99160ebf42dddae
|
refs/heads/master
| 2021-01-10T06:48:40.663708
| 2016-02-17T15:15:54
| 2016-02-17T15:15:54
| 51,853,746
| 4
| 2
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,836
|
py
|
import csv
# Take a CSV file in the form 'TeamA, TeamB, ScoreA, ScoreB, TriesA, TriesB' and
# create a GDF representation for use with Gephi
def convert_csv_gdf(csv_filename, gdf_filename='output.gdf'):
# Put result data from CSV files into 2D arrays for processing
fixtures = []
scores = []
tries = []
with open(csv_filename) as csv_file:
reader = csv.DictReader(csv_file)
for row in reader:
fixtures.append([row['TeamA'], row['TeamB']])
scores.append([row['ScoreA'], row['ScoreB']])
tries.append([row['TriesA'], row['TriesB']])
# Construct node section of GDF file
nodes = get_nodes(fixtures, scores, tries)
node_info = 'nodedef>name VARCHAR,label VARCHAR,totalscore INTEGER,totaltries INTEGER\n'
for node in nodes:
node_info += node[0] + ',' + node[0] + ',' + str(node[1]) + ',' + str(node[2]) + '\n'
# Construct edge section of GDF file
edges = get_edges(fixtures)
edge_info = 'edgedef>node1 VARCHAR,node2 VARCHAR,directed BOOLEAN,weight DOUBLE\n'
for edge in edges:
edge_info += edge[0] + ',' + edge[1] + ',' + 'false' + ',' + str(edge[2]) + '\n'
gdf_file = open(gdf_filename, 'w')
gdf_file.write(node_info)
gdf_file.write(edge_info)
gdf_file.close()
# Get all information required for nodes (teams) including total tries and scores
def get_nodes(fixtures, scores, tries):
teams = set()
nodes = []
# Obtain set of teams
for team in fixtures:
teams.add(team[0])
teams.add(team[1])
# Loop through set, and calculate totals then add to node list
for team in teams:
        total_score = 0
        total_tries = 0
for idx, item in enumerate(fixtures):
# Check if team is LHS of fixture
if team == item[0]:
total_score += int(scores[idx][0])
total_tries += int(tries[idx][0])
# Check if team is RHS of fixture
elif team == item[1]:
total_score += int(scores[idx][1])
total_tries += int(tries[idx][1])
else:
continue
# Add team, total score, and total tries to node array
nodes.append([team, total_score, total_tries])
return nodes
# Get all unique fixtures as edges and calculate weight based on number of fixtures between teams
def get_edges(fixtures):
unique_fixtures = set()
edges = []
# Join fixture elements i.e. teams to 1 string to make use of set
for fixture in fixtures:
unique_fixtures.add(' '.join(fixture))
    # Count occurrences of each unique fixture in the fixture list
for unique in unique_fixtures:
weight = 0
for fixture in fixtures:
if unique == ' '.join(fixture):
weight += 1
# Re-split the fixture elements and add them and the corresponding weight to the edges array
edge_nodes = unique.split(' ')
edges.append([edge_nodes[0], edge_nodes[1], weight])
return edges
# Example usage - assuming example file 'test.csv' generated by get_rugby_data.py
# convert_csv_gdf('test.csv', 'test.gdf')
|
[
"contact@matt-j.me"
] |
contact@matt-j.me
|
f97209b91defd35c3e6f48a28ca9922c38530bc2
|
56a24c62ee11a7ea3124619625cb9782fc33c74b
|
/Softmax_Boltzmann.py
|
3967fffb533a755dc0355ee668d1f7003e3e4a02
|
[] |
no_license
|
MezniMouafek1/Reinforcement-Learning
|
03eff0f25b26cf7445b9eb65e8ee7b470c9fb795
|
31649e49f53d33d0bf842b1ecb1b680f2bf0b310
|
refs/heads/master
| 2020-12-04T07:09:48.164947
| 2020-01-03T21:58:37
| 2020-01-03T21:58:37
| 231,671,612
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,625
|
py
|
# -*- coding: utf-8 -*-
#https://github.com/JKCooper2/gym-bandits.git
#cd
#pip install -e .
import numpy as np
import gym_bandits
import gym
from gym import wrappers
env = gym.make("BanditTenArmedGaussian-v0")
env = wrappers.Monitor(env, r'C:\DATASCIENCES')
env.action_space
# initialize the variables:
# number of rounds (iterations)
num_rounds = 20000
# number of times each arm has been pulled
count = np.zeros(10)
# sum of the rewards collected from each arm
sum_rewards = np.zeros(10)
# Q value, i.e. the running average reward of each arm
Q = np.zeros(10)
# Now we define the softmax_Boltzmann function:
import math
import random
def softmax_Boltzmann(to):
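    # Boltzmann (softmax) exploration: P(arm a) = exp(Q[a]/to) / sum_b exp(Q[b]/to),
    # where the temperature `to` controls how greedy the selection is.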
total = sum([math.exp(val/to) for val in Q])
probs = [math.exp(val/to)/total for val in Q]
    # probabilities of the Boltzmann distribution
threshold = random.random()
cumulative_prob = 0.0
for i in range(len(probs)):
cumulative_prob += probs[i]
if (cumulative_prob > threshold):
return i
return np.argmax(probs)
for i in range(num_rounds):
    # select an arm with softmax_Boltzmann
arm = softmax_Boltzmann(0.5)
    # get the reward
env.reset()
observation, reward, done, info = env.step(arm)
    # update the pull count for this arm
count[arm] += 1
    # accumulate the rewards obtained from this arm
sum_rewards[arm]+=reward
    # compute the Q value, i.e. the arm's average reward
Q[arm] = sum_rewards[arm]/count[arm]
print('The optimal arm is {}'.format(np.argmax(Q)))
env.close()
|
[
"mouafek.mezni@esprit.tn"
] |
mouafek.mezni@esprit.tn
|
5f446b40e7dde06790d494ceb680e3446c01f024
|
72d7ce952b86c6799a621107b21df0958222e510
|
/icp.py
|
0e1db83e692d4fea315d064aeec12a341e8aa4fd
|
[
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
Ericlong423/AlignNet-3D
|
ea3b6a4e971487de362adfbbf241294bfb36c0f5
|
eb34e03a38012f06f86bc2847ae4093b14355e04
|
refs/heads/master
| 2022-11-08T07:53:43.228489
| 2020-06-25T09:41:34
| 2020-06-25T09:41:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 10,544
|
py
|
import json
import logging
import os
import sys
import time
# from py_goicp import GoICP, POINT3D, ROTNODE, TRANSNODE
from contextlib import contextmanager
import numpy as np
from scipy.spatial.transform import Rotation
from tqdm import tqdm
import evaluation
import open3d as o3
import provider
from pointcloud import ICP, get_mat_angle
logger = logging.getLogger('tp')
# https://stackoverflow.com/questions/5081657/how-do-i-prevent-a-c-shared-library-to-print-on-stdout-in-python
@contextmanager
def stdout_redirected(to=os.devnull):
fd = sys.stdout.fileno()
def _redirect_stdout(to):
sys.stdout.close() # + implicit flush()
os.dup2(to.fileno(), fd) # fd writes to 'to' file
sys.stdout = os.fdopen(fd, 'w') # Python writes to fd
with os.fdopen(os.dup(fd), 'w') as old_stdout:
with open(to, 'w') as file:
_redirect_stdout(to=file)
try:
yield # allow code to be run with the redirected stdout
finally:
_redirect_stdout(to=old_stdout) # restore stdout.
# buffering and flags such as
# CLOEXEC may be different
def load_pountclouds(file_idx, cfg, return_numpy=False):
ps1 = np.load(f'{cfg.data.basepath}/pointcloud1/{str(file_idx).zfill(8)}.npy')[:, :3]
ps2 = np.load(f'{cfg.data.basepath}/pointcloud2/{str(file_idx).zfill(8)}.npy')[:, :3]
pc1_centroid = ps1.mean(axis=0)
if return_numpy:
return ps1, ps2, pc1_centroid
pc1 = o3.geometry.PointCloud()
pc1.points = o3.Vector3dVector(ps1)
pc2 = o3.geometry.PointCloud()
pc2.points = o3.Vector3dVector(ps2)
return pc1, pc2, pc1_centroid
def get_median_init(pc1, pc2):
approx_translation = np.median(np.asarray(pc2.points), axis=0) - np.median(np.asarray(pc1.points), axis=0)
init = np.eye(4)
init[:3, 3] = approx_translation
return init
def get_centroid_init(pc1, pc2):
approx_translation = np.mean(np.asarray(pc2.points), axis=0) - np.mean(np.asarray(pc1.points), axis=0)
init = np.eye(4)
init[:3, 3] = approx_translation
return init
def icp_p2point(file_idx, cfg, radius=0.2, its=30, init=None, with_constraint=None):
with_constraint = with_constraint if with_constraint is not None else cfg.evaluation.special.icp.with_constraint
pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
if init is None:
# init = get_median_init(pc1, pc2)
init = get_centroid_init(pc1, pc2)
start = time.time()
reg_p2p = o3.registration_icp(pc1, pc2, radius, init, o3.TransformationEstimationPointToPoint(with_constraint=with_constraint, with_scaling=False), o3.registration.ICPConvergenceCriteria(max_iteration=its)) # Default: 30
time_elapsed = time.time() - start
return reg_p2p.transformation, pc1_centroid, time_elapsed
def icp_p2plane(file_idx, cfg):
assert False
def icp_o3_gicp(file_idx, cfg, refine=None, refine_radius=0.05, precomputed_results=None):
pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
voxel_size = 0.05
start = time.time()
if precomputed_results is None:
distance_threshold = voxel_size * 1.5
source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(pc1, pc2, voxel_size)
reg_res = o3.registration_ransac_based_on_feature_matching(
source_down,
target_down,
source_fpfh,
target_fpfh,
distance_threshold,
o3.TransformationEstimationPointToPoint(with_constraint=cfg.evaluation.special.icp.with_constraint, with_scaling=False),
4, # scaling=False
[o3.CorrespondenceCheckerBasedOnEdgeLength(0.9), o3.CorrespondenceCheckerBasedOnDistance(distance_threshold)],
o3.RANSACConvergenceCriteria(4000000, 500))
transformation = reg_res.transformation
else:
precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
transformation = get_mat_angle(precomp_pred_translation, precomp_pred_angle, precomp_pred_center)
if refine is None:
time_elapsed = time.time() - start
return transformation, pc1_centroid, time_elapsed
else:
if refine == 'p2p':
reg_p2p = o3.registration_icp(pc1, pc2, refine_radius, transformation, o3.TransformationEstimationPointToPoint(with_constraint=cfg.evaluation.special.icp.with_constraint, with_scaling=False))
# if file_idx == 8019:
# print('->', reg_p2p.transformation)
time_elapsed = time.time() - start
return reg_p2p.transformation, pc1_centroid, time_elapsed
else:
assert False
def icp_o3_gicp_fast(file_idx, cfg, refine=None, refine_radius=0.05, precomputed_results=None):
pc1, pc2, pc1_centroid = load_pountclouds(file_idx, cfg)
voxel_size = 0.05
distance_threshold = voxel_size * 0.5
start = time.time()
if precomputed_results is None:
source_down, target_down, source_fpfh, target_fpfh = ICP._icp_global_prepare_dataset(pc1, pc2, voxel_size)
reg_res = o3.registration_fast_based_on_feature_matching(source_down, target_down, source_fpfh, target_fpfh, o3.FastGlobalRegistrationOption(with_constraint=cfg.evaluation.special.icp.with_constraint, maximum_correspondence_distance=distance_threshold))
transformation = reg_res.transformation
else:
precomp_pred_translation, precomp_pred_angle, precomp_pred_center = precomputed_results
transformation = get_mat_angle(precomp_pred_translation, precomp_pred_angle, precomp_pred_center)
if refine is None:
time_elapsed = time.time() - start
return transformation, pc1_centroid, time_elapsed
else:
if refine == 'p2p':
reg_p2p = o3.registration_icp(pc1, pc2, refine_radius, transformation, o3.TransformationEstimationPointToPoint(with_constraint=cfg.evaluation.special.icp.with_constraint, with_scaling=False))
time_elapsed = time.time() - start
return reg_p2p.transformation, pc1_centroid, time_elapsed
else:
assert False
def icp_goicp(file_idx, cfg, refine=None, refine_radius=0.05):
assert False
def evaluate(cfg, use_old_results=False):
val_idxs = provider.getDataFiles(f'{cfg.data.basepath}/split/val.txt')
# val_idxs = val_idxs[:100]
epoch = 0
total_time = 0.
do_refinement = cfg.evaluation.special.icp.has('refine')
refinement_method = cfg.evaluation.special.icp.refine if do_refinement else None
if cfg.evaluation.special.icp.variant in ['o3_gicp', 'o3_gicp_fast'] and do_refinement:
gicp_result_dir = f'{cfg.logging.logdir[:-4]}/val/eval{str(epoch).zfill(6)}'
assert os.path.isdir(gicp_result_dir), gicp_result_dir
assert os.path.isfile(f'{gicp_result_dir}/eval_180.json'), f'{gicp_result_dir}/eval_180.json'
eval_dict = json.load(open(f'{gicp_result_dir}/eval_180.json', 'r'))
precomp_time = eval_dict['mean_time'] * float(len(val_idxs))
total_time += precomp_time
precomp_pred_translations = np.load(f'{gicp_result_dir}/pred_translations.npy')
precomp_pred_angles = np.load(f'{gicp_result_dir}/pred_angles.npy')
precomp_pred_centers = np.load(f'{gicp_result_dir}/pred_s1_pc1centers.npy')
print('Precomputed results loaded')
pcs1, pcs2, all_gt_translations, all_gt_angles, all_gt_pc1centers, all_gt_pc2centers, all_gt_pc1angles, all_gt_pc2angles = provider.load_batch(val_idxs, override_batch_size=len(val_idxs))
eval_dir = f'{cfg.logging.logdir}/val/eval{str(epoch).zfill(6)}'
if use_old_results and os.path.isfile(f'{eval_dir}/pred_translations.npy'):
all_pred_translations = np.load(f'{eval_dir}/pred_translations.npy')
all_pred_angles = np.load(f'{eval_dir}/pred_angles.npy')
all_pred_centers = np.load(f'{eval_dir}/pred_s1_pc1centers.npy')
else:
all_pred_translations = np.empty((len(val_idxs), 3), dtype=np.float32)
all_pred_angles = np.empty((len(val_idxs), 1), dtype=np.float32)
all_pred_centers = np.empty((len(val_idxs), 3), dtype=np.float32)
for idx, file_idx in enumerate(tqdm(val_idxs)):
if cfg.evaluation.special.icp.variant == 'p2point':
pred_transform, pred_center, time_elapsed = icp_p2point(file_idx, cfg, radius=0.10)
elif cfg.evaluation.special.icp.variant == 'p2plane':
pred_transform, pred_center, time_elapsed = icp_p2plane(file_idx, cfg)
elif cfg.evaluation.special.icp.variant == 'goicp':
pred_transform, pred_center, time_elapsed = icp_goicp(file_idx, cfg, refine=refinement_method, refine_radius=0.10)
elif cfg.evaluation.special.icp.variant == 'o3_gicp':
pred_transform, pred_center, time_elapsed = icp_o3_gicp(file_idx, cfg, refine=refinement_method, refine_radius=0.10, precomputed_results=(precomp_pred_translations[idx], precomp_pred_angles[idx], precomp_pred_centers[idx]) if do_refinement else None)
elif cfg.evaluation.special.icp.variant == 'o3_gicp_fast':
pred_transform, pred_center, time_elapsed = icp_o3_gicp_fast(file_idx, cfg, refine=refinement_method, refine_radius=0.10, precomputed_results=(precomp_pred_translations[idx], precomp_pred_angles[idx], precomp_pred_centers[idx]) if do_refinement else None)
else:
assert False
# all_pred_centers[idx] = pred_center
# Important! The output of the ICP functions is around the origin, not around the centroid as used internally
all_pred_centers[idx] = np.array([0., 0, 0])
all_pred_translations[idx] = pred_transform[:3, 3]
rotation_mat = pred_transform[:3, :3]
rot_vec = Rotation.from_dcm(rotation_mat).as_rotvec()
all_pred_angles[idx] = rot_vec[2]
total_time += time_elapsed
os.makedirs(eval_dir, exist_ok=True)
np.save(f'{eval_dir}/pred_translations.npy', all_pred_translations)
np.save(f'{eval_dir}/pred_angles.npy', all_pred_angles)
np.save(f'{eval_dir}/pred_s1_pc1centers.npy', all_pred_centers)
for accept_inverted_angle in [False, True]:
eval_dict = evaluation.evaluate(cfg, val_idxs, all_pred_translations, all_pred_angles, all_gt_translations, all_gt_angles, all_pred_centers, all_gt_pc1centers, eval_dir=eval_dir, accept_inverted_angle=accept_inverted_angle, mean_time=total_time / len(val_idxs))
logger.info(eval_dict)
|
[
"gross.jojo@googlemail.com"
] |
gross.jojo@googlemail.com
|
32d2b673b4421719313ac17c64560921dade7d60
|
2b8d4e22d10ca118fba0100cc87af04f3939448f
|
/ioud10/ioud_sale_order/__manifest__.py
|
22982bed7d88a58ac835d123e58c4e47090afaf9
|
[] |
no_license
|
ahmed-amine-ellouze/personal
|
f10c0a161da709f689a3254ec20486411102a92d
|
4fe19ca76523cf274a3a85c8bcad653100ff556f
|
refs/heads/master
| 2023-03-28T23:17:05.402578
| 2021-03-25T13:33:18
| 2021-03-25T13:33:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,047
|
py
|
# -*- coding: utf-8 -*-
{
'name': "ioud_sale_order",
'summary': """
This module is for customization of sale for iOud """,
'description': """
This module is for customization of sale for iOud
""",
'author': "SolutionFounder",
'website': "http://www.solutionfounder.com",
# for the full list
'category': 'sale',
'version': '10.4.18',
# any module necessary for this one to work correctly
'depends': ['base','sale','mail','ioud_email_alerts','account_reports','delivery'],
# always loaded
'data': [
'data/partner_sequnce.xml',
'security/user_groups.xml',
'security/ir.model.access.csv',
'views/branches.xml',
'views/account_invoice_view.xml',
'views/sale_order_view.xml',
'views/res_partner_view.xml',
'views/region_config_view.xml',
'views/config.xml',
'views/stcok.xml',
#Backend View Load - JS
'views/assets.xml'
],
# only loaded in demonstration mode
}
|
[
"hussnainsajid08@gmail.com"
] |
hussnainsajid08@gmail.com
|
379eed348ef285df31430cde41c3cad20e18dc15
|
c151f8b82401500127aa5f924472b9b93c33f12f
|
/checkout/migrations/0001_initial.py
|
9db939681b0c9ea9a7d6f63f56e48e7c4d1889d9
|
[] |
no_license
|
djacura/AGame
|
4f7f8984ada210272b6249f496aacbc6d01eca56
|
0e85461e04b702d1f2e8094d733af480951e69f7
|
refs/heads/master
| 2023-03-11T09:17:56.309282
| 2021-02-28T08:52:09
| 2021-02-28T08:52:09
| 327,087,869
| 0
| 1
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,345
|
py
|
# Generated by Django 3.1.5 on 2021-01-18 21:06
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('products', '0002_auto_20210114_1719'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('order_number', models.CharField(editable=False, max_length=32)),
('full_name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
('phone_number', models.CharField(max_length=20)),
('country', models.CharField(max_length=40)),
('postcode', models.CharField(blank=True, max_length=20, null=True)),
('town_or_city', models.CharField(max_length=40)),
('street_address1', models.CharField(max_length=80)),
('street_address2', models.CharField(blank=True, max_length=80, null=True)),
('county', models.CharField(blank=True, max_length=80, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('delivery_cost', models.DecimalField(decimal_places=2, default=0, max_digits=6)),
('order_total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
('grand_total', models.DecimalField(decimal_places=2, default=0, max_digits=10)),
],
),
migrations.CreateModel(
name='OrderLineItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_size', models.CharField(blank=True, max_length=2, null=True)),
('quantity', models.IntegerField(default=0)),
('lineitem_total', models.DecimalField(decimal_places=2, editable=False, max_digits=6)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='lineitems', to='checkout.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='products.product')),
],
),
]
|
[
"d.jacura@sky.com"
] |
d.jacura@sky.com
|
a003ec5234690f0465bc53f3b4e729bc1c4555a9
|
199c2bd148e2e97c5f8544864847591bb2402898
|
/lesson_3_templates/manage.py
|
eda6e8efa4532dbf011eaa1518c476e3693ad2c2
|
[] |
no_license
|
achuDk/django_lesson
|
443fc1f4af3f0ce1f0277cb2bde56774501fa60d
|
e5a8a5d70d5293b3a8db120052e7fed120bd7c80
|
refs/heads/master
| 2020-03-13T13:28:29.596443
| 2018-05-08T10:02:53
| 2018-05-08T10:02:53
| 131,139,354
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 550
|
py
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "lesson_3_templates.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
|
[
"achudk@qq.com"
] |
achudk@qq.com
|
375a86fb3e15986700a4f9d2efe2244a228d0657
|
49f80da499ab75acc243b50a1d4c1617cff04284
|
/CerealAnalysis/__main__.py
|
c5155afdeb06ca924496772f7ef10e4580732f49
|
[] |
no_license
|
damiengarrouste78/CerealAnalysis-repo
|
524f9e243cdc494283a0bfd92e147a96a20ea696
|
f5935f9a3fcf24eae95ef2c98153b538701ff9be
|
refs/heads/main
| 2023-05-06T07:54:10.900923
| 2021-05-21T14:52:01
| 2021-05-21T14:52:01
| 369,239,541
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 89
|
py
|
import sys
sys.path.append(".\\")
from app import run
if __name__ == '__main__':
run()
|
[
"noreply@github.com"
] |
damiengarrouste78.noreply@github.com
|
bd1e14773ad0fd6f809ba56d000a8bfabe1c6386
|
add971046c4bd766c380d90d39535080d102ec4d
|
/blog/urls.py
|
03f7a5fafcdbe2a31d1969b8fe1d5c58bea75ab2
|
[] |
no_license
|
auroralemieux/djangogirls-tut
|
a82170979f6c3d719ea4c7945edcfa533d69d65a
|
b0121f9acbabbd2bec5e5097e8c42cc81346fc84
|
refs/heads/master
| 2020-04-05T12:09:51.957158
| 2017-06-30T16:08:33
| 2017-06-30T16:08:33
| 95,243,533
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 364
|
py
|
from django.conf.urls import url
from . import views
urlpatterns = [
# this is setting the root route
url(r'^$', views.post_list, name='post_list'),
url(r'^post/(?P<pk>\d+)/$', views.post_detail, name='post_detail'),
url(r'^post/new/$', views.post_new, name="post_new"),
url(r'^post/(?P<pk>\d+)/edit/$', views.post_edit, name="post_edit"),
]
|
[
"aurora.lemieux@gmail.com"
] |
aurora.lemieux@gmail.com
|
193122adf0ef9170907c47e035ebe8434d378807
|
e3910a25ca4456a35112d41f184fe2a919214ac0
|
/reservation/migrations/0003_auto_20160310_2101.py
|
4bd2beed2f08e5987ae67f1bc5dbe13adea43864
|
[] |
no_license
|
RobertPastor/studio_reservation
|
a498f1ae2077bb21199651d245f22cb59ef13370
|
63a47de856cc1d5aedbd4024d8696b39470d11f2
|
refs/heads/master
| 2021-01-10T16:13:32.935529
| 2018-01-28T14:19:28
| 2018-01-28T14:19:28
| 54,514,678
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 658
|
py
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.3 on 2016-03-10 20:01
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('reservation', '0002_reservation_made_when'),
]
operations = [
migrations.AlterField(
model_name='reservation',
name='made_by',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='Guest',
),
]
|
[
"robert.pastor0691@orange.fr"
] |
robert.pastor0691@orange.fr
|
5986254371197dae06e34dd142354b05a0bbf455
|
4feb215ca051153d458a0f1c01e1fd50cce39fd9
|
/manage.py
|
cca3a6efc5378e30d6b361936919e5dd4c41d78d
|
[] |
no_license
|
visse0001/animal_shelter
|
747f205d5f16fe42ac5d1a77bd3eae9a19a49acb
|
e3c001e4d58c85405b34ca01353dbfa0bf6bae0c
|
refs/heads/master
| 2022-11-26T03:12:28.197951
| 2021-02-13T07:39:02
| 2021-02-13T07:39:02
| 248,967,900
| 0
| 0
| null | 2022-11-22T05:26:28
| 2020-03-21T12:06:41
|
Python
|
UTF-8
|
Python
| false
| false
| 634
|
py
|
#!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'animal_shelter.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
[
"san.kuczynska@gmail.com"
] |
san.kuczynska@gmail.com
|
7f721f804bb356a2863c1e1a5bde29a255ad08f7
|
d5280a5dd28a81b9e3a687b7c5312a0a4c36353a
|
/Checkouts/emptyAmountTestCase.py
|
b5275c580e396a7636acf2aa5089fbf5b3290d7e
|
[] |
no_license
|
amritregmi/python_automation
|
4eb394ecbc09517eeae3edcd7ab6b8d335a58fb0
|
c514641a69c83dd3691eed973facf6f938dccd06
|
refs/heads/main
| 2023-03-07T06:02:36.563055
| 2021-02-19T21:40:08
| 2021-02-19T21:40:08
| 313,269,526
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,328
|
py
|
import os, sys
from selenium.webdriver.support.ui import Select
sys.path.insert(0, os.path.abspath(".."))
from Base import loggedInBaseTestCase
class EmptyAmountTestCase(loggedInBaseTestCase.LoggedInBaseTestCase):
def test_emptyAmount(self):
self._caseId = 291
self._suiteId = 8
self._user = "rumbu"
self._password = "Test@123"
driver = self.driver
self.login()
self.assertEqual(driver.current_url, "http://54.186.24.234/pages/dashboard")
driver.find_element_by_xpath('//*[@id="top"]/div/div[2]/ul[1]/li[3]/a').click()
driver.find_element_by_xpath('//*[@id="top"]/div/div[2]/ul[1]/li[3]/ul/li[2]/a').click()
driver.find_element_by_id("CheckoutDescription").clear()
driver.find_element_by_id("CheckoutDescription").send_keys("Ramesh")
driver.find_element_by_id("CheckoutAmount").clear()
driver.find_element_by_id("CheckoutAmount").send_keys("")
checkout_standard_entry_class_id = Select(driver.find_element_by_id("CheckoutStandardEntryClassId"))
checkout_standard_entry_class_id.select_by_visible_text("WEB")
driver.find_element_by_xpath('//*[@id="CheckoutCreateForm"]/div[4]/div/div/input').click()
self.assertEqual(driver.current_url, "http://54.186.24.234/checkouts/create")
|
[
"amrit@amrit.com"
] |
amrit@amrit.com
|
e1e60256ed0e465a7c3a3fe862106351f0ce1953
|
e24a3601449f5aaf235cb2a7445146d622da1c87
|
/test_ws/build/catkin_generated/order_packages.py
|
59923b0fbbb6d0b6ddc0f6d2f1c4c843a69b8168
|
[] |
no_license
|
udooer-old/ROS
|
1edb919524535baf010f62b3dd6e499e859b8d70
|
a19833431919d5995fc67dfd4a288b25919f5bfe
|
refs/heads/master
| 2022-11-08T06:48:35.324381
| 2020-06-30T09:42:02
| 2020-06-30T09:42:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 425
|
py
|
# generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = '/home/yong/ROS/test_ws/src'
whitelisted_packages = ''.split(';') if '' != '' else []
blacklisted_packages = ''.split(';') if '' != '' else []
underlay_workspaces = '/home/yong/sensor_ws/devel;/home/yong/catkin_ws/devel;/opt/ros/melodic'.split(';') if '/home/yong/sensor_ws/devel;/home/yong/catkin_ws/devel;/opt/ros/melodic' != '' else []
|
[
"hunghsuyong114shane@gmail.com"
] |
hunghsuyong114shane@gmail.com
|
8b346eaacf62c7cde882fe6c60be97b4649c2519
|
1620e0af4a522db2bac16ef9c02ac5b5a4569d70
|
/Ekeopara_Praise/Phase 2/DICTIONARY/Day48 Tasks/Task2.py
|
81b89aa1ff7a364846dcb6ab7608ea8ed5a16508
|
[
"MIT"
] |
permissive
|
Ekeopara-Praise/python-challenge-solutions
|
cda07902c9ffc09ba770ae7776e5e01026406a05
|
068b67c05524b5c5a0d6084315eca3424c768421
|
refs/heads/master
| 2022-12-15T15:29:03.031583
| 2020-09-25T06:46:27
| 2020-09-25T06:46:27
| 263,758,530
| 2
| 0
| null | 2020-05-13T22:37:33
| 2020-05-13T22:37:32
| null |
UTF-8
|
Python
| false
| false
| 211
|
py
|
'''2. Write a Python script to add a key to a dictionary.
Sample Dictionary : {0: 10, 1: 20}
Expected Result : {0: 10, 1: 20, 2: 30} '''
original_dict = {0: 10, 1: 20}
original_dict[2] = 30
print(original_dict)
|
[
"ekeoparapraise@gmail.com"
] |
ekeoparapraise@gmail.com
|
39449e677ee1bf94f14738a476fbaeffef554460
|
11e484590b27585facf758f0432eeebe66bf790a
|
/fal_default_discount/__openerp__.py
|
ebb37f69d114ff8b401e6a98002ffca961d71f5d
|
[] |
no_license
|
jeanabreu/falinwa_branch
|
51b38ee5a3373d42417b84a0431bad9f7295f373
|
be96a209479259cd5b47dec73694938848a2db6c
|
refs/heads/master
| 2021-01-18T10:25:49.866747
| 2015-08-25T10:05:05
| 2015-08-25T10:05:05
| 41,369,368
| 0
| 1
| null | 2015-08-25T14:51:50
| 2015-08-25T14:51:50
| null |
UTF-8
|
Python
| false
| false
| 569
|
py
|
# -*- coding: utf-8 -*-
{
"name": "GEN-39_Default Discount",
"version": "1.0",
'author': 'Falinwa Hans',
"description": """
Module to give default discount
""",
"depends" : ['base','account','sale','purchase'],
'init_xml': [],
'data': [
],
'update_xml': [
'res_partner_view.xml',
'sale_view.xml',
'account_view.xml',
],
'css': [],
'installable': True,
'active': False,
'application' : False,
'js': [],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
[
"hans.yonathan@falinwa.com"
] |
hans.yonathan@falinwa.com
|
55070346d3ec2829e4dd6a8d84eee4fdd455a3b8
|
8ea915b38738ec717e2ac763786005877cfc0064
|
/main.py
|
b4749d36c6ae3341cdd0bee2c053deddbbec9801
|
[] |
no_license
|
jimmy43333/Python_Learning
|
374e3c2695200ab3020e423d02513a6866530adb
|
01672e23838827ac77dcf1c49034bde015d14be5
|
refs/heads/master
| 2023-08-14T17:54:18.630258
| 2023-07-12T14:07:28
| 2023-07-12T14:10:07
| 104,288,689
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,402
|
py
|
#from learning_test import Learning
import logging
from TGlog import setuplog
if __name__ == '__main__':
setuplog("./Log/infoLog.log",'a',logging.INFO)
setuplog("./Log/errorLog.log",'w',logging.ERROR)
logging.debug("Debug from main")
logging.info("Info from main")
logging.warning("Warning from main")
logging.error("Error from main")
logging.critical("Critical from main")
logger1 = logging.getLogger("Testing01")
logger2 = logging.getLogger("Testing02")
logger1.info("Info by Testing01")
logger2.info("Info by Testing02")
handler = logging.FileHandler("./Log/TestLog.log")
formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')
handler.setLevel(logging.WARNING)
handler.setFormatter(formatter)
logger1.addHandler(handler)
logger1.warning("Warning by Testing01")
#learn = Learning()
#learn.PrintFilePath(__file__)
'''
animal=learn.ReadConfig("Config/testconfigparser.cfg","Animal",["animal","age","color","born"])
print(animal)
'''
'''
learn.BasicOperatorExample(10,3.0,"/")
learn.BasicOperatorExample(10,3,"//")
learn.BasicOperatorExample(10,3.0,"//")
learn.BasicOperatorExample(10,3.0,"**")
learn.BasicOperatorExample(10,3.0,"&")
'''
#learn.PrintSystem()
#for item in []:
# print str(item) + "no exits"
|
[
"jimmy43333@gmail.com"
] |
jimmy43333@gmail.com
|
1dc3c512bc7cab780a6d844525295ac1300e961a
|
3ff0693a38c2dfe40970e04d67b5698c3445b546
|
/app/models.py
|
ba2a957175df3933b2589bb50218ca87b7b457dd
|
[] |
no_license
|
microwriting/pro15_tango
|
a1c13cca1434a076e88ff10aa0eeb9471f461401
|
dedad47277c96a62c7a9ba49d19ea8c15a4f6d74
|
refs/heads/master
| 2020-03-07T14:30:31.475682
| 2018-04-02T10:35:14
| 2018-04-02T10:35:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 584
|
py
|
from django.db import models
from django.utils import timezone
class Tag(models.Model):
name = models.CharField('Tagname', max_length=255)
created_at = models.DateTimeField('date', default=timezone.now)
ordering = models.IntegerField(default=0)
def __str__(self):
return self.name
class Meta:
ordering = ['ordering', 'pk']
class Sentence(models.Model):
name = models.TextField('Sentence')
tag = models.ManyToManyField(Tag, verbose_name='tag')
def __str__(self):
return self.name
class Meta:
ordering = ['pk']
|
[
"info@micro-w.top"
] |
info@micro-w.top
|
c1e10c049c27fd320f32a3101f5578c62b0d3f4b
|
92ba25f34af38603970f1f773381178e3ee1c787
|
/task_NER_Pipeline.py
|
c4f02bcc9d0ccb26469fad6c9758eabadc07a48b
|
[] |
no_license
|
tianyao-aka/Bert_for_NER_POS
|
870aaa89c50368bd3b049c56cd37e19b11a8e270
|
e88533aa8915b306d8c24b90f86be25133f6700c
|
refs/heads/main
| 2023-01-23T23:58:26.887003
| 2020-12-08T12:56:18
| 2020-12-08T12:56:18
| 319,638,434
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 21,213
|
py
|
import numpy as np
import pandas as pd
from conllu import parse_incr,parse
import torch
import matplotlib.pyplot as plt
from collections import Counter
import pickle
from seqeval.metrics import f1_score
from pytorch_transformers import BertTokenizer, BertModel, BertForMaskedLM,AdamW, WarmupLinearSchedule
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils import data
from sklearn.metrics import confusion_matrix,accuracy_score
import pycuda.driver as cuda
#bert-base-multilingual-cased
import nltk
import seaborn as sns
import os
import stanfordnlp
from collections import Counter
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix,accuracy_score
from seqeval.metrics import classification_report,f1_score
import lookahead as lk
import copy  # needed by inference_sents() below, which calls copy.deepcopy
## The dataset used in this project is available at:
# https://github.com/ialfina/ner-dataset-modified-dee/tree/master/singgalang
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1,2,3'
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased') # load the BERT tokenizer
model = BertModel.from_pretrained('bert-base-multilingual-cased') # load the pretrained multilingual BERT model
# Load the preprocessed dataset, which comes in two parts: 1) sentences, each formatted as ['i','am','working','at','school'], so sentences = [[sent1],[sent2],...]
# 2) labels, one list per sentence such as [0,1,0,0,0,1], so labels = [[sent1_labels],[sent2_labels],[sent3_labels],...]
#----------------------------------------------------------------------------------------------------------------
sentences = pickle.load( open( "sentences.pickle", "rb" ) )
labels = pickle.load( open( "labels.pickle", "rb" ) )
entity2idx = {'O':0,'Place':1,'Person':2,'Organisation':3}
def str2idx(x):
return [entity2idx[i] for i in x]
idx2entity = {0:'O',1:'Place',2:'Person',3:'Company'}  # note: index 3 is named 'Organisation' in entity2idx above; the mismatch only changes the label string shown in reports
labels = list(map(lambda x:str2idx(x),labels))
#----------------------------------------------------------------------------------------------------------------
def get_device(use_gpu=True): #utility function:get cpu or cuda device
if torch.cuda.is_available() and use_gpu:
return torch.device('cuda:0')
else:
return torch.device('cpu')
def train_loss_diff(history): # utility function: returns the mean absolute difference over the last len(history) training losses, used for early stopping during training
diffs = []
for i in range(len(history)-1):
diffs.append(abs(history[i+1]-history[i]))
return np.mean(np.asarray(diffs))
def make_dataset(sents, labels):
# Build the dataset in the format BERT expects
# dataset: list of lists holding the tokenized sentences
# ner_labels: the labels aligned with the tokenized sentences
# data_ids: each word gets a globally unique id, used for later processing
dataset = []
ner_labels = []
data_ids = []
ids = 1
for s, l in zip(sents, labels):
sent = []
sent_tag = []
sent_ids = []
for idx in range(len(s)):
w = tokenizer.tokenize(s[idx])
sent.extend(w)
sent_tag.extend([l[idx]] * len(w))
sent_ids.extend([ids] * len(w))
ids += 1
dataset.append(sent)
ner_labels.append(sent_tag)
data_ids.append(sent_ids)
return (dataset, ner_labels, data_ids)
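# Illustrative example (not part of the original pipeline), assuming the BERT tokenizer
# splits "Jakarta" into "Ja", "##kar", "##ta" and keeps the other words whole:
#   make_dataset([["Saya", "di", "Jakarta"]], [[0, 0, 1]])
#   -> dataset    = [["Saya", "di", "Ja", "##kar", "##ta"]]
#      ner_labels = [[0, 0, 1, 1, 1]]
#      data_ids   = [[1, 2, 3, 3, 3]]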
def change_dataset(dataset, labels, sent_ids, max_len=172):
# Further processing of the dataset: BERT inputs are not of equal length, so special tokens such as [CLS] and [PAD] must be added.
# dataset_tensor: the padded dataset, stored as a long tensor
# label_tensor: the padded labels, stored as a long tensor
# attn_mask: tensor passed to the BERT forward pass to indicate which positions are padding tokens (0 marks a padded token)
# sent_id_tensor: several tokens can belong to one word, e.g. 'love' becomes 'lov', '##e' after the BERT tokenizer; sent_id marks all tokens of the same word with the same id, used later when optimizing the NER model
sent_id_tensor = []
label_tensor = []
dataset_tensor = []
padded_data = []
padded_labels = []
padded_ids = []
for idx, d in enumerate(dataset):
labl = labels[idx]
ids = sent_ids[idx]
if len(d) >= max_len - 2:
d = d[:max_len - 2]
d = ['[CLS]'] + d + ['[SEP]']
padded_data.append(d)
labl = labl[:max_len - 2]
labl = [0] + labl + [0]
padded_labels.append(labl)
ids = ids[:max_len - 2]
ids = [-1] + ids + [-1]
padded_ids.append(ids)
else:
d = ['[CLS]'] + d + ['[SEP]']
labl = [0] + labl + [0]
ids = [-1] + ids + [-1]
while len(d) < max_len:
d.append('[PAD]')
labl.append(0)
ids.append(-1)
padded_data.append(d)
padded_labels.append(labl)
padded_ids.append(ids)
for d in padded_data:
dataset_tensor.append(tokenizer.convert_tokens_to_ids(d))
dataset_tensor = torch.tensor(dataset_tensor).long()
label_tensor = torch.tensor(padded_labels).long()
sent_id_tensor = torch.tensor(padded_ids).long()
attn_mask = dataset_tensor != 0
attn_mask = attn_mask
return dataset_tensor, label_tensor, attn_mask, sent_id_tensor
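# Shape sketch (added for clarity): for N input sentences and max_len=172 the returned tensors are
#   dataset_tensor (N, 172) token ids, label_tensor (N, 172) label ids,
#   attn_mask (N, 172) bool, False exactly on [PAD] positions,
#   sent_id_tensor (N, 172) word ids, with -1 on [CLS]/[SEP]/[PAD] positions.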
class Multiclass_Focal_Loss(nn.Module):
# Custom loss function for NER, mainly to handle class imbalance: 'O' labels are roughly 20x more frequent than the other classes,
# so to get the model to converge, two approaches were used in this project:
# 1) focal loss, which down-weights the 'O' samples; here focal loss is extended to the multi-class case
# 2) hard negative mining as an alternative way to handle the imbalance
# For 1) see https://arxiv.org/abs/1708.02002 - Focal Loss for Dense Object Detection
# For 2) see https://arxiv.org/pdf/1512.02325.pdf - SSD: Single Shot MultiBox Detector
# In practice hard negative mining was adopted in the end, since it reaches a higher f1 score
def __init__(self, alpha=2):
super(Multiclass_Focal_Loss, self).__init__()
self.alpha = alpha
def forward(self, outputs, labels):
outputs = outputs.to(device)
labels = labels.to(device)
type_i_mask = labels > 0
type_ii_mask = labels == 0
# print ('labels:',labels[:5])
labels = labels.view(-1, 1)
costs = torch.gather(outputs, 1, labels)
costs = costs.view(-1)
costs = -1. * torch.log(costs)
type_i_loss = costs[type_i_mask]
type_ii_loss = costs[type_ii_mask]
N = len(type_i_loss)
type_ii_loss_truncated = torch.sort(type_ii_loss, descending=True)[0][:int(2.5 * N)]
total_costs = (type_i_loss.sum() + type_ii_loss_truncated.sum()) / int((3.5 * N)) * 1.
# N = len(labels)
# labels = labels.view(-1,1)
# costs = torch.gather(outputs,1,labels)
# costs = costs.view(-1)
# log_costs = -1.*torch.log(costs)
# squared_cost = (1-costs)**self.alpha
# total_cost = torch.sum(log_costs*squared_cost)/N
return total_costs
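# Illustrative reference only (not used by the pipeline, which keeps the hard-negative-mining
# loss above): a minimal multi-class focal loss in the spirit of https://arxiv.org/abs/1708.02002,
# corresponding to the commented-out lines in forward().
def multiclass_focal_loss_reference(probs, labels, gamma=2.0):
    # probs: (N, C) softmax probabilities; labels: (N,) long tensor of class indices
    pt = probs.gather(1, labels.view(-1, 1)).clamp(min=1e-8).view(-1)
    return (-((1.0 - pt) ** gamma) * torch.log(pt)).mean()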
class NER_Model(nn.Module):
# NER_Model is built on the multilingual BERT model, with optimizations for the NER task. The original paper trains and predicts each word's label from individual tokens;
# this model combines local and global context to classify each word. With h_cls the hidden embedding of the sentence and h_{token} the embedding of a token,
# the word 'love', for example, is described by the feature h_cls||AGG(h_{lov},h_{##e}), where the AGGREGATION FUNCTION is the average operator.
# The concrete implementation follows:
def __init__(self, model, alpha):
super(NER_Model, self).__init__()
self.model = model
self.linear = nn.Linear(768 * 2, 512)
self.dropout = nn.Dropout(p=0.5)
self.relu = nn.ReLU()
self.linear2 = nn.Linear(512, 4)
self.softmax = nn.Softmax(dim=1)
self.lossfunc = Multiclass_Focal_Loss(alpha)
def forward(self, inputs, labels, attn_mask, sent_ids, extract_feats=False):
out = self.model(inputs, attention_mask=attn_mask)
out = out[0]
cls_dict = self.build_dict(out, sent_ids)
data = []
label_list = []
for k in np.unique(sent_ids.cpu().numpy()):
if k == -1:
continue
cls_vector = cls_dict[k]
mask = sent_ids == k
temp = out[mask]
data.append(self.avg_vector(cls_vector, temp))
label_list.append(labels[mask][0])
data = list(map(lambda x: x.view(1, -1), data))
data = torch.cat(data, dim=0)
data = data.float()
label_list = torch.tensor(list(map(lambda x: x.item(), label_list))).long().to(device)
output = self.linear(data)
output = self.dropout(output)
output = self.relu(output)
output = self.linear2(output)
output = self.softmax(output)
cost = self.lossfunc(output, label_list)
if not extract_feats:
return cost
else:
# print (label_list.shape,label_list[:10])
out = torch.argmax(output, dim=1).to(device)
# print (out[:6])
return cost, label_list, out
def build_dict(self, out, sent_ids):
sent_ids = sent_ids.cpu().numpy()
cls_dict = dict()
N = sent_ids.shape[0]
for i in range(N):
for j in set(list(sent_ids[i, :])):
if j == -1: continue
cls_dict[j] = out[i][0]
return cls_dict
def avg_vector(self, cls_vector, inputs):
if len(inputs) == 1:
return torch.cat((cls_vector, inputs.squeeze()))
return torch.cat((cls_vector, torch.mean(inputs, dim=0)))
def eval_model(ner_model, dev_data_gen, loss_func=None):
# utility function: evaluates the model; inputs are our model and the dev data loader, output is the entity-level f1-score
# (the loss_func argument is unused but keeps the calls made from train() below valid)
# see http://www.davidsbatista.net/blog/2018/05/09/Named_Entity_Evaluation/
print('eval.........')
# torch.cuda.empty_cache()
ner_model = ner_model.to(device)
# ner_model = nn.DataParallel(ner_model,device_ids)
ner_model.eval()
y_trues = []
y_preds = []
losses = []
with torch.no_grad():
for inputs, labels, attn_mask, sent_ids in dev_data_gen:
rand_num = np.random.uniform()
if rand_num > 0.4:
continue
inputs = inputs.to(device)
labels = labels.to(device)
attn_mask = attn_mask.to(device)
sent_ids = sent_ids.to(device)
cost, y_true, y_pred = ner_model(inputs, labels, attn_mask, sent_ids, extract_feats=True)
y_trues.append([idx2entity[x] for x in list(y_true.cpu().numpy())])
y_preds.append([idx2entity[x] for x in list(y_pred.cpu().numpy())])
# losses.append(cost.item())
# print (y_trues)
# print (y_preds)
# print (losses)
# eval_loss = np.sum(np.asarray(losses))/len(losses)
# print ('----------------------------------')
# print (y_trues)
print(classification_report(y_trues, y_preds))
# con_mat = confusion_matrix(y_trues,y_preds)
# acc_score = accuracy_score(y_true,y_pred)
del inputs
del labels
del attn_mask
del sent_ids
return f1_score(y_trues, y_preds)
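# Hypothetical reconstruction (this helper is called by train() below but is not defined
# anywhere in the original file). A minimal sketch consistent with its usage, returning
# the saved checkpoint paths ordered so that files[:3] are the lowest-scoring ones:
def current_best_f1_measure(prefix='best_NLER_model_f1_score_'):
    import glob
    files = glob.glob(prefix + '*')
    # sort ascending by the f1 value encoded in the filename
    return sorted(files, key=lambda p: float(p.split(prefix)[-1]))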
def train(ner_model, train_dset, dev_data_gen, batch_size=124, step_every=60, lr=2e-4, warmup_steps=900,total_steps=9000):
# Trains the NER model; the final setup uses the Adam optimizer, lr=2e-4, 30 epochs, batch_size=124, trained in parallel on 4 GPUs.
# Because this dataset is large and noisy, the model fails to converge with small batch sizes (16 or 32); it only converges once batch_size >= 96.
torch.cuda.empty_cache()
# ner_model =ner_model.to(device)
ner_model = nn.DataParallel(ner_model, device_ids)
ner_model.train()
history = []
print('go')
best_f1 = 0.
min_training_error = 1.
# optimizer = AdamW(ner_model.parameters(), lr=lr, correct_bias=False)
adam_optim = optim.Adam(ner_model.parameters(), lr=lr)
# lookahead = lk.Lookahead(adam_optim, k=5, alpha=0.5)
# scheduler = WarmupLinearSchedule(optimizer, warmup_steps=warmup_steps, t_total=total_steps)
params = {'batch_size': batch_size,
'shuffle': True,
'num_workers': 8}
train_data_gen = data.DataLoader(train_dset, **params)
steps = 0
for e in range(30):
print('epoch: ', e)
for inputs, labels, attn_mask, sent_ids in train_data_gen:
# inputs = inputs.to(device)
# labels = labels.to(device)
# attn_mask = attn_mask.to(device)
# sent_ids = sent_ids.to(device)
steps += 1
loss = ner_model(inputs, labels, attn_mask, sent_ids)
loss.sum().backward()
adam_optim.step()
# scheduler.step()
adam_optim.zero_grad()
history.append(loss.sum().item())
if steps % 20 == 0:
print('training error: ', loss.sum().item())
print('step:', steps)
if loss.sum().item() < min_training_error and loss.sum().item() < 0.05 and best_f1 > 0.85:
p = np.random.uniform()
if p > 0.6:
continue
min_training_error = loss.sum().item()
print('-----------------eval mode------------------')
b = eval_model(ner_model, dev_data_gen, loss_func=None)
print('eval f1_score', b)
print('------------------end ----------------------')
if b >= best_f1 and b > 0.85:
print('----------saving model-----------------')
path = 'best_NLER_model_f1_score_' + str(b)
torch.save(ner_model.state_dict(), path)
files = current_best_f1_measure()
if len(files) >= 8:
for i in files[:3]:
os.remove(i)
print('')
print('')
print('----------end saving model-------------')
if b > best_f1:
best_f1 = b
# if d>0.94:
# print ('----------saving model-----------------')
# path = 'best_ner_model_acc_score_'+str(d)
# torch.save(ner_model.state_dict(), path)
# print ('')
# print ('')
# print ('----------end saving model-------------')
if steps % step_every == 0 and steps > 0:
print('-----------------eval mode------------------')
b = eval_model(ner_model, dev_data_gen, loss_func=None)
print('eval f1_score', b)
print('------------------end ----------------------')
if b >= best_f1 and b > 0.85:
print('----------saving model-----------------')
path = 'best_NLER_model_f1_score_' + str(b)
torch.save(ner_model.state_dict(), path)
print('')
print('')
print('----------end saving model-------------')
if b > best_f1:
best_f1 = b
# if d>0.94:
# print ('----------saving model-----------------')
# path = 'best_ner_model_acc_score_'+str(d)
# torch.save(ner_model.state_dict(), path)
# print ('')
# print ('')
# print ('----------end saving model-------------')
# diff = train_loss_diff(history[::-1][:10])
# if diff<0.00000005:
# return history
return history
## indicate devices to use, here we use 4 GPUs
# training scripts
#---------------------------------------------------------------------------------------------------
device = get_device(use_gpu = True)
print (device)
cuda1 = torch.device('cuda:0')
cuda2 = torch.device('cuda:1')
cuda3 = torch.device('cuda:2')
cuda4 = torch.device('cuda:3')
device_ids = [cuda1,cuda2,cuda3,cuda4]
a,b,c = make_dataset(sentences[:],labels[:])
dataset_tensor,label_tensor,attn_mask, sent_id_tensor = change_dataset(a,b,c) # process dataset
model = BertModel.from_pretrained('bert-base-multilingual-cased')
ner_model = NER_Model(model,3) # load model
ner_model.cuda(0) # move model to cuda device
params = {'batch_size': 32,
'shuffle': False,
'num_workers': 8 }
N = dataset_tensor.size(0)
temp = np.arange(N)
np.random.shuffle(temp)
train_idx = torch.tensor(temp[:int(0.8*N)]).long() # 80% for training data
test_idx = torch.tensor(temp[int(0.8*N):]).long() # 20% for test data
train_dataset_tensor,train_label_tensor,train_attn_mask, train_sent_id_tensor = dataset_tensor[train_idx],label_tensor[train_idx],attn_mask[train_idx], sent_id_tensor[train_idx]
test_dataset_tensor,test_label_tensor,test_attn_mask, test_sent_id_tensor = dataset_tensor[test_idx],label_tensor[test_idx],attn_mask[test_idx], sent_id_tensor[test_idx]
train_dset = data.TensorDataset(train_dataset_tensor,train_label_tensor,train_attn_mask, train_sent_id_tensor ) # wrap the training tensors in a PyTorch TensorDataset
dev_dset = data.TensorDataset(test_dataset_tensor,test_label_tensor,test_attn_mask, test_sent_id_tensor) # wrap the test tensors in a PyTorch TensorDataset
dev_loader = data.DataLoader(dev_dset,**params) # data loader for the test dataset
loss_history = train(ner_model,train_dset,dev_loader,batch_size=120,step_every=50,lr = 2e-5, warmup_steps = 950, total_steps = 9500)
#-------------------------------------------------------------------------------------------------
# inference- using trained model to do inference on unseen data
#-------------------------------------------------------------------------------------------------
from collections import OrderedDict
device = get_device(False)
tokenizer = BertTokenizer.from_pretrained('bert-base-multilingual-cased')
model = BertModel.from_pretrained('bert-base-multilingual-cased')
ner_model = NER_Model(model,3)
params = OrderedDict()
path_name = 'best_NLER_model_f1_score_0.8714718053239453'
s = torch.load(path_name)
for k in s:
params[k[7:]] = s[k]
ner_model.load_state_dict(params)
def extract_labels(outputs, cum_sents_length, idx2entity):
s = 0
final_outputs = []
for i in range(len(cum_sents_length) - 1):
if i == 0:
temp = outputs[:cum_sents_length[i]].cpu().numpy()
final_outputs.append([idx2entity[t] for t in temp])
s += 1
temp = outputs[cum_sents_length[i]:cum_sents_length[i + 1]].cpu().numpy()
final_outputs.append([idx2entity[t] for t in temp])
return final_outputs
def inference_sents(sents, ner_model, idx2entity):
# format of sents: sents::List[String] e.g.:['i love studying .', 'good job, nice work.','deep learning is fun !',.....]
if len(sents) > 32:
print('number of sentences must be less than 33')
return
ret = copy.deepcopy(sents)
sents = [s.split() for s in sents]
sent_length = [len(s) for s in sents]
labels = [[0] * t for t in sent_length]
cum_sents_length = np.cumsum(np.asarray(sent_length))
a, b, c = make_dataset(sents[:], labels[:])
dataset_tensor, label_tensor, attn_mask, sent_id_tensor = change_dataset(a, b, c)
_, _, o2 = ner_model(dataset_tensor, label_tensor, attn_mask, sent_id_tensor, extract_feats=True)
labels = extract_labels(o2, cum_sents_length, idx2entity)
return ret, labels
sents = ['Anda bisa juga langsung melakukan prediksi dengan menggunakan model yang telah saya buat , yaitu','Ngurusin data lagi untuk kerjaan suatu kementerian .']
sents,labels = inference_sents(sents,ner_model,idx2entity)
|
[
"noreply@github.com"
] |
tianyao-aka.noreply@github.com
|
fad28a7559308bee0c5acdfc8681f51b8076f9be
|
c101c4f7dfdb4492a380e7564beaf2892c9ae527
|
/modules/s3/s3resource.py
|
f30e194ceb9f99545db7f694b1314d6241141f10
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
somayjain/eden
|
0137b4c89f04ae35b431881d27a175deb4b31ebb
|
d401e20a7512e7b7781f16a13503bbd984bf2dbb
|
refs/heads/master
| 2021-01-18T11:56:50.477613
| 2014-11-16T11:34:27
| 2014-11-20T12:34:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 210,832
|
py
|
# -*- coding: utf-8 -*-
""" S3 Resources
@copyright: 2009-2014 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
@group Resource API: S3Resource,
@group Filter API: S3ResourceFilter
@group Helper Classes: S3RecordMerger
"""
__all__ = ("S3AxisFilter",
"S3Resource",
"S3ResourceFilter",
)
import datetime
import sys
from itertools import chain
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
try:
from lxml import etree
except ImportError:
print >> sys.stderr, "ERROR: lxml module needed for XML handling"
raise
try:
import json # try stdlib (Python 2.6)
except ImportError:
try:
import simplejson as json # try external module
except:
import gluon.contrib.simplejson as json # fallback to pure-Python module
from gluon import current
from gluon.html import A, TAG
from gluon.http import HTTP
from gluon.validators import IS_EMPTY_OR
try:
from gluon.dal import Field
from gluon.dal.objects import Row, Rows, Table, Expression
except ImportError:
# old web2py
from gluon.dal import Row, Rows, Table, Field, Expression
from gluon.storage import Storage
from gluon.tools import callback
from s3data import S3DataTable, S3DataList, S3PivotTable
from s3fields import S3Represent, s3_all_meta_field_names
from s3query import FS, S3ResourceField, S3ResourceQuery, S3Joins, S3URLQuery
from s3utils import s3_has_foreign_key, s3_get_foreign_key, s3_unicode, s3_get_last_record_id, s3_remove_last_record_id
from s3validators import IS_ONE_OF
from s3xml import S3XMLFormat
DEBUG = False
if DEBUG:
print >> sys.stderr, "S3Resource: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
osetattr = object.__setattr__
ogetattr = object.__getattribute__
MAXDEPTH = 10
# Compact JSON encoding
#SEPARATORS = (",", ":")
# =============================================================================
class S3Resource(object):
"""
API for resources.
A "resource" is a set of records in a database table including their
references in certain related resources (components). A resource can
be defined like:
resource = S3Resource(table)
A resource defined like this would include all records in the table.
Further parameters for the resource constructor as well as methods
of the resource instance can be used to filter for particular subsets.
This API provides extended standard methods to access and manipulate
data in resources while respecting current authorization and other
S3 framework rules.
"""
def __init__(self, tablename,
id=None,
prefix=None,
uid=None,
filter=None,
vars=None,
parent=None,
linked=None,
linktable=None,
alias=None,
components=None,
filter_component=None,
include_deleted=False,
approved=True,
unapproved=False,
context=False):
"""
Constructor
@param tablename: tablename, Table, or an S3Resource instance
@param prefix: prefix to use for the tablename
@param id: record ID (or list of record IDs)
@param uid: record UID (or list of record UIDs)
@param filter: filter query
@param vars: dictionary of URL query variables
@param components: list of component aliases
to load for this resource
@param filter_component: alias of the component the URL filters
apply for (filters for this component
must be handled separately)
@param alias: the alias for this resource (internal use only)
@param parent: the parent resource (internal use only)
@param linked: the linked resource (internal use only)
@param linktable: the link table (internal use only)
@param include_deleted: include deleted records (used for
synchronization)
@param approved: include approved records
@param unapproved: include unapproved records
@param context: apply context filters
"""
s3db = current.s3db
auth = current.auth
# Names ---------------------------------------------------------------
self.table = None
self._alias = None
if prefix is None:
if not isinstance(tablename, basestring):
if isinstance(tablename, Table):
self.table = tablename
self._alias = self.table._tablename
tablename = self._alias
elif isinstance(tablename, S3Resource):
self.table = tablename.table
self._alias = self.table._tablename
tablename = tablename.tablename
else:
error = "%s is not a valid type for a tablename" % tablename
raise SyntaxError(error)
if "_" in tablename:
prefix, name = tablename.split("_", 1)
else:
raise SyntaxError("invalid tablename: %s" % tablename)
else:
name = tablename
tablename = "%s_%s" % (prefix, name)
self.prefix = prefix
""" Module prefix of the tablename """
self.name = name
""" Tablename without module prefix """
self.tablename = tablename
""" Tablename """
self.alias = alias or name
"""
Alias of the resource, defaults to tablename
without module prefix
"""
# Table ---------------------------------------------------------------
if self.table is None:
self.table = s3db[tablename]
table = self.table
# Set default approver
auth.permission.set_default_approver(table)
if not self._alias:
self._alias = tablename
""" Table alias (the tablename used in joins/queries) """
if parent is not None:
if parent.tablename == self.tablename:
alias = "%s_%s_%s" % (prefix, self.alias, name)
pkey = table._id.name
table = table.with_alias(alias)
table._id = table[pkey]
self._alias = alias
self.table = table
self.fields = table.fields
self._id = table._id
# Hooks ---------------------------------------------------------------
# Authorization hooks
self.accessible_query = auth.s3_accessible_query
# Filter --------------------------------------------------------------
# Default query options
self.include_deleted = include_deleted
self._approved = approved
self._unapproved = unapproved
# Component Filter
self.filter = None
# Resource Filter
self.rfilter = None
# Rows ----------------------------------------------------------------
self._rows = None
self._rowindex = None
self.rfields = None
self.dfields = None
self._ids = []
self._uids = []
self._length = None
# Request attributes --------------------------------------------------
self.vars = None # set during build_query
self.lastid = None
self.files = Storage()
# Components ----------------------------------------------------------
# Initialize component properties (will be set during _attach)
self.link = None
self.linktable = None
self.actuate = None
self.lkey = None
self.rkey = None
self.pkey = None
self.fkey = None
self.multiple = True
self.parent = parent # the parent resource
self.linked = linked # the linked resource
self.components = Storage()
self.links = Storage()
if parent is None:
# This is the master resource - attach components
attach = self._attach
hooks = s3db.get_components(table, names=components)
[attach(alias, hooks[alias]) for alias in hooks]
# Build query
self.build_query(id=id,
uid=uid,
filter=filter,
vars=vars,
filter_component=filter_component)
if context:
self.add_filter(s3db.context)
# Component - attach link table
elif linktable is not None:
# This is link-table component - attach the link table
self.link = S3Resource(linktable,
parent=self.parent,
linked=self,
include_deleted=self.include_deleted,
approved=self._approved,
unapproved=self._unapproved)
# Export and Import ---------------------------------------------------
# Pending Imports
self.skip_import = False
self.job = None
self.mtime = None
self.error = None
self.error_tree = None
self.import_count = 0
self.import_created = []
self.import_updated = []
self.import_deleted = []
# Export meta data
self.muntil = None # latest mtime of the exported records
self.results = None # number of exported records
# Standard methods ----------------------------------------------------
# CRUD
from s3crud import S3CRUD
self.crud = S3CRUD()
self.crud.resource = self
# -------------------------------------------------------------------------
def _attach(self, alias, hook):
"""
Attach a component
@param alias: the alias
@param hook: the hook
"""
if alias is not None and hook.filterby is not None:
table_alias = "%s_%s_%s" % (hook.prefix,
hook.alias,
hook.name)
table = hook.table.with_alias(table_alias)
table._id = table[table._id.name]
hook.table = table
else:
table_alias = None
# Create as resource
component = S3Resource(hook.table,
parent=self,
alias=alias,
linktable=hook.linktable,
include_deleted=self.include_deleted,
approved=self._approved,
unapproved=self._unapproved)
if table_alias:
component.tablename = hook.tablename
component._alias = table_alias
# Update component properties
component.pkey = hook.pkey
component.fkey = hook.fkey
component.linktable = hook.linktable
component.lkey = hook.lkey
component.rkey = hook.rkey
component.actuate = hook.actuate
component.autodelete = hook.autodelete
component.autocomplete = hook.autocomplete
component.alias = alias
component.multiple = hook.multiple
component.values = hook.values
if hook.filterby is not None:
filterfor = hook.filterfor
is_list = isinstance(filterfor, (tuple, list))
if is_list and len(filterfor) == 1:
is_list = False
filterfor = filterfor[0]
if not is_list:
component.filter = (hook.table[hook.filterby] == filterfor)
elif filterfor:
component.filter = (hook.table[hook.filterby].belongs(filterfor))
else:
component.filter = None
else:
component.filter = None
# Copy properties to the link
if component.link is not None:
link = component.link
link.pkey = component.pkey
link.fkey = component.lkey
link.actuate = component.actuate
link.autodelete = component.autodelete
link.multiple = component.multiple
# @todo: possible ambiguity if the same link is used
# in multiple components (e.g. filtered or 3-way),
# need a better aliasing mechanism here
self.links[link.name] = link
self.components[alias] = component
return
# -------------------------------------------------------------------------
# Query handling
# -------------------------------------------------------------------------
def build_query(self,
id=None,
uid=None,
filter=None,
vars=None,
filter_component=None):
"""
Query builder
@param id: record ID or list of record IDs to include
@param uid: record UID or list of record UIDs to include
@param filter: filtering query (DAL only)
@param vars: dict of URL query variables
@param filter_component: the alias of the component the URL
filters apply for (filters for this
component must be handled separately)
"""
# Reset the rows counter
self._length = None
self.rfilter = S3ResourceFilter(self,
id=id,
uid=uid,
filter=filter,
vars=vars,
filter_component=filter_component)
return self.rfilter
# -------------------------------------------------------------------------
def add_filter(self, f=None, c=None):
"""
Extend the current resource filter
@param f: a Query or a S3ResourceQuery instance
@param c: alias of the component this filter concerns,
automatically adds the respective component join
(not needed for S3ResourceQuery instances)
"""
if f is None:
return
self.clear()
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=c)
# -------------------------------------------------------------------------
def add_component_filter(self, alias, f=None):
"""
Extend the resource filter of a particular component, does
not affect the master resource filter (as opposed to add_filter)
@param alias: the alias of the component
@param f: a Query or a S3ResourceQuery instance
"""
if f is None:
return
if self.rfilter is None:
self.rfilter = S3ResourceFilter(self)
self.rfilter.add_filter(f, component=alias, master=False)
# -------------------------------------------------------------------------
def get_query(self):
""" Get the effective query """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_query()
# -------------------------------------------------------------------------
def get_filter(self):
""" Get the effective virtual fields filter """
if self.rfilter is None:
self.build_query()
return self.rfilter.get_filter()
# -------------------------------------------------------------------------
def clear_query(self):
""" Removes the current query (does not remove the set!) """
self.rfilter = None
components = self.components
if components:
for c in components:
components[c].clear_query()
# -------------------------------------------------------------------------
# Data access (new API)
# -------------------------------------------------------------------------
def count(self, left=None, distinct=False):
"""
Get the total number of available records in this resource
@param left: left outer joins, if required
@param distinct: only count distinct rows
"""
if self.rfilter is None:
self.build_query()
if self._length is None:
self._length = self.rfilter.count(left=left,
distinct=distinct)
return self._length
# -------------------------------------------------------------------------
def select(self,
fields,
start=0,
limit=None,
left=None,
orderby=None,
groupby=None,
distinct=False,
virtual=True,
count=False,
getids=False,
as_rows=False,
represent=False,
show_links=True,
raw_data=False):
"""
Extract data from this resource
@param fields: the fields to extract (selector strings)
@param start: index of the first record
@param limit: maximum number of records
@param left: additional left joins required for filters
@param orderby: orderby-expression for DAL
@param groupby: fields to group by (overrides fields!)
@param distinct: select distinct rows
@param virtual: include mandatory virtual fields
@param count: include the total number of matching records
@param getids: include the IDs of all matching records
@param as_rows: return the rows (don't extract)
@param represent: render field value representations
@param raw_data: include raw data in the result
"""
data = S3ResourceData(self,
fields,
start=start,
limit=limit,
left=left,
orderby=orderby,
groupby=groupby,
distinct=distinct,
virtual=virtual,
count=count,
getids=getids,
as_rows=as_rows,
represent=represent,
show_links=show_links,
raw_data=raw_data)
if as_rows:
return data.rows
else:
return data
# -------------------------------------------------------------------------
def insert(self, **fields):
"""
Insert a record into this resource
@param fields: dict of field/value pairs to insert
"""
# Check permission
authorised = current.auth.s3_has_permission("create", self.tablename)
if not authorised:
raise IOError("Operation not permitted: INSERT INTO %s" %
self.tablename)
# Insert new record
record_id = self.table.insert(**fields)
# Audit
if record_id:
record = Storage(fields).update(id=record_id)
current.audit("create", self.prefix, self.name, form=record)
return record_id
# -------------------------------------------------------------------------
def update(self):
raise NotImplementedError
# -------------------------------------------------------------------------
def delete(self,
format=None,
cascade=False,
replaced_by=None):
"""
Delete all (deletable) records in this resource
@param format: the representation format of the request (optional)
@param cascade: this is a cascade delete (prevents rollbacks/commits)
@param replaced_by: used by record merger
@return: number of records deleted
"""
s3db = current.s3db
# Reset error
self.error = None
table = self.table
get_config = self.get_config
pkey = self._id.name
# Determine relevant fields
fields = [pkey]
add_field = fields.append
supertables = get_config("super_entity")
if supertables:
# Add super-keys (avoids reloading in delete_super)
if not isinstance(supertables, (list, tuple)):
supertables = [supertables]
for sname in supertables:
stable = s3db.table(sname) \
if isinstance(sname, str) else sname
if stable is None:
continue
key = stable._id.name
if key in table.fields:
add_field(key)
if "uuid" in table.fields:
add_field("uuid")
# Get all rows
rows = self.select(fields, as_rows=True)
if not rows:
# No rows? => that was it already :)
return 0
numrows = 0
db = current.db
has_permission = current.auth.s3_has_permission
audit = current.audit
prefix = self.prefix
name = self.name
define_resource = s3db.resource
delete_super = s3db.delete_super
DELETED = current.xml.DELETED
INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
tablename = self.tablename
if current.deployment_settings.get_security_archive_not_delete() and \
DELETED in table:
# Find all references
if not cascade:
# Must load all models to detect dependencies
s3db.load_all_models()
if db._lazy_tables:
# Must roll out all lazy tables to detect dependencies
for tn in db._LAZY_TABLES.keys():
db[tn]
references = table._referenced_by
try:
rfields = [f for f in references if f.ondelete == "RESTRICT"]
except AttributeError:
# older web2py
references = [db[tn][fn] for tn, fn in references]
rfields = [f for f in references if f.ondelete == "RESTRICT"]
# Determine deletable rows
deletable = set(row[pkey] for row in rows)
for rfield in rfields:
if deletable:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
query = (rfield.belongs(deletable))
if tn == self.tablename:
query &= (rfield != rtable._id)
if DELETED in rtable:
query &= (rtable[DELETED] != True)
rrows = db(query).select(rfield)
for rrow in rrows:
deletable.discard(rrow[fn])
# Get custom ondelete-cascade
ondelete_cascade = get_config("ondelete_cascade")
for row in rows:
record_id = row[pkey]
# Check permission to delete this record
if not has_permission("delete", table, record_id=record_id):
continue
error = self.error
self.error = None
# Run custom ondelete_cascade first
if ondelete_cascade:
try:
callback(ondelete_cascade, row, tablename=tablename)
except:
# Custom RESTRICT or cascade failure: row not deletable
continue
if record_id not in deletable:
# Check deletability again
restricted = False
for rfield in rfields:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
#rfield = rtable[fn]
query = (rfield == record_id)
if tn == self.tablename:
query &= (rfield != rtable._id)
if DELETED in rtable:
query &= (rtable[DELETED] != True)
rrow = db(query).select(rfield,
limitby=(0, 1)).first()
if rrow:
restricted = True
break
if not restricted:
deletable.add(record_id)
if record_id not in deletable:
# Row is not deletable
self.error = INTEGRITY_ERROR
continue
# Run automatic ondelete-cascade
for rfield in references:
fn, tn = rfield.name, rfield.tablename
rtable = db[tn]
query = (rfield == record_id)
if tn == self.tablename:
query &= (rfield != rtable._id)
if rfield.ondelete == "CASCADE":
rresource = define_resource(tn,
filter=query,
unapproved=True)
rresource.delete(cascade=True)
if rresource.error:
self.error = rresource.error
break
elif rfield.ondelete == "SET NULL":
try:
db(query).update(**{fn:None})
except:
self.error = INTEGRITY_ERROR
break
elif rfield.ondelete == "SET DEFAULT":
try:
db(query).update(**{fn:rfield.default})
except:
self.error = INTEGRITY_ERROR
break
# Unlink all super-records
if not self.error and not delete_super(table, row):
self.error = INTEGRITY_ERROR
if self.error:
# Error in deletion cascade: roll back + skip row
if not cascade:
db.rollback()
continue
else:
# Auto-delete linked records if this was the last link
linked = self.linked
if linked and self.autodelete and linked.autodelete:
rkey = linked.rkey
fkey = linked.fkey
if rkey in table:
query = (table._id == record_id)
this = db(query).select(table._id,
table[rkey],
limitby=(0, 1)).first()
query = (table._id != this[pkey]) & \
(table[rkey] == this[rkey])
if DELETED in table:
query &= (table[DELETED] != True)
remaining = db(query).select(table._id,
limitby=(0, 1)).first()
if not remaining:
linked_table = s3db.table(linked.tablename)
query = (linked_table[fkey] == this[rkey])
linked = define_resource(linked_table,
filter=query,
unapproved=True)
linked.delete(cascade=True)
# Pull back prior error status
self.error = error
error = None
# "Park" foreign keys to resolve constraints, "un-delete"
# would then restore any still-valid FKs from this field!
fields = dict(deleted=True)
if "deleted_fk" in table:
record = table[record_id]
fk = {}
for f in table.fields:
if record[f] is not None and \
s3_has_foreign_key(table[f]):
fk[f] = record[f]
fields[f] = None
else:
continue
if fk:
fields.update(deleted_fk=json.dumps(fk))
# Annotate the replacement record
idstr = str(record_id)
if replaced_by and idstr in replaced_by and \
"deleted_rb" in table.fields:
fields.update(deleted_rb=replaced_by[idstr])
# Update the row, finally
db(table._id == record_id).update(**fields)
numrows += 1
# Clear session
if s3_get_last_record_id(tablename) == record_id:
s3_remove_last_record_id(tablename)
# Audit
audit("delete", prefix, name,
record=record_id, representation=format)
# On-delete hook
ondelete = get_config("ondelete")
if ondelete:
callback(ondelete, row)
# Commit after each row to not have it rolled back by
# subsequent cascade errors
if not cascade:
db.commit()
else:
# Hard delete
for row in rows:
record_id = row[pkey]
# Check permission to delete this row
if not has_permission("delete", table, record_id=record_id):
continue
# @ToDo: ondelete_cascade?
# Delete super-entity
success = delete_super(table, row)
if not success:
self.error = INTEGRITY_ERROR
continue
# Delete the row
try:
del table[record_id]
except:
# Row is not deletable
self.error = INTEGRITY_ERROR
continue
else:
# Successfully deleted
numrows += 1
# Clear session
if s3_get_last_record_id(tablename) == record_id:
s3_remove_last_record_id(tablename)
# Audit
audit("delete", prefix, name,
record=row[pkey], representation=format)
# On-delete hook
ondelete = get_config("ondelete")
if ondelete:
callback(ondelete, row)
# Commit after each row to not have it rolled back by
# subsequent cascade errors
if not cascade:
db.commit()
if numrows == 0 and not deletable:
# No deletable rows found
self.error = INTEGRITY_ERROR
return numrows
# -------------------------------------------------------------------------
def approve(self, components=[], approve=True):
"""
Approve all records in this resource
@param components: list of component aliases to include, None
for no components, empty list for all components
@param approve: set to approved (False for reset to unapproved)
"""
db = current.db
auth = current.auth
if auth.s3_logged_in():
user_id = approve and auth.user.id or None
else:
return False
tablename = self.tablename
table = self._table
records = self.select([self._id.name], limit=None)
for record in records["rows"]:
record_id = record[str(self._id)]
# Forget any cached permission for this record
auth.permission.forget(table, record_id)
if "approved_by" in table.fields:
dbset = db(table._id == record_id)
success = dbset.update(approved_by = user_id)
if not success:
current.db.rollback()
return False
else:
onapprove = self.get_config("onapprove", None)
if onapprove is not None:
row = dbset.select(limitby=(0, 1)).first()
if row:
callback(onapprove, row, tablename=tablename)
if components is None:
continue
for alias in self.components:
if components and alias not in components:
continue
component = self.components[alias]
success = component.approve(components=None, approve=approve)
if not success:
current.db.rollback()
return False
return True
# -------------------------------------------------------------------------
def reject(self, cascade=False):
""" Reject (delete) all records in this resource """
db = current.db
s3db = current.s3db
define_resource = s3db.resource
DELETED = current.xml.DELETED
INTEGRITY_ERROR = current.ERROR.INTEGRITY_ERROR
tablename = self.tablename
table = self.table
pkey = table._id.name
# Get hooks configuration
get_config = s3db.get_config
ondelete = get_config(tablename, "ondelete")
onreject = get_config(tablename, "onreject")
ondelete_cascade = get_config(tablename, "ondelete_cascade")
# Get all rows
if "uuid" in table.fields:
rows = self.select([table._id.name, "uuid"], as_rows=True)
else:
rows = self.select([table._id.name], as_rows=True)
if not rows:
return True
delete_super = s3db.delete_super
if DELETED in table:
references = table._referenced_by
for row in rows:
error = self.error
self.error = None
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# Automatic cascade
for ref in references:
try:
tn, fn = ref.tablename, ref.name
except:
# old web2py < 2.0
tn, fn = ref
rtable = db[tn]
rfield = rtable[fn]
query = (rfield == row[pkey])
# Ignore RESTRICTs => reject anyway
if rfield.ondelete in ("CASCADE", "RESTRICT"):
rresource = define_resource(tn, filter=query, unapproved=True)
rresource.reject(cascade=True)
if rresource.error:
break
elif rfield.ondelete == "SET NULL":
try:
db(query).update(**{fn:None})
except:
self.error = INTEGRITY_ERROR
break
elif rfield.ondelete == "SET DEFAULT":
try:
db(query).update(**{fn:rfield.default})
except:
self.error = INTEGRITY_ERROR
break
if not self.error and not delete_super(table, row):
self.error = INTEGRITY_ERROR
if self.error:
db.rollback()
raise RuntimeError("Reject failed for %s.%s" %
(tablename, row[table._id]))
else:
# Pull back prior error status
self.error = error
error = None
# On-reject hook
if onreject:
callback(onreject, row, tablename=tablename)
# Park foreign keys
fields = dict(deleted=True)
if "deleted_fk" in table:
record = table[row[pkey]]
fk = {}
for f in table.fields:
if record[f] is not None and \
s3_has_foreign_key(table[f]):
fk[f] = record[f]
fields[f] = None
else:
continue
if fk:
fields.update(deleted_fk=json.dumps(fk))
# Update the row, finally
db(table._id == row[pkey]).update(**fields)
# Clear session
if s3_get_last_record_id(tablename) == row[pkey]:
s3_remove_last_record_id(tablename)
# On-delete hook
if ondelete:
callback(ondelete, row, tablename=tablename)
else:
# Hard delete
for row in rows:
# On-delete-cascade
if ondelete_cascade:
callback(ondelete_cascade, row, tablename=tablename)
# On-reject
if onreject:
callback(onreject, row, tablename=tablename)
try:
del table[row[pkey]]
except:
# Row is not deletable
self.error = INTEGRITY_ERROR
db.rollback()
raise
else:
# Clear session
if s3_get_last_record_id(tablename) == row[pkey]:
s3_remove_last_record_id(tablename)
# Delete super-entity
delete_super(table, row)
# On-delete
if ondelete:
callback(ondelete, row, tablename=tablename)
return True
# -------------------------------------------------------------------------
def merge(self,
original_id,
duplicate_id,
replace=None,
update=None,
main=True):
""" Merge two records, see also S3RecordMerger.merge """
from s3merge import S3RecordMerger
return S3RecordMerger(self).merge(original_id,
duplicate_id,
replace=replace,
update=update,
main=main)
# -------------------------------------------------------------------------
# Exports
# -------------------------------------------------------------------------
def datatable(self,
fields=None,
start=0,
limit=None,
left=None,
orderby=None,
distinct=False,
getids=False):
"""
Generate a data table of this resource
@param fields: list of fields to include (field selector strings)
@param start: index of the first record to include
@param limit: maximum number of records to include
@param left: additional left joins for DB query
@param orderby: orderby for DB query
@param distinct: distinct-flag for DB query
@param getids: return the record IDs of all records matching the
query (used in search to create a filter)
@return: tuple (S3DataTable, numrows, ids), where numrows represents
the total number of rows in the table that match the query;
ids is empty unless getids=True
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
# Automatically include the record ID
table = self.table
if table._id.name not in selectors:
fields.insert(0, table._id.name)
selectors.insert(0, table._id.name)
# Skip representation of IDs in data tables
id_repr = table._id.represent
table._id.represent = None
# Extract the data
data = self.select(selectors,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct,
count=True,
getids=getids,
represent=True)
rows = data["rows"]
# Restore ID representation
table._id.represent = id_repr
# Empty table - or just no match?
empty = False
if not rows:
DELETED = current.xml.DELETED
if DELETED in table:
query = (table[DELETED] != True)
else:
query = (table._id > 0)
row = current.db(query).select(table._id, limitby=(0, 1)).first()
if not row:
empty = True
# Generate the data table
rfields = data["rfields"]
dt = S3DataTable(rfields, rows, orderby=orderby, empty=empty)
return dt, data["numrows"], data["ids"]
# -------------------------------------------------------------------------
def datalist(self,
fields=None,
start=0,
limit=None,
left=None,
orderby=None,
distinct=False,
getids=False,
list_id=None,
layout=None):
"""
Generate a data list of this resource
@param fields: list of fields to include (field selector strings)
@param start: index of the first record to include
@param limit: maximum number of records to include
@param left: additional left joins for DB query
@param orderby: orderby for DB query
@param distinct: distinct-flag for DB query
@param getids: return the record IDs of all records matching the
query (used in search to create a filter)
@param list_id: the list identifier
@param layout: custom renderer function (see S3DataList.render)
@return: tuple (S3DataList, numrows, ids), where numrows represents
the total number of rows in the table that match the query;
ids is empty unless getids=True
"""
# Choose fields
if fields is None:
fields = [f.name for f in self.readable_fields()]
selectors = list(fields)
# Automatically include the record ID
table = self.table
if table._id.name not in selectors:
fields.insert(0, table._id.name)
selectors.insert(0, table._id.name)
# Extract the data
data = self.select(selectors,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct,
count=True,
getids=getids,
raw_data=True,
represent=True)
# Generate the data list
numrows = data["numrows"]
dl = S3DataList(self,
fields,
data["rows"],
list_id=list_id,
start=start,
limit=limit,
total=numrows,
layout=layout)
return dl, numrows, data["ids"]
# -------------------------------------------------------------------------
def pivottable(self, rows, cols, layers, strict=True):
"""
Generate a pivot table of this resource.
@param rows: field selector for the rows dimension
@param cols: field selector for the columns dimension
@param layers: list of tuples (field selector, method) for
the aggregation layers
@param strict: filter out dimension values which don't match
the resource filter
@return: an S3PivotTable instance
Supported methods: see S3PivotTable
"""
return S3PivotTable(self, rows, cols, layers, strict=strict)
# -------------------------------------------------------------------------
def json(self,
fields=None,
start=0,
limit=None,
left=None,
distinct=False,
orderby=None):
"""
Export a JSON representation of the resource.
@param fields: list of field selector strings
@param start: index of the first record
@param limit: maximum number of records
@param left: list of (additional) left joins
@param distinct: select only distinct rows
@param orderby: Orderby-expression for the query
@return: the JSON (as string), representing a list of
dicts with {"tablename.fieldname":"value"}
"""
data = self.select(fields=fields,
start=start,
limit=limit,
orderby=orderby,
left=left,
distinct=distinct)["rows"]
return json.dumps(data)
# -------------------------------------------------------------------------
# Data Object API
# -------------------------------------------------------------------------
def load(self,
fields=None,
skip=None,
start=None,
limit=None,
orderby=None,
virtual=True,
cacheable=False):
"""
Loads records from the resource, applying the current filters,
and stores them in the instance.
@param fields: list of field names to include
@param skip: list of field names to skip
@param start: the index of the first record to load
@param limit: the maximum number of records to load
@param orderby: orderby-expression for the query
@param virtual: whether to load virtual fields or not
@param cacheable: don't define Row actions like update_record
or delete_record (faster, and the record can
be cached)
@return: the records as list of Rows
"""
table = self.table
tablename = self.tablename
UID = current.xml.UID
load_uids = hasattr(table, UID)
if not skip:
skip = tuple()
if fields or skip:
s3 = current.response.s3
if "all_meta_fields" in s3:
meta_fields = s3.all_meta_fields
else:
meta_fields = s3.all_meta_fields = s3_all_meta_field_names()
s3db = current.s3db
# Field selection
qfields = ([table._id.name, UID])
append = qfields.append
for f in table.fields:
if tablename == "gis_location" and \
((f == "the_geom") or (f == "wkt" and current.auth.permission.format != "cap")):
# Filter out bulky Polygons
continue
elif f in ("wkt", "the_geom") and tablename.startswith("gis_layer_shapefile_"):
# Filter out bulky Polygons
continue
if fields or skip:
# Must include all meta-fields
if f in meta_fields:
append(f)
continue
# Must include all super-keys
ktablename = s3_get_foreign_key(table[f], m2m=False)[0]
if ktablename:
ktable = s3db.table(ktablename)
if ktable and hasattr(ktable, "instance_type"):
append(f)
continue
if f in skip:
continue
if not fields or f in fields:
qfields.append(f)
fields = list(set(filter(lambda f: hasattr(table, f), qfields)))
if self._rows is not None:
self.clear()
rfilter = self.rfilter
multiple = rfilter.multiple if rfilter is not None else True
if not multiple and self.parent and self.parent.count() == 1:
start = 0
limit = 1
rows = self.select(fields,
start=start,
limit=limit,
orderby=orderby,
virtual=virtual,
as_rows=True)
ids = self._ids = []
new_id = ids.append
self._uids = []
new_uid = self._uids.append
self._rows = []
new_row = self._rows.append
if rows:
pkey = table._id.name
for row in rows:
if hasattr(row, tablename):
_row = ogetattr(row, tablename)
if type(_row) is Row:
row = _row
record_id = ogetattr(row, pkey)
if record_id not in ids:
new_id(record_id)
new_row(row)
if load_uids:
new_uid(ogetattr(row, UID))
self._length = len(self._rows)
return self._rows
# -------------------------------------------------------------------------
def clear(self):
""" Removes the records currently stored in this instance """
self._rows = None
self._rowindex = None
self._length = None
self._ids = None
self._uids = None
self.files = Storage()
if self.components:
for c in self.components:
self.components[c].clear()
# -------------------------------------------------------------------------
def records(self, fields=None):
"""
Get the current set as Rows instance
@param fields: the fields to include (list of Fields)
"""
if fields is None:
if self.tablename == "gis_location":
fields = [f for f in self.table
if f.name not in ("wkt", "the_geom")]
else:
fields = [f for f in self.table]
if self._rows is None:
return Rows(current.db)
else:
colnames = map(str, fields)
return Rows(current.db, self._rows, colnames=colnames)
# -------------------------------------------------------------------------
def __getitem__(self, key):
"""
Find a record currently stored in this instance by its record ID
@param key: the record ID
@return: a Row
@raises: IndexError if the record is not currently loaded
"""
index = self._rowindex
if index is None:
_id = self._id.name
rows = self._rows
if rows:
index = Storage([(str(row[_id]), row) for row in rows])
else:
index = Storage()
self._rowindex = index
key = str(key)
if key in index:
return index[key]
raise IndexError
# -------------------------------------------------------------------------
def __iter__(self):
"""
Iterate over the records currently stored in this instance
"""
if self._rows is None:
self.load()
rows = self._rows
for i in xrange(len(rows)):
yield rows[i]
return
# -------------------------------------------------------------------------
def get(self, key, component=None, link=None):
"""
Get component records for a record currently stored in this
instance.
@param key: the record ID
@param component: the name of the component
@param link: the name of the link table
@return: a Row (if component is None) or a list of rows
"""
if not key:
raise KeyError("Record not found")
if self._rows is None:
self.load()
try:
master = self[key]
except IndexError:
raise KeyError("Record not found")
if not component and not link:
return master
elif link:
if link in self.links:
c = self.links[link]
else:
raise AttributeError("Undefined link %s" % link)
else:
if component in self.components:
c = self.components[component]
else:
raise AttributeError("Undefined component %s" % component)
rows = c._rows
if rows is None:
rows = c.load()
if not rows:
return []
pkey, fkey = c.pkey, c.fkey
if pkey in master:
master_id = master[pkey]
if c.link:
lkey, rkey = c.lkey, c.rkey
lids = [r[rkey] for r in c.link if master_id == r[lkey]]
rows = [record for record in rows if record[fkey] in lids]
else:
try:
rows = [record for record in rows if master_id == record[fkey]]
except AttributeError:
# Most likely need to tweak static/formats/geojson/export.xsl
raise AttributeError("Component %s records are missing fkey %s" % (component, fkey))
else:
rows = []
return rows
# -------------------------------------------------------------------------
def get_id(self):
""" Get the IDs of all records currently stored in this instance """
if self._ids is None:
self.__load_ids()
if not self._ids:
return None
elif len(self._ids) == 1:
return self._ids[0]
else:
return self._ids
# -------------------------------------------------------------------------
def get_uid(self):
""" Get the UUIDs of all records currently stored in this instance """
if current.xml.UID not in self.table.fields:
return None
if self._ids is None:
self.__load_ids()
if not self._uids:
return None
elif len(self._uids) == 1:
return self._uids[0]
else:
return self._uids
# -------------------------------------------------------------------------
def __len__(self):
"""
The number of currently loaded rows
"""
if self._rows is not None:
return len(self._rows)
else:
return 0
# -------------------------------------------------------------------------
def __load_ids(self):
""" Loads the IDs/UIDs of all records matching the current filter """
table = self.table
UID = current.xml.UID
pkey = table._id.name
if UID in table.fields:
has_uid = True
fields = (pkey, UID)
else:
has_uid = False
fields = (pkey, )
rfilter = self.rfilter
multiple = rfilter.multiple if rfilter is not None else True
if not multiple and self.parent and self.parent.count() == 1:
start = 0
limit = 1
else:
start = limit = None
rows = self.select(fields,
start=start,
limit=limit)["rows"]
if rows:
ID = str(table._id)
self._ids = [row[ID] for row in rows]
if has_uid:
uid = str(table[UID])
self._uids = [row[uid] for row in rows]
else:
self._ids = []
return
# -------------------------------------------------------------------------
# Representation
# -------------------------------------------------------------------------
def __repr__(self):
"""
String representation of this resource
"""
pkey = self.table._id.name
if self._rows:
ids = [r[pkey] for r in self]
return "<S3Resource %s %s>" % (self.tablename, ids)
else:
return "<S3Resource %s>" % self.tablename
# -------------------------------------------------------------------------
def __contains__(self, item):
"""
Tests whether this resource contains a (real) field.
@param item: the field selector or Field instance
"""
fn = str(item)
if "." in fn:
tn, fn = fn.split(".", 1)
if tn == self.tablename:
item = fn
try:
rf = self.resolve_selector(str(item))
except (SyntaxError, AttributeError):
return 0
if rf.field is not None:
return 1
else:
return 0
# -------------------------------------------------------------------------
def __nonzero__(self):
"""
Boolean test of this resource
"""
return self is not None
# -------------------------------------------------------------------------
# XML Export
# -------------------------------------------------------------------------
def export_xml(self,
start=None,
limit=None,
msince=None,
fields=None,
dereference=True,
maxdepth=MAXDEPTH,
mcomponents=[],
rcomponents=None,
references=None,
stylesheet=None,
as_tree=False,
as_json=False,
maxbounds=False,
filters=None,
pretty_print=False,
location_data=None,
map_data=None,
**args):
"""
Export this resource as S3XML
@param start: index of the first record to export (slicing)
@param limit: maximum number of records to export (slicing)
@param msince: export only records which have been modified
after this datetime
@param fields: data fields to include (default: all)
@param dereference: include referenced resources
@param maxdepth: maximum depth when dereferencing referenced resources
@param mcomponents: components of the master resource to
include (list of tablenames), empty list
for all
@param rcomponents: components of referenced resources to
include (list of tablenames), empty list
for all
@param references: foreign keys to include (default: all)
@param stylesheet: path to the XSLT stylesheet (if required)
@param as_tree: return the ElementTree (do not convert into string)
@param as_json: represent the XML tree as JSON
@param maxbounds: include lat/lon boundaries in the top
level element (off by default)
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param pretty_print: insert newlines/indentation in the output
@param location_data: dictionary of location data which has been
looked-up in bulk ready for xml.gis_encode()
@param map_data: dictionary of options which can be read by the map
@param args: dict of arguments to pass to the XSLT stylesheet
"""
xml = current.xml
output = None
args = Storage(args)
xmlformat = S3XMLFormat(stylesheet) if stylesheet else None
# Export as element tree
#if DEBUG:
#_start = datetime.datetime.now()
#tablename = self.tablename
#_debug("export_tree of %s starting" % tablename)
tree = self.export_tree(start=start,
limit=limit,
msince=msince,
fields=fields,
dereference=dereference,
maxdepth=maxdepth,
mcomponents=mcomponents,
rcomponents=rcomponents,
references=references,
filters=filters,
maxbounds=maxbounds,
xmlformat=xmlformat,
location_data=location_data,
map_data=map_data)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("export_tree of %s completed in %s seconds" % \
#(tablename, duration))
# XSLT transformation
if tree and xmlformat is not None:
#if DEBUG:
# _start = datetime.datetime.now()
import uuid
tfmt = xml.ISOFORMAT
args.update(domain=xml.domain,
base_url=current.response.s3.base_url,
prefix=self.prefix,
name=self.name,
utcnow=datetime.datetime.utcnow().strftime(tfmt),
msguid=uuid.uuid4().urn)
tree = xmlformat.transform(tree, **args)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("transform of %s using %s completed in %s seconds" % \
#(tablename, stylesheet, duration))
# Convert into the requested format
# (Content Headers are set by the calling function)
if tree:
if as_tree:
output = tree
elif as_json:
#if DEBUG:
#_start = datetime.datetime.now()
output = xml.tree2json(tree, pretty_print=pretty_print)
#if DEBUG:
#end = datetime.datetime.now()
#duration = end - _start
#duration = '{:.2f}'.format(duration.total_seconds())
#_debug("tree2json of %s completed in %s seconds" % \
#(tablename, duration))
else:
output = xml.tostring(tree, pretty_print=pretty_print)
return output
# -------------------------------------------------------------------------
def export_tree(self,
start=0,
limit=None,
msince=None,
fields=None,
references=None,
dereference=True,
maxdepth=MAXDEPTH,
mcomponents=None,
rcomponents=None,
filters=None,
maxbounds=False,
xmlformat=None,
location_data=None,
map_data=None,
):
"""
Export the resource as element tree
@param start: index of the first record to export
@param limit: maximum number of records to export
@param msince: minimum modification date of the records
@param fields: data fields to include (default: all)
@param references: foreign keys to include (default: all)
@param dereference: also export referenced records
@param maxdepth: maximum depth when dereferencing referenced resources
@param mcomponents: components of the master resource to
include (list of tablenames), empty list
for all
@param rcomponents: components of referenced resources to
include (list of tablenames), empty list
for all
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param maxbounds: include lat/lon boundaries in the top
level element (off by default)
@param xmlformat: the S3XMLFormat (pre-parsed stylesheet), used to determine which fields to include/exclude
@param location_data: dictionary of location data which has been
looked-up in bulk ready for xml.gis_encode()
@param map_data: dictionary of options which can be read by the map
"""
xml = current.xml
if xml.show_urls:
base_url = current.response.s3.base_url
else:
base_url = None
# Split reference/data fields
(rfields, dfields) = self.split_fields(data=fields,
references=references)
# Filter for MCI >= 0 (setting)
table = self.table
if xml.filter_mci and "mci" in table.fields:
mci_filter = (table.mci >= 0)
self.add_filter(mci_filter)
# Sync filters
tablename = self.tablename
if filters and tablename in filters:
queries = S3URLQuery.parse(self, filters[tablename])
[self.add_filter(q) for a in queries for q in queries[a]]
# Initialize export metadata
self.muntil = None
self.results = 0
# Load slice
if msince is not None and "modified_on" in table.fields:
orderby = "%s ASC" % table["modified_on"]
else:
orderby = None
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(self.tablename)
else:
include, exclude = None, None
self.load(fields=include,
skip=exclude,
start=start,
limit=limit,
orderby=orderby,
virtual=False,
cacheable=True)
# Total number of results
results = self.count()
if not location_data:
format = current.auth.permission.format
if format == "geojson":
if results > current.deployment_settings.get_gis_max_features():
headers = {"Content-Type": "application/json"}
message = "Too Many Records"
status = 509
raise HTTP(status,
body=xml.json_message(success=False,
statuscode=status,
message=message),
web2py_error=message,
**headers)
# Lookups per layer not per record
if tablename == "gis_layer_shapefile":
# GIS Shapefile Layer
location_data = current.gis.get_shapefile_geojson(self) or {}
elif tablename == "gis_theme_data":
# GIS Theme Layer
location_data = current.gis.get_theme_geojson(self) or {}
else:
# e.g. GIS Feature Layer
# e.g. Search results
location_data = current.gis.get_location_data(self) or {}
elif format in ("georss", "kml", "gpx"):
location_data = current.gis.get_location_data(self) or {}
else:
# @ToDo: Bulk lookup of LatLons for S3XML.latlon()
location_data = {}
# Build the tree
#if DEBUG:
# _start = datetime.datetime.now()
root = etree.Element(xml.TAG.root)
if map_data:
# Gets loaded before re-dumping, so no need to compact or avoid double-encoding
# NB Ensure we don't double-encode unicode!
#root.set("map", json.dumps(map_data, separators=SEPARATORS,
# ensure_ascii=False))
root.set("map", json.dumps(map_data))
export_map = Storage()
all_references = []
prefix = self.prefix
name = self.name
if base_url:
url = "%s/%s/%s" % (base_url, prefix, name)
else:
url = "/%s/%s" % (prefix, name)
# Use lazy representations
lazy = []
current.auth_user_represent = S3Represent(lookup="auth_user",
fields=["email"])
export_resource = self.__export_resource
# Collect all references from master records
reference_map = []
for record in self._rows:
element = export_resource(record,
rfields=rfields,
dfields=dfields,
parent=root,
base_url=url,
reference_map=reference_map,
export_map=export_map,
lazy=lazy,
components=mcomponents,
filters=filters,
msince=msince,
location_data=location_data,
xmlformat=xmlformat)
if element is None:
results -= 1
if reference_map:
all_references.extend(reference_map)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - _start
# duration = '{:.2f}'.format(duration.total_seconds())
# _debug("export_resource of primary resource and components completed in %s seconds" % \
# duration)
# Add referenced resources to the tree
#if DEBUG:
# _start = datetime.datetime.now()
define_resource = current.s3db.resource
# Iteratively resolve all references
depth = maxdepth if dereference else 0
while reference_map and depth:
depth -= 1
load_map = dict()
get_exported = export_map.get
for ref in reference_map:
if "table" in ref and "id" in ref:
# Get tablename and IDs
tname = ref["table"]
ids = ref["id"]
if not isinstance(ids, list):
ids = [ids]
# Exclude records which are already in the tree
exported = get_exported(tname, [])
ids = [x for x in ids if x not in exported]
if not ids:
continue
# Append the new ids to load_map[tname]
if tname in load_map:
ids = [x for x in ids if x not in load_map[tname]]
load_map[tname] += ids
else:
load_map[tname] = ids
# Collect all references from the referenced records
reference_map = []
REF = xml.ATTRIBUTE.ref
for tablename in load_map:
load_list = load_map[tablename]
# Sync filters
if filters:
filter_vars = filters.get(tablename, None)
else:
filter_vars = None
prefix, name = tablename.split("_", 1)
rresource = define_resource(tablename,
id=load_list,
components=[],
vars=filter_vars)
table = rresource.table
if base_url:
url = "%s/%s/%s" % (base_url, prefix, name)
else:
url = "/%s/%s" % (prefix, name)
rfields, dfields = rresource.split_fields(data=fields,
references=references)
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(rresource.tablename)
else:
include, exclude = None, None
rresource.load(fields=include,
skip=exclude,
limit=None,
virtual=False,
cacheable=True)
export_resource = rresource.__export_resource
for record in rresource:
element = export_resource(record,
rfields=rfields,
dfields=dfields,
parent=root,
base_url=url,
reference_map=reference_map,
export_map=export_map,
components=rcomponents,
lazy=lazy,
filters=filters,
master=False,
location_data=location_data,
xmlformat=xmlformat)
# Mark as referenced element (for XSLT)
if element is not None:
element.set(REF, "True")
if reference_map:
all_references.extend(reference_map)
#if DEBUG:
# end = datetime.datetime.now()
# duration = end - _start
# duration = '{:.2f}'.format(duration.total_seconds())
# _debug("export_resource of referenced resources and their components completed in %s seconds" % \
# duration)
# Render all pending lazy representations
if lazy:
for renderer, element, attr, f in lazy:
renderer.render_node(element, attr, f)
# Add Lat/Lon attributes to all location references
if all_references:
xml.latlon(all_references)
# Complete the tree
tree = xml.tree(None,
root=root,
domain=xml.domain,
url=base_url,
results=results,
start=start,
limit=limit,
maxbounds=maxbounds)
# Store number of results
self.results = results
return tree
# -------------------------------------------------------------------------
def __export_resource(self,
record,
rfields=[],
dfields=[],
parent=None,
base_url=None,
reference_map=None,
export_map=None,
lazy=None,
components=None,
filters=None,
msince=None,
master=True,
location_data=None,
xmlformat=None):
"""
Add a <resource> to the element tree
@param record: the record
@param rfields: list of reference fields to export
@param dfields: list of data fields to export
@param parent: the parent element
@param base_url: the base URL of the resource
@param reference_map: the reference map of the request
@param export_map: the export map of the request
@param lazy: list collecting lazy representation nodes (rendered in bulk after the tree is built)
@param components: list of components to include from referenced
resources (tablenames)
@param filters: additional URL filters (Sync), as dict
{tablename: {url_var: string}}
@param msince: the minimum update datetime for exported records
@param master: True if this is the master resource
@param location_data: the location_data for GIS encoding
@param xmlformat: the S3XMLFormat (pre-parsed stylesheet), used to determine which fields to include/exclude
"""
xml = current.xml
pkey = self.table._id
# Construct the record URL
if base_url:
record_url = "%s/%s" % (base_url, record[pkey])
else:
record_url = None
# Export the record
add = False
export = self._export_record
element, rmap = export(record,
rfields=rfields,
dfields=dfields,
parent=parent,
export_map=export_map,
lazy=lazy,
url=record_url,
msince=msince,
master=master,
location_data=location_data)
if element is not None:
add = True
# Export components
if components is not None:
resource_components = self.components.values()
unfiltered = [c for c in resource_components if c.filter is None]
for component in resource_components:
ctablename = component.tablename
# Shall this component be included?
if components and ctablename not in components:
continue
# We skip a filtered component if an unfiltered
# component of the same table is available:
if component.filter is not None and ctablename in unfiltered:
continue
cpkey = component.table._id
if component.link is not None:
c = component.link
calias = None
lalias = c.alias
else:
c = component
calias = c.alias
lalias = None
# Before loading the component: add filters
if c._rows is None:
# MCI filter
ctable = c.table
if xml.filter_mci and xml.MCI in ctable.fields:
mci_filter = FS(xml.MCI) >= 0
c.add_filter(mci_filter)
# Sync filters
ctablename = c.tablename
if filters and ctablename in filters:
queries = S3URLQuery.parse(self, filters[ctablename])
[c.add_filter(q) for a in queries for q in queries[a]]
# Fields to load
if xmlformat:
include, exclude = xmlformat.get_fields(c.tablename)
else:
include, exclude = None, None
# Load the records
c.load(fields=include,
skip=exclude,
limit=None,
virtual=False,
cacheable=True)
# Split fields
crfields, cdfields = c.split_fields(skip=[c.fkey])
# Construct the component base URL
if record_url:
component_url = "%s/%s" % (record_url, c.alias)
else:
component_url = None
# Find related records
crecords = self.get(record[pkey],
component = calias,
link = lalias,
)
# @todo: load() should limit this automatically:
if not c.multiple and len(crecords):
crecords = [crecords[0]]
# Export records
export = c._export_record
map_record = c.__map_record
for crecord in crecords:
# Construct the component record URL
if component_url:
crecord_url = "%s/%s" % (component_url, crecord[cpkey])
else:
crecord_url = None
# Export the component record
celement, crmap = export(crecord,
rfields=crfields,
dfields=cdfields,
parent=element,
export_map=export_map,
lazy=lazy,
url=crecord_url,
msince=msince,
master=False,
location_data=location_data)
if celement is not None:
add = True # keep the parent record
# Update "modified until" from component
if not self.muntil or \
c.muntil and c.muntil > self.muntil:
self.muntil = c.muntil
map_record(crecord, crmap,
reference_map, export_map)
# Update reference_map and export_map
if add:
self.__map_record(record, rmap, reference_map, export_map)
elif parent is not None and element is not None:
idx = parent.index(element)
if idx:
del parent[idx]
return None
return element
# -------------------------------------------------------------------------
def _export_record(self,
record,
rfields=[],
dfields=[],
parent=None,
export_map=None,
lazy=None,
url=None,
msince=None,
master=True,
location_data=None):
"""
Exports a single record to the element tree.
@param record: the record
@param rfields: list of foreign key fields to export
@param dfields: list of data fields to export
@param parent: the parent element
@param export_map: the export map of the current request
@param url: URL of the record
@param msince: minimum last update time
@param master: True if this is a record in the master resource
@param location_data: the location_data for GIS encoding
"""
xml = current.xml
tablename = self.tablename
table = self.table
# Replace user ID representation by lazy method
auth_user_represent = Storage()
if hasattr(current, "auth_user_represent"):
user_ids = ("created_by", "modified_by", "owned_by_user")
for fn in user_ids:
if hasattr(table, fn):
f = ogetattr(table, fn)
auth_user_represent[fn] = f.represent
f.represent = current.auth_user_represent
default = (None, None)
# Do not export the record if it already is in the export map
if tablename in export_map and record[table._id] in export_map[tablename]:
return default
# Do not export the record if it hasn't been modified since msince
# NB This can't be moved to tree level as we do want to export records
# which have modified components
MTIME = xml.MTIME
if MTIME in record:
if msince is not None and record[MTIME] <= msince:
return default
if not self.muntil or record[MTIME] > self.muntil:
self.muntil = record[MTIME]
# Audit read
current.audit("read", self.prefix, self.name,
record=record[table._id], representation="xml")
# Reference map for this record
rmap = xml.rmap(table, record, rfields)
# Use alias if distinct from resource name
linked = self.linked
if self.parent is not None and linked is not None:
alias = linked.alias
name = linked.name
else:
alias = self.alias
name = self.name
if alias == name:
alias = None
postprocess = self.get_config("xml_post_render")
# Generate the element
element = xml.resource(parent, table, record,
fields=dfields,
alias=alias,
lazy=lazy,
url=url,
postprocess=postprocess)
# Add the references
xml.add_references(element, rmap,
show_ids=current.xml.show_ids, lazy=lazy)
if master:
# GIS-encode the element
# @ToDo: Do this 1/tree not 1/record
xml.gis_encode(self, record, element, location_data=location_data)
# Restore normal user_id representations
for fn in auth_user_represent:
ogetattr(table, fn).represent = auth_user_represent[fn]
return (element, rmap)
# -------------------------------------------------------------------------
def __map_record(self, record, rmap, reference_map, export_map):
"""
Add the record to the export map, and update the
reference map with the record's references
@param record: the record
@param rmap: the reference map of the record
@param reference_map: the reference map of the request
@param export_map: the export map of the request
"""
tablename = self.tablename
record_id = record[self.table._id]
if rmap:
reference_map.extend(rmap)
if tablename in export_map:
export_map[tablename].append(record_id)
else:
export_map[tablename] = [record_id]
return
# -------------------------------------------------------------------------
# XML Import
# -------------------------------------------------------------------------
def import_xml(self, source,
files=None,
id=None,
format="xml",
stylesheet=None,
extra_data=None,
ignore_errors=False,
job_id=None,
commit_job=True,
delete_job=False,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None,
**args):
"""
XML Importer
@param source: the data source, accepts source=xxx, source=[xxx, yyy, zzz] or
source=[(resourcename1, xxx), (resourcename2, yyy)], where the
xxx has to be either an ElementTree or a file-like object
@param files: attached files (None to read in the HTTP request)
@param id: ID (or list of IDs) of the record(s) to update (performs only update)
@param format: type of source = "xml", "json", "csv" or "xls"
@param stylesheet: stylesheet to use for transformation
@param extra_data: for CSV imports, dict of extra cols to add to each row
@param ignore_errors: skip invalid records silently
@param job_id: resume from previous import job_id
@param commit_job: commit the job to the database
@param delete_job: delete the import job from the queue
@param strategy: tuple of allowed import methods (create/update/delete)
@param update_policy: policy for updates (sync)
@param conflict_policy: policy for conflict resolution (sync)
@param last_sync: last synchronization datetime (sync)
@param onconflict: callback hook for conflict resolution (sync)
@param args: parameters to pass to the transformation stylesheet
"""
# Check permission for the resource
has_permission = current.auth.s3_has_permission
authorised = has_permission("create", self.table) and \
has_permission("update", self.table)
if not authorised:
raise IOError("Insufficient permissions")
xml = current.xml
tree = None
self.job = None
if not job_id:
# Resource data
prefix = self.prefix
name = self.name
# Additional stylesheet parameters
tfmt = xml.ISOFORMAT
utcnow = datetime.datetime.utcnow().strftime(tfmt)
domain = xml.domain
base_url = current.response.s3.base_url
args.update(domain=domain,
base_url=base_url,
prefix=prefix,
name=name,
utcnow=utcnow)
# Build import tree
if not isinstance(source, (list, tuple)):
source = [source]
for item in source:
if isinstance(item, (list, tuple)):
resourcename, s = item[:2]
else:
resourcename, s = None, item
if isinstance(s, etree._ElementTree):
t = s
elif format == "json":
if isinstance(s, basestring):
source = StringIO(s)
t = xml.json2tree(s)
else:
t = xml.json2tree(s)
elif format == "csv":
t = xml.csv2tree(s,
resourcename=resourcename,
extra_data=extra_data)
elif format == "xls":
t = xml.xls2tree(s,
resourcename=resourcename,
extra_data=extra_data)
else:
t = xml.parse(s)
if not t:
if xml.error:
raise SyntaxError(xml.error)
else:
raise SyntaxError("Invalid source")
if stylesheet is not None:
t = xml.transform(t, stylesheet, **args)
_debug(t)
if not t:
raise SyntaxError(xml.error)
if not tree:
tree = t.getroot()
else:
tree.extend(list(t.getroot()))
if files is not None and isinstance(files, dict):
self.files = Storage(files)
else:
# job ID given
pass
response = current.response
# Flag to let onvalidation/onaccept know this is coming from a Bulk Import
response.s3.bulk = True
success = self.import_tree(id, tree,
ignore_errors=ignore_errors,
job_id=job_id,
commit_job=commit_job,
delete_job=delete_job,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
response.s3.bulk = False
self.files = Storage()
# Response message
if format == "json":
# Whilst all Responses are JSON, it's easier to debug by having the
# response appear in the browser than launching a text editor
response.headers["Content-Type"] = "application/json"
if self.error_tree is not None:
tree = xml.tree2json(self.error_tree)
else:
tree = None
import_info = {"records":self.import_count}
created = self.import_created
if created:
import_info["created"] = created
updated = self.import_updated
if updated:
import_info["updated"] = updated
deleted = self.import_deleted
if deleted:
import_info["deleted"] = deleted
if success is True:
return xml.json_message(message=self.error, tree=tree,
**import_info)
elif success and hasattr(success, "job_id"):
self.job = success
return xml.json_message(message=self.error, tree=tree,
**import_info)
else:
return xml.json_message(False, 400,
message=self.error, tree=tree)
# -------------------------------------------------------------------------
def import_tree(self, id, tree,
job_id=None,
ignore_errors=False,
delete_job=False,
commit_job=True,
strategy=None,
update_policy=None,
conflict_policy=None,
last_sync=None,
onconflict=None):
"""
Import data from an S3XML element tree.
@param id: record ID or list of record IDs to update
@param tree: the element tree
@param ignore_errors: continue at errors (=skip invalid elements)
@param job_id: restore a job from the job table (ID or UID)
@param delete_job: delete the import job from the job table
@param commit_job: commit the job (default)
@todo: update for link table support
"""
from s3import import S3ImportJob
db = current.db
xml = current.xml
auth = current.auth
tablename = self.tablename
table = self.table
if job_id is not None:
# Restore a job from the job table
self.error = None
self.error_tree = None
try:
import_job = S3ImportJob(table,
job_id=job_id,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
except:
self.error = current.ERROR.BAD_SOURCE
return False
# Delete the job?
if delete_job:
import_job.delete()
return True
# Load all items
job_id = import_job.job_id
item_table = import_job.item_table
items = db(item_table.job_id == job_id).select()
load_item = import_job.load_item
for item in items:
success = load_item(item)
if not success:
self.error = import_job.error
self.error_tree = import_job.error_tree
import_job.restore_references()
# this is only relevant for commit_job=True
if commit_job:
if self.error and not ignore_errors:
return False
else:
return import_job
# Call the import pre-processor to prepare tables
# and cleanup the tree as necessary
import_prep = current.response.s3.import_prep
if import_prep:
tree = import_job.get_tree()
callback(import_prep,
# takes tuple (resource, tree) as argument
(self, tree),
tablename=tablename)
# Skip import?
if self.skip_import:
_debug("Skipping import to %s" % self.tablename)
self.skip_import = False
return True
else:
# Create a new job from an element tree
# Do not import into tables without "id" field
if "id" not in table.fields:
self.error = current.ERROR.BAD_RESOURCE
return False
# Reset error and error tree
self.error = None
self.error_tree = None
# Call the import pre-processor to prepare tables
# and cleanup the tree as necessary
import_prep = current.response.s3.import_prep
if import_prep:
if not isinstance(tree, etree._ElementTree):
tree = etree.ElementTree(tree)
callback(import_prep,
# takes tuple (resource, tree) as argument
(self, tree),
tablename=tablename)
# Skip import?
if self.skip_import:
_debug("Skipping import to %s" % self.tablename)
self.skip_import = False
return True
# Select the elements for this table
elements = xml.select_resources(tree, tablename)
if not elements:
# nothing to import => still ok
return True
# Find matching elements, if a target record ID is given
UID = xml.UID
if id and UID in table:
if not isinstance(id, (tuple, list)):
query = (table._id == id)
else:
query = (table._id.belongs(id))
originals = db(query).select(table[UID])
uids = [row[UID] for row in originals]
matches = []
import_uid = xml.import_uid
append = matches.append
for element in elements:
element_uid = import_uid(element.get(UID, None))
if not element_uid:
continue
if element_uid in uids:
append(element)
if not matches:
first = elements[0]
if len(elements) and not first.get(UID, None):
first.set(UID, uids[0])
matches = [first]
if not matches:
self.error = current.ERROR.NO_MATCH
return False
else:
elements = matches
# Import all matching elements
import_job = S3ImportJob(table,
tree=tree,
files=self.files,
strategy=strategy,
update_policy=update_policy,
conflict_policy=conflict_policy,
last_sync=last_sync,
onconflict=onconflict)
add_item = import_job.add_item
for element in elements:
success = add_item(element=element,
components=self.components)
if not success:
self.error = import_job.error
self.error_tree = import_job.error_tree
if self.error and not ignore_errors:
return False
# Commit the import job
auth.rollback = not commit_job
success = import_job.commit(ignore_errors=ignore_errors,
log_items = self.get_config("oncommit_import_item"))
auth.rollback = False
self.error = import_job.error
self.import_count += import_job.count
self.import_created += import_job.created
self.import_updated += import_job.updated
self.import_deleted += import_job.deleted
job_mtime = import_job.mtime
if self.mtime is None or \
job_mtime and job_mtime > self.mtime:
self.mtime = job_mtime
if self.error:
if ignore_errors:
self.error = "%s - invalid items ignored" % self.error
self.error_tree = import_job.error_tree
elif not success:
# Oops - how could this happen? We can have an error
# without failure, but not a failure without error!
# If we ever get here, then there's a bug without a
# chance to recover - hence let it crash:
raise RuntimeError("Import failed without error message")
if not success or not commit_job:
db.rollback()
if not commit_job:
import_job.store()
return import_job
else:
# Remove the job when committed
if job_id is not None:
import_job.delete()
return self.error is None or ignore_errors
# -------------------------------------------------------------------------
# XML introspection
# -------------------------------------------------------------------------
def export_options(self,
component=None,
fields=None,
only_last=False,
show_uids=False,
hierarchy=False,
as_json=False):
"""
Export field options of this resource as element tree
@param component: name of the component which the options are
requested of, None for the primary table
@param fields: list of names of fields for which the options
are requested, None for all fields (which have
options)
@param only_last: obtain only the latest record
@param show_uids: include the UUIDs of the option records
@param hierarchy: include parent information for hierarchical
lookup tables
@param as_json: convert the output into JSON
"""
if component is not None:
c = self.components.get(component)
if c:
tree = c.export_options(fields=fields,
only_last=only_last,
show_uids=show_uids,
hierarchy=hierarchy,
as_json=as_json)
return tree
else:
# If we get here, we've been called from the back-end,
# otherwise the request would have failed during parse.
# So it's safe to raise an exception:
raise AttributeError
else:
if as_json and only_last and len(fields) == 1:
# Identify the field
default = {"option":[]}
try:
field = self.table[fields[0]]
except AttributeError:
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Check that the validator has a lookup table
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
requires = requires[0]
if isinstance(requires, IS_EMPTY_OR):
requires = requires.other
from s3validators import IS_LOCATION
if not isinstance(requires, (IS_ONE_OF, IS_LOCATION)):
# Can't raise an exception here as this goes
# directly to the client
return json.dumps(default)
# Identify the lookup table
db = current.db
lookuptable = requires.ktable
lookupfield = db[lookuptable][requires.kfield]
# Fields to extract
fields = [lookupfield]
h = None
if hierarchy:
from s3hierarchy import S3Hierarchy
h = S3Hierarchy(lookuptable)
if not h.config:
h = None
elif h.pkey.name != lookupfield.name:
# Also extract the node key for the hierarchy
fields.append(h.pkey)
# Get the latest record
# NB: this assumes that the lookupfield is auto-incremented
row = db().select(orderby=~lookupfield,
limitby=(0, 1),
*fields).first()
# Represent the value and generate the output JSON
if row:
value = row[lookupfield]
widget = field.widget
if hasattr(widget, "represent") and widget.represent:
# Prefer the widget's represent as options.json
# is usually called to Ajax-update the widget
represent = widget.represent(value)
elif field.represent:
represent = field.represent(value)
else:
represent = s3_unicode(value)
if isinstance(represent, A):
represent = represent.components[0]
item = {"@value": value, "$": represent}
if h:
parent = h.parent(row[h.pkey])
if parent:
item["@parent"] = str(parent)
result = [item]
else:
result = []
return json.dumps({'option': result})
xml = current.xml
tree = xml.get_options(self.table,
fields=fields,
show_uids=show_uids,
hierarchy=hierarchy)
if as_json:
return xml.tree2json(tree, pretty_print=False,
native=True)
else:
return xml.tostring(tree, pretty_print=False)
# -------------------------------------------------------------------------
def export_fields(self, component=None, as_json=False):
"""
Export a list of fields in the resource as element tree
@param component: name of the component to lookup the fields
(None for primary table)
@param as_json: convert the output XML into JSON
"""
if component is not None:
c = self.components.get(component, None)
if c:
tree = c.export_fields()
return tree
else:
raise AttributeError
else:
xml = current.xml
tree = xml.get_fields(self.prefix, self.name)
if as_json:
return xml.tree2json(tree, pretty_print=True)
else:
return xml.tostring(tree, pretty_print=True)
# -------------------------------------------------------------------------
def export_struct(self,
meta=False,
options=False,
references=False,
stylesheet=None,
as_json=False,
as_tree=False):
"""
Get the structure of the resource
@param options: include option lists in option fields
@param references: include option lists even for reference fields
@param stylesheet: the stylesheet to use for transformation
@param as_json: convert into JSON after transformation
"""
xml = current.xml
# Get the structure of the main resource
root = etree.Element(xml.TAG.root)
main = xml.get_struct(self.prefix, self.name,
alias=self.alias,
parent=root,
meta=meta,
options=options,
references=references)
# Include the selected components
for component in self.components.values():
prefix = component.prefix
name = component.name
xml.get_struct(prefix, name,
alias = component.alias,
parent = main,
meta = meta,
options = options,
references = references)
# Transformation
tree = etree.ElementTree(root)
if stylesheet is not None:
tfmt = xml.ISOFORMAT
args = dict(domain=xml.domain,
base_url=current.response.s3.base_url,
prefix=self.prefix,
name=self.name,
utcnow=datetime.datetime.utcnow().strftime(tfmt))
tree = xml.transform(tree, stylesheet, **args)
if tree is None:
return None
# Return tree if requested
if as_tree:
return tree
# Otherwise string-ify it
if as_json:
return xml.tree2json(tree, pretty_print=True)
else:
return xml.tostring(tree, pretty_print=True)
# -------------------------------------------------------------------------
# Data Model Helpers
# -------------------------------------------------------------------------
@classmethod
def original(cls, table, record, mandatory=None):
"""
Find the original record for a possible duplicate:
- if the record contains a UUID, then only that UUID is used
to match the record with an existing DB record
- otherwise, if the record contains some values for unique
fields, all of them must match the same existing DB record
@param table: the table
@param record: the record as dict or S3XML Element
@param mandatory: additional field names to include in the result row
"""
db = current.db
xml = current.xml
xml_decode = xml.xml_decode
VALUE = xml.ATTRIBUTE["value"]
UID = xml.UID
ATTRIBUTES_TO_FIELDS = xml.ATTRIBUTES_TO_FIELDS
# Get primary keys
pkeys = [f for f in table.fields if table[f].unique]
pvalues = Storage()
# Get the values from record
get = record.get
if type(record) is etree._Element: #isinstance(record, etree._Element):
xpath = record.xpath
xexpr = "%s[@%s='%%s']" % (xml.TAG["data"],
xml.ATTRIBUTE["field"])
for f in pkeys:
v = None
if f == UID or f in ATTRIBUTES_TO_FIELDS:
v = get(f, None)
else:
child = xpath(xexpr % f)
if child:
child = child[0]
v = child.get(VALUE, xml_decode(child.text))
if v:
pvalues[f] = v
elif isinstance(record, dict):
for f in pkeys:
v = get(f, None)
if v:
pvalues[f] = v
else:
raise TypeError
# Build match query
query = None
for f in pvalues:
if f == UID:
continue
_query = (table[f] == pvalues[f])
if query is not None:
query = query | _query
else:
query = _query
fields = cls.import_fields(table, pvalues, mandatory=mandatory)
# Try to find exactly one match by non-UID unique keys
if query is not None:
original = db(query).select(limitby=(0, 2), *fields)
if len(original) == 1:
return original.first()
# If no match, then try to find a UID-match
if UID in pvalues:
uid = xml.import_uid(pvalues[UID])
query = (table[UID] == uid)
original = db(query).select(limitby=(0, 1), *fields).first()
if original:
return original
# No match or multiple matches
return None
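    # Usage sketch (illustrative only - table and field values are
    # hypothetical, and "uuid" is assumed to be the configured UID field):
    #
    #   table = current.s3db.org_organisation
    #   record = {"uuid": "urn:uuid:...", "name": "Example Org"}
    #   existing = S3Resource.original(table, record)
    #   # => the matching Row if a record with the same unique-field
    #   #    values (or the same UUID) already exists, otherwise None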
# -------------------------------------------------------------------------
@staticmethod
def import_fields(table, data, mandatory=None):
fnames = set(s3_all_meta_field_names())
fnames.add(table._id.name)
if mandatory:
fnames |= set(mandatory)
for fn in data:
fnames.add(fn)
return [table[fn] for fn in fnames if fn in table.fields]
# -------------------------------------------------------------------------
def readable_fields(self, subset=None):
"""
Get a list of all readable fields in the resource table
@param subset: list of fieldnames to limit the selection to
"""
fkey = None
table = self.table
if self.parent and self.linked is None:
component = self.parent.components.get(self.alias, None)
if component:
fkey = component.fkey
elif self.linked is not None:
component = self.linked
if component:
fkey = component.lkey
if subset:
return [ogetattr(table, f) for f in subset
if f in table.fields and \
ogetattr(table, f).readable and f != fkey]
else:
return [ogetattr(table, f) for f in table.fields
if ogetattr(table, f).readable and f != fkey]
# -------------------------------------------------------------------------
def resolve_selectors(self, selectors,
skip_components=False,
extra_fields=True,
show=True):
"""
Resolve a list of field selectors against this resource
@param selectors: the field selectors
@param skip_components: skip fields in components
@param extra_fields: automatically add extra_fields of all virtual
fields in this table
@param show: default for S3ResourceField.show
@return: tuple of (fields, joins, left, distinct)
"""
prefix = lambda s: "~.%s" % s \
if "." not in s.split("$", 1)[0] else s
# Store field selectors
display_fields = []
append = display_fields.append
for _s in selectors:
if isinstance(_s, tuple):
s = _s[-1]
else:
s = _s
if isinstance(s, S3ResourceField):
selector = s.selector
elif isinstance(s, FS):
selector = s.name
else:
selector = s
append(prefix(selector))
slist = list(selectors)
# Collect extra fields from virtual tables
if extra_fields:
append = slist.append
extra = self.get_config("extra_fields", [])
for selector in extra:
s = prefix(selector)
if s not in display_fields:
append(s)
joins = {}
left = {}
distinct = False
rfields = []
columns = []
append = rfields.append
for s in slist:
# Allow to override the field label
if isinstance(s, tuple):
label, selector = s
else:
label, selector = None, s
# Resolve the selector
if isinstance(selector, str):
selector = prefix(selector)
try:
rfield = S3ResourceField(self, selector, label=label)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, FS):
try:
rfield = selector.resolve(self)
except (AttributeError, SyntaxError):
continue
elif isinstance(selector, S3ResourceField):
rfield = selector
else:
continue
# Unresolvable selector?
if rfield.field is None and not rfield.virtual:
continue
# Replace default label
if label is not None:
rfield.label = label
# Skip components
if skip_components:
head = rfield.selector.split("$", 1)[0]
if "." in head and head.split(".")[0] not in ("~", self.alias):
continue
# De-duplicate columns
if rfield.colname in columns:
continue
else:
columns.append(rfield.colname)
# Resolve the joins
if rfield.distinct:
left.update(rfield._joins)
distinct = True
elif rfield.join:
joins.update(rfield._joins)
rfield.show = show and rfield.selector in display_fields
append(rfield)
return (rfields, joins, left, distinct)
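    # Usage sketch (illustrative; the selectors refer to a hypothetical
    # resource with a "human_resource" component):
    #
    #   rfields, joins, left, distinct = resource.resolve_selectors(
    #       ["name",
    #        ("Job", "human_resource.job_title_id"),
    #        ])
    #   # rfields:  list of S3ResourceFields (one per resolvable selector)
    #   # joins:    required inner joins, keyed by table name
    #   # left:     required left joins, keyed by table name
    #   # distinct: whether the joins require SELECT DISTINCT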
# -------------------------------------------------------------------------
def resolve_selector(self, selector):
"""
Wrapper for S3ResourceField, retained for backward compatibility
"""
return S3ResourceField(self, selector)
# -------------------------------------------------------------------------
def split_fields(self, skip=[], data=None, references=None):
"""
Split the readable fields in the resource table into
reference and non-reference fields.
@param skip: list of field names to skip
@param data: data fields to include (None for all)
@param references: foreign key fields to include (None for all)
"""
rfields = self.rfields
dfields = self.dfields
if rfields is None or dfields is None:
if self.tablename == "gis_location":
if "wkt" not in skip and current.auth.permission.format != "cap":
# Skip bulky WKT fields
skip.append("wkt")
if current.deployment_settings.get_gis_spatialdb() and \
"the_geom" not in skip:
skip.append("the_geom")
xml = current.xml
UID = xml.UID
IGNORE_FIELDS = xml.IGNORE_FIELDS
FIELDS_TO_ATTRIBUTES = xml.FIELDS_TO_ATTRIBUTES
show_ids = current.xml.show_ids
rfields = []
dfields = []
table = self.table
pkey = table._id.name
for f in table.fields:
if f == UID or \
f in skip or \
f in IGNORE_FIELDS:
if f != pkey or not show_ids:
continue
if s3_has_foreign_key(table[f]) and \
f not in FIELDS_TO_ATTRIBUTES and \
(references is None or f in references):
rfields.append(f)
elif data is None or \
f in data or \
f in FIELDS_TO_ATTRIBUTES:
dfields.append(f)
self.rfields = rfields
self.dfields = dfields
return (rfields, dfields)
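    # Usage sketch (illustrative):
    #
    #   rfields, dfields = resource.split_fields(skip=["wkt"])
    #   # rfields: names of foreign key (reference) fields
    #   # dfields: names of the remaining data fields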
# -------------------------------------------------------------------------
# Utility functions
# -------------------------------------------------------------------------
def configure(self, **settings):
"""
Update configuration settings for this resource
@param settings: configuration settings for this resource
as keyword arguments
"""
current.s3db.configure(self.tablename, **settings)
# -------------------------------------------------------------------------
def get_config(self, key, default=None):
"""
Get a configuration setting for the current resource
@param key: the setting key
@param default: the default value to return if the setting
is not configured for this resource
"""
return current.s3db.get_config(self.tablename, key, default=default)
# -------------------------------------------------------------------------
def limitby(self, start=0, limit=0):
"""
            Convert start+limit parameters into a limitby tuple
                - limit None => no limitby (returns None)
                - limit 0 => limit = ROWSPERPAGE
                - limit less than 0 => limit = 1
                - start None or less than 0 => start = 0
@param start: index of the first record to select
@param limit: maximum number of records to select
"""
if limit is None:
return None
if start is None:
start = 0
if limit == 0:
limit = current.response.s3.ROWSPERPAGE
if limit <= 0:
limit = 1
if start < 0:
start = 0
return (start, start + limit)
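    # Examples (illustrative):
    #
    #   resource.limitby(start=10, limit=25)     # => (10, 35)
    #   resource.limitby(start=10, limit=0)      # => (10, 10 + ROWSPERPAGE)
    #   resource.limitby(start=10, limit=None)   # => None (no pagination)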
# -------------------------------------------------------------------------
def _join(self, implicit=False, reverse=False):
"""
Get a join for this component
@param implicit: return a subquery with an implicit join rather
than an explicit join
@param reverse: get the reverse join (joining master to component)
@return: a Query if implicit=True, otherwise a list of joins
"""
if self.parent is None:
# This isn't a component
return None
else:
ltable = self.parent.table
rtable = self.table
pkey = self.pkey
fkey = self.fkey
DELETED = current.xml.DELETED
if self.linked:
return self.linked._join(implicit=implicit, reverse=reverse)
elif self.linktable:
linktable = self.linktable
lkey = self.lkey
rkey = self.rkey
lquery = (ltable[pkey] == linktable[lkey])
if DELETED in linktable:
lquery &= (linktable[DELETED] != True)
if self.filter is not None and not reverse:
rquery = (linktable[rkey] == rtable[fkey]) & self.filter
else:
rquery = (linktable[rkey] == rtable[fkey])
if reverse:
join = [linktable.on(rquery), ltable.on(lquery)]
else:
join = [linktable.on(lquery), rtable.on(rquery)]
else:
lquery = (ltable[pkey] == rtable[fkey])
if DELETED in rtable and not reverse:
lquery &= (rtable[DELETED] != True)
if self.filter is not None:
lquery &= self.filter
if reverse:
join = [ltable.on(lquery)]
else:
join = [rtable.on(lquery)]
if implicit:
query = None
for expression in join:
if query is None:
query = expression.second
else:
query &= expression.second
return query
else:
return join
# -------------------------------------------------------------------------
def get_join(self):
""" Get join for this component """
return self._join(implicit=True)
# -------------------------------------------------------------------------
def get_left_join(self):
""" Get a left join for this component """
return self._join()
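    # Usage sketch (illustrative; "task" is a hypothetical component alias):
    #
    #   component = resource.components["task"]
    #   query = component.get_join()      # single Query (implicit join),
    #                                     # suitable for use in a WHERE clause
    #   joins = component.get_left_join() # list of joins for the left=
    #                                     # argument of select()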
# -------------------------------------------------------------------------
def link_id(self, master_id, component_id):
"""
Helper method to find the link table entry ID for
a pair of linked records.
@param master_id: the ID of the master record
@param component_id: the ID of the component record
"""
if self.parent is None or self.linked is None:
return None
join = self.get_join()
ltable = self.table
mtable = self.parent.table
ctable = self.linked.table
query = join & \
(mtable._id == master_id) & \
(ctable._id == component_id)
row = current.db(query).select(ltable._id, limitby=(0, 1)).first()
if row:
return row[ltable._id.name]
else:
return None
# -------------------------------------------------------------------------
def component_id(self, master_id, link_id):
"""
Helper method to find the component record ID for
a particular link of a particular master record
@param master_id: the ID of the master record
@param link_id: the ID of the link table entry
"""
if self.parent is None or self.linked is None:
return None
join = self.get_join()
ltable = self.table
mtable = self.parent.table
ctable = self.linked.table
query = join & (ltable._id == link_id)
if master_id is not None:
# master ID is redundant, but can be used to check negatives
query &= (mtable._id == master_id)
row = current.db(query).select(ctable._id, limitby=(0, 1)).first()
if row:
return row[ctable._id.name]
else:
return None
# -------------------------------------------------------------------------
def update_link(self, master, record):
"""
            Create a new link in a link table if it doesn't yet exist.
            This method is also meant to update links in "embed"
            actuation mode once that is implemented, hence the
            name "update_link".
@param master: the master record
@param record: the new component record to be linked
"""
if self.parent is None or self.linked is None:
return None
# Find the keys
resource = self.linked
pkey = resource.pkey
lkey = resource.lkey
rkey = resource.rkey
fkey = resource.fkey
if pkey not in master:
return None
_lkey = master[pkey]
if fkey not in record:
return None
_rkey = record[fkey]
if not _lkey or not _rkey:
return None
ltable = self.table
ltn = ltable._tablename
# Create the link if it does not already exist
query = ((ltable[lkey] == _lkey) &
(ltable[rkey] == _rkey))
row = current.db(query).select(ltable._id, limitby=(0, 1)).first()
if not row:
s3db = current.s3db
onaccept = s3db.get_config(ltn, "create_onaccept")
if onaccept is None:
onaccept = s3db.get_config(ltn, "onaccept")
data = {lkey:_lkey, rkey:_rkey}
link_id = ltable.insert(**data)
data[ltable._id.name] = link_id
s3db.update_super(ltable, data)
if link_id and onaccept:
callback(onaccept, Storage(vars=Storage(data)))
else:
link_id = row[ltable._id.name]
return link_id
# -------------------------------------------------------------------------
def datatable_filter(self, fields, get_vars):
"""
Parse datatable search/sort vars into a tuple of
query, orderby and left joins
@param fields: list of field selectors representing
the order of fields in the datatable (list_fields)
@param get_vars: the datatable GET vars
@return: tuple of (query, orderby, left joins)
"""
db = current.db
left_joins = S3Joins(self.tablename)
sSearch = "sSearch"
iColumns = "iColumns"
iSortingCols = "iSortingCols"
parent = self.parent
fkey = self.fkey
# Skip joins for linked tables
if self.linked is not None:
skip = self.linked.tablename
else:
skip = None
# Resolve the list fields
rfields = self.resolve_selectors(fields)[0]
# FILTER --------------------------------------------------------------
searchq = None
if sSearch in get_vars and iColumns in get_vars:
# Build filter
text = get_vars[sSearch]
words = [w for w in text.lower().split()]
if words:
try:
numcols = int(get_vars[iColumns])
except ValueError:
numcols = 0
flist = []
for i in xrange(numcols):
try:
rfield = rfields[i]
field = rfield.field
except (KeyError, IndexError):
continue
if field is None:
continue
ftype = str(field.type)
# Add left joins
left_joins.extend(rfield.left)
if ftype[:9] == "reference" and \
hasattr(field, "sortby") and field.sortby:
# For foreign keys, we search through their sortby
# Get the lookup table
tn = ftype[10:]
if parent is not None and \
parent.tablename == tn and field.name != fkey:
alias = "%s_%s_%s" % (parent.prefix,
"linked",
parent.name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
elif tn == field.tablename:
prefix, name = field.tablename.split("_", 1)
alias = "%s_%s_%s" % (prefix, field.name, name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
else:
ktable = db[tn]
# Add left join for lookup table
if tn != skip:
left_joins.add(ktable.on(field == ktable._id))
if isinstance(field.sortby, (list, tuple)):
flist.extend([ktable[f] for f in field.sortby
if f in ktable.fields])
else:
if field.sortby in ktable.fields:
flist.append(ktable[field.sortby])
else:
# Otherwise, we search through the field itself
flist.append(field)
# Build search query
# @todo: migrate this to S3ResourceQuery?
opts = Storage()
queries = []
for w in words:
wqueries = []
for field in flist:
ftype = str(field.type)
options = None
fname = str(field)
if fname in opts:
options = opts[fname]
elif ftype[:7] in ("integer",
"list:in",
"list:st",
"referen",
"list:re",
"string"):
requires = field.requires
if not isinstance(requires, (list, tuple)):
requires = [requires]
if requires:
r = requires[0]
if isinstance(r, IS_EMPTY_OR):
r = r.other
if hasattr(r, "options"):
try:
options = r.options()
except:
options = []
if options is None and ftype in ("string", "text"):
wqueries.append(field.lower().like("%%%s%%" % w))
elif options is not None:
opts[fname] = options
vlist = [v for v, t in options
if s3_unicode(t).lower().find(s3_unicode(w)) != -1]
if vlist:
wqueries.append(field.belongs(vlist))
if len(wqueries):
queries.append(reduce(lambda x, y: x | y \
if x is not None else y,
wqueries))
if len(queries):
searchq = reduce(lambda x, y: x & y \
if x is not None else y, queries)
# ORDERBY -------------------------------------------------------------
orderby = []
if iSortingCols in get_vars:
# Sorting direction
def direction(i):
sort_dir = get_vars["sSortDir_%s" % str(i)]
return sort_dir and " %s" % sort_dir or ""
# Get the fields to order by
try:
numcols = int(get_vars[iSortingCols])
except:
numcols = 0
columns = []
pkey = str(self._id)
for i in xrange(numcols):
try:
iSortCol = int(get_vars["iSortCol_%s" % i])
except (AttributeError, KeyError):
# iSortCol_x not present in get_vars => ignore
columns.append(Storage(field=None))
continue
                # Map the sortable-column index to the real list_fields
                # index: subtract 1 for every non-id, non-sortable column
                # to the left of the sortable column
for j in xrange(iSortCol):
if get_vars.get("bSortable_%s" % j, "true") == "false":
try:
if rfields[j].colname != pkey:
iSortCol -= 1
except KeyError:
break
try:
rfield = rfields[iSortCol]
except KeyError:
# iSortCol specifies a non-existent column, i.e.
# iSortCol_x>=numcols => ignore
columns.append(Storage(field=None))
else:
columns.append(rfield)
# Process the orderby-fields
for i in xrange(len(columns)):
rfield = columns[i]
field = rfield.field
if field is None:
continue
ftype = str(field.type)
represent = field.represent
if not hasattr(represent, "skip_dt_orderby") and \
hasattr(represent, "dt_orderby"):
# Custom orderby logic in field.represent
field.represent.dt_orderby(field,
direction(i),
orderby,
left_joins)
elif ftype[:9] == "reference" and \
hasattr(field, "sortby") and field.sortby:
# Foreign keys with sortby will be sorted by sortby
# Get the lookup table
tn = ftype[10:]
if parent is not None and \
parent.tablename == tn and field.name != fkey:
alias = "%s_%s_%s" % (parent.prefix, "linked", parent.name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
elif tn == field.tablename:
prefix, name = field.tablename.split("_", 1)
alias = "%s_%s_%s" % (prefix, field.name, name)
ktable = db[tn].with_alias(alias)
ktable._id = ktable[ktable._id.name]
tn = alias
else:
ktable = db[tn]
# Add left joins for lookup table
if tn != skip:
left_joins.extend(rfield.left)
left_joins.add(ktable.on(field == ktable._id))
# Construct orderby from sortby
if not isinstance(field.sortby, (list, tuple)):
orderby.append("%s.%s%s" % (tn, field.sortby, direction(i)))
else:
orderby.append(", ".join(["%s.%s%s" %
(tn, fn, direction(i))
for fn in field.sortby]))
else:
# Otherwise, we sort by the field itself
orderby.append("%s%s" % (field, direction(i)))
if orderby:
orderby = ", ".join(orderby)
else:
orderby = None
left_joins = left_joins.as_list(tablenames=left_joins.joins.keys())
return (searchq, orderby, left_joins)
# -------------------------------------------------------------------------
def axisfilter(self, axes):
"""
Get all values for the given S3ResourceFields (axes) which
match the resource query, used in pivot tables to filter out
additional values where dimensions can have multiple values
per record
@param axes: the axis fields as list/tuple of S3ResourceFields
            @return: a dict with values per axis; only contains those
                     axes which are affected by the resource filter
"""
axisfilter = {}
qdict = self.get_query().as_dict(flat=True)
for rfield in axes:
field = rfield.field
if field is None:
# virtual field or unresolvable selector
continue
left_joins = S3Joins(self.tablename)
left_joins.extend(rfield.left)
tablenames = left_joins.joins.keys()
tablenames.append(self.tablename)
af = S3AxisFilter(qdict, tablenames)
if af.op is not None:
query = af.query()
left = left_joins.as_list()
# @todo: this does not work with virtual fields: need
# to retrieve all extra_fields for the dimension table
# and can't groupby (=must deduplicate afterwards)
rows = current.db(query).select(field,
left=left,
groupby=field)
colname = rfield.colname
if rfield.ftype[:5] == "list:":
values = []
vappend = values.append
for row in rows:
v = row[colname]
if v:
vappend(v)
values = set(chain.from_iterable(values))
include, exclude = af.values(rfield)
fdict = {}
if include:
for v in values:
vstr = s3_unicode(v)
if vstr in include and vstr not in exclude:
fdict[v] = None
else:
fdict = dict((v, None) for v in values)
axisfilter[colname] = fdict
else:
axisfilter[colname] = dict((row[colname], None)
for row in rows)
return axisfilter
# -------------------------------------------------------------------------
def prefix_selector(self, selector):
"""
Helper method to ensure consistent prefixing of field selectors
@param selector: the selector
"""
head = selector.split("$", 1)[0]
if "." in head:
prefix = head.split(".", 1)[0]
if prefix == self.alias:
return selector.replace("%s." % prefix, "~.")
else:
return selector
else:
return "~.%s" % selector
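    # Examples (illustrative, for a resource with alias "office"):
    #
    #   resource.prefix_selector("name")              # => "~.name"
    #   resource.prefix_selector("office.name")       # => "~.name"
    #   resource.prefix_selector("organisation.name") # => "organisation.name"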
# -------------------------------------------------------------------------
def list_fields(self, key="list_fields", id_column=0):
"""
Get the list_fields for this resource
@param key: alternative key for the table configuration
@param id_column: True/False, whether to include the record ID
or not, or 0 to enforce the record ID to be
the first column
"""
list_fields = self.get_config(key, None)
if not list_fields and key != "list_fields":
list_fields = self.get_config("list_fields", None)
if not list_fields:
list_fields = [f.name for f in self.readable_fields()]
pkey = _pkey = self._id.name
fields = []
append = fields.append
selectors = set()
seen = selectors.add
for f in list_fields:
selector = f if type(f) is not tuple else f[1]
if selector == _pkey and not id_column:
pkey = f
elif selector not in selectors:
seen(selector)
append(f)
if id_column is 0:
fields.insert(0, pkey)
return fields
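    # Usage sketch (illustrative):
    #
    #   fields = resource.list_fields()
    #   # => the de-duplicated list_fields configuration (or all readable
    #   #    fields if not configured), with the record ID enforced as the
    #   #    first column (id_column=0, the default)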
# -------------------------------------------------------------------------
@property
def _table(self):
"""
Get the original Table object (without SQL Alias), this
is required for SQL update (DAL doesn't detect the alias
and uses the wrong tablename).
"""
if self.tablename != self._alias:
return current.s3db[self.tablename]
else:
return self.table
# =============================================================================
class S3AxisFilter(object):
"""
Experimental: helper class to extract filter values for pivot
table axis fields
"""
# -------------------------------------------------------------------------
def __init__(self, qdict, tablenames):
"""
Constructor, recursively introspect the query dict and extract
all relevant subqueries.
@param qdict: the query dict (from Query.as_dict(flat=True))
@param tablenames: the names of the relevant tables
"""
self.l = None
self.r = None
self.op = None
self.tablename = None
self.fieldname = None
if not qdict:
return
l = qdict["first"]
if "second" in qdict:
r = qdict["second"]
else:
r = None
op = qdict["op"]
if "tablename" in l:
if l["tablename"] in tablenames:
self.tablename = l["tablename"]
self.fieldname = l["fieldname"]
if isinstance(r, dict):
self.op = None
else:
self.op = op
self.r = r
elif op == "AND":
self.l = S3AxisFilter(l, tablenames)
self.r = S3AxisFilter(r, tablenames)
if self.l.op or self.r.op:
self.op = op
elif op == "OR":
self.l = S3AxisFilter(l, tablenames)
self.r = S3AxisFilter(r, tablenames)
if self.l.op and self.r.op:
self.op = op
elif op == "NOT":
self.l = S3AxisFilter(l, tablenames)
self.op = op
else:
self.l = S3AxisFilter(l, tablenames)
if self.l.op:
self.op = op
# -------------------------------------------------------------------------
def query(self):
""" Reconstruct the query from this filter """
op = self.op
if op is None:
return None
if self.tablename and self.fieldname:
l = current.s3db[self.tablename][self.fieldname]
elif self.l:
l = self.l.query()
else:
l = None
r = self.r
if op in ("AND", "OR", "NOT"):
r = r.query() if r else True
if op == "AND":
if l is not None and r is not None:
return l & r
elif r is not None:
return r
else:
return l
elif op == "OR":
if l is not None and r is not None:
return l | r
else:
return None
elif op == "NOT":
if l is not None:
return ~l
else:
return None
elif l is None:
return None
if isinstance(r, S3AxisFilter):
r = r.query()
if r is None:
return None
if op == "LOWER":
return l.lower()
elif op == "UPPER":
return l.upper()
elif op == "EQ":
return l == r
elif op == "NE":
return l != r
elif op == "LT":
return l < r
elif op == "LE":
return l <= r
elif op == "GE":
return l >= r
elif op == "GT":
return l > r
elif op == "BELONGS":
return l.belongs(r)
elif op == "CONTAINS":
return l.contains(r)
else:
return None
# -------------------------------------------------------------------------
def values(self, rfield):
"""
Helper method to filter list:type axis values
@param rfield: the axis field
@return: pair of value lists [include], [exclude]
"""
op = self.op
tablename = self.tablename
fieldname = self.fieldname
if tablename == rfield.tname and \
fieldname == rfield.fname:
value = self.r
if isinstance(value, (list, tuple)):
value = [s3_unicode(v) for v in value]
else:
value = [s3_unicode(value)]
if op == "CONTAINS":
return value, []
elif op == "EQ":
return value, []
elif op == "NE":
return [], value
elif op == "AND":
li, le = self.l.values(rfield)
ri, re = self.r.values(rfield)
return [v for v in li + ri if v not in le + re], []
elif op == "OR":
li, le = self.l.values(rfield)
ri, re = self.r.values(rfield)
return [v for v in li + ri], []
if op == "NOT":
li, le = self.l.values(rfield)
return [], li
return [], []
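    # Usage sketch (illustrative; rfield is an S3ResourceField for the axis):
    #
    #   qdict = resource.get_query().as_dict(flat=True)
    #   af = S3AxisFilter(qdict, [resource.tablename])
    #   include, exclude = af.values(rfield)
    #   # include: values which the filter explicitly selects
    #   # exclude: values which the filter explicitly rejects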
# =============================================================================
class S3ResourceFilter(object):
""" Class representing a resource filter """
def __init__(self,
resource,
id=None,
uid=None,
filter=None,
vars=None,
filter_component=None):
"""
Constructor
@param resource: the S3Resource
@param id: the record ID (or list of record IDs)
@param uid: the record UID (or list of record UIDs)
@param filter: a filter query (Query or S3ResourceQuery)
@param vars: the dict of GET vars (URL filters)
@param filter_component: the alias of the component the URL
filters apply for (filters for this
component must be handled separately)
"""
self.resource = resource
self.queries = []
self.filters = []
self.cqueries = {}
self.cfilters = {}
self.query = None
self.rfltr = None
self.vfltr = None
self.transformed = None
self.multiple = True
self.distinct = False
# Joins
self.ijoins = {}
self.ljoins = {}
table = resource.table
# Accessible/available query
if resource.accessible_query is not None:
method = []
if resource._approved:
method.append("read")
if resource._unapproved:
method.append("review")
mquery = resource.accessible_query(method, table)
else:
mquery = (table._id > 0)
# Deletion status
DELETED = current.xml.DELETED
if DELETED in table.fields and not resource.include_deleted:
remaining = (table[DELETED] != True)
mquery = remaining & mquery
# ID query
if id is not None:
if not isinstance(id, (list, tuple)):
self.multiple = False
mquery = mquery & (table._id == id)
else:
mquery = mquery & (table._id.belongs(id))
# UID query
UID = current.xml.UID
if uid is not None and UID in table:
if not isinstance(uid, (list, tuple)):
self.multiple = False
mquery = mquery & (table[UID] == uid)
else:
mquery = mquery & (table[UID].belongs(uid))
parent = resource.parent
if not parent:
# Standard master query
self.mquery = mquery
# URL queries
if vars:
resource.vars = Storage(vars)
# BBox
bbox, joins = self.parse_bbox_query(resource, vars)
if bbox is not None:
self.queries.append(bbox)
if joins:
self.ljoins.update(joins)
# Filters
add_filter = self.add_filter
# Current concept:
# Interpret all URL filters in the context of master
queries = S3URLQuery.parse(resource, vars)
# @todo: Alternative concept (inconsistent?):
# Interpret all URL filters in the context of filter_component:
#if filter_component and \
#filter_component in resource.components:
#context = resource.components[filter_component]
#else:
#context = resource
#queries = S3URLQuery.parse(context, vars)
for alias in queries:
if filter_component == alias:
for q in queries[alias]:
add_filter(q, component=alias, master=False)
else:
for q in queries[alias]:
add_filter(q)
self.cfilters = queries
else:
# Parent filter
pf = parent.rfilter
if not pf:
pf = parent.build_query()
# Extended master query
self.mquery = mquery & pf.get_query()
# Join the master
self.ijoins[parent._alias] = resource._join(reverse=True)
# Component/link-table specific filters
add_filter = self.add_filter
aliases = [resource.alias]
if resource.link is not None:
aliases.append(resource.link.alias)
elif resource.linked is not None:
aliases.append(resource.linked.alias)
for alias in aliases:
for filter_set in (pf.cqueries, pf.cfilters):
if alias in filter_set:
[add_filter(q) for q in filter_set[alias]]
# Additional filters
if filter is not None:
self.add_filter(filter)
# -------------------------------------------------------------------------
def add_filter(self, query, component=None, master=True):
"""
Extend this filter
@param query: a Query or S3ResourceQuery object
@param component: alias of the component the filter shall be
added to (None for master)
@param master: False to filter only component
"""
alias = None
if not master:
if not component:
return
if component != self.resource.alias:
alias = component
if isinstance(query, S3ResourceQuery):
self.transformed = None
filters = self.filters
cfilters = self.cfilters
self.distinct |= query._joins(self.resource)[1]
else:
# DAL Query
filters = self.queries
cfilters = self.cqueries
self.query = None
if alias:
if alias in self.cfilters:
cfilters[alias].append(query)
else:
cfilters[alias] = [query]
else:
filters.append(query)
return
# -------------------------------------------------------------------------
def get_query(self):
""" Get the effective DAL query """
if self.query is not None:
return self.query
resource = self.resource
query = reduce(lambda x, y: x & y, self.queries, self.mquery)
if self.filters:
if self.transformed is None:
# Combine all filters
filters = reduce(lambda x, y: x & y, self.filters)
# Transform with external search engine
transformed = filters.transform(resource)
self.transformed = transformed
# Split DAL and virtual filters
self.rfltr, self.vfltr = transformed.split(resource)
# Add to query
rfltr = self.rfltr
if rfltr is not None:
if isinstance(rfltr, S3ResourceQuery):
query &= rfltr.query(resource)
else:
# Combination of virtual field filter and web2py Query
query &= rfltr
self.query = query
return query
# -------------------------------------------------------------------------
def get_filter(self):
""" Get the effective virtual filter """
if self.query is None:
self.get_query()
return self.vfltr
# -------------------------------------------------------------------------
def get_joins(self, left=False, as_list=True):
"""
Get the joins required for this filter
@param left: get the left joins
@param as_list: return a flat list rather than a nested dict
"""
if self.query is None:
self.get_query()
joins = dict(self.ljoins if left else self.ijoins)
resource = self.resource
for q in self.filters:
subjoins = q._joins(resource, left=left)[0]
joins.update(subjoins)
# Cross-component left joins
parent = resource.parent
if parent:
pf = parent.rfilter
if pf is None:
pf = parent.build_query()
parent_left = pf.get_joins(left=True, as_list=False)
if parent_left:
tablename = resource._alias
if left:
for tn in parent_left:
if tn not in joins and tn != tablename:
joins[tn] = parent_left[tn]
joins[parent._alias] = resource._join(reverse=True)
else:
joins.pop(parent._alias, None)
if as_list:
return [j for tablename in joins for j in joins[tablename]]
else:
return joins
# -------------------------------------------------------------------------
def get_fields(self):
""" Get all field selectors in this filter """
if self.query is None:
self.get_query()
if self.vfltr:
return self.vfltr.fields()
else:
return []
# -------------------------------------------------------------------------
@staticmethod
def parse_bbox_query(resource, get_vars):
"""
            Generate a Query from a URL bounding box query; supports multiple
            bboxes, but is optimised for the usual case of just one
@param resource: the resource
@param get_vars: the URL GET vars
"""
tablenames = ("gis_location",
"gis_feature_query",
"gis_layer_shapefile")
POLYGON = "POLYGON((%s %s, %s %s, %s %s, %s %s, %s %s))"
query = None
joins = {}
if get_vars:
table = resource.table
tablename = resource.tablename
fields = table.fields
introspect = tablename not in tablenames
for k, v in get_vars.items():
if k[:4] == "bbox":
if type(v) is list:
v = v[-1]
try:
minLon, minLat, maxLon, maxLat = v.split(",")
except ValueError:
# Badly-formed bbox - ignore
continue
# Identify the location reference
field = None
rfield = None
alias = False
if k.find(".") != -1:
# Field specified in query
fname = k.split(".")[1]
if fname not in fields:
# Field not found - ignore
continue
field = table[fname]
if query is not None or "bbox" in get_vars:
# Need alias
alias = True
elif introspect:
# Location context?
context = resource.get_config("context")
if context and "location" in context:
try:
rfield = resource.resolve_selector("(location)$lat")
except (SyntaxError, AttributeError):
rfield = None
else:
if not rfield.field or rfield.tname != "gis_location":
# Invalid location context
rfield = None
# Fall back to location_id (or site_id as last resort)
if rfield is None:
fname = None
for f in fields:
ftype = str(table[f].type)
if ftype[:22] == "reference gis_location":
fname = f
break
elif not fname and \
ftype[:18] == "reference org_site":
fname = f
field = table[fname] if fname else None
if not rfield and not field:
# No location reference could be identified => skip
continue
# Construct the join to gis_location
gtable = current.s3db.gis_location
if rfield:
joins.update(rfield.left)
elif field:
fname = field.name
gtable = current.s3db.gis_location
if alias:
gtable = gtable.with_alias("gis_%s_location" % fname)
tname = str(gtable)
ftype = str(field.type)
if ftype == "reference gis_location":
joins[tname] = [gtable.on(gtable.id == field)]
elif ftype == "reference org_site":
stable = current.s3db.org_site
if alias:
stable = stable.with_alias("org_%s_site" % fname)
joins[tname] = [stable.on(stable.site_id == field),
gtable.on(gtable.id == stable.location_id)]
elif introspect:
# => not a location or site reference
continue
elif tablename in ("gis_location", "gis_feature_query"):
gtable = table
elif tablename == "gis_layer_shapefile":
# @todo: this needs a join too, no?
gtable = resource.components.items()[0][1].table
# Construct the bbox filter
bbox_filter = None
if current.deployment_settings.get_gis_spatialdb():
# Use the Spatial Database
minLon = float(minLon)
maxLon = float(maxLon)
minLat = float(minLat)
maxLat = float(maxLat)
bbox = POLYGON % (minLon, minLat,
minLon, maxLat,
maxLon, maxLat,
maxLon, minLat,
minLon, minLat)
try:
# Spatial DAL & Database
bbox_filter = gtable.the_geom \
.st_intersects(bbox)
except:
# Old DAL or non-spatial database
pass
if bbox_filter is None:
# Standard Query
bbox_filter = (gtable.lon > float(minLon)) & \
(gtable.lon < float(maxLon)) & \
(gtable.lat > float(minLat)) & \
(gtable.lat < float(maxLat))
# Add bbox filter to query
if query is None:
query = bbox_filter
else:
# Merge with the previous BBOX
query = query & bbox_filter
return query, joins
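    # Usage sketch (illustrative): a bounding-box URL filter
    #
    #   get_vars = {"bbox": "-10.5,49.8,2.1,59.4"}  # minLon,minLat,maxLon,maxLat
    #   query, joins = S3ResourceFilter.parse_bbox_query(resource, get_vars)
    #   # query: DAL query restricting the location reference to the bbox
    #   #        (None if no valid bbox var was found)
    #   # joins: left joins required to reach gis_location, keyed by table name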
# -------------------------------------------------------------------------
def __call__(self, rows, start=None, limit=None):
"""
Filter a set of rows by the effective virtual filter
@param rows: a Rows object
@param start: index of the first matching record to select
@param limit: maximum number of records to select
"""
vfltr = self.get_filter()
if rows is None or vfltr is None:
return rows
resource = self.resource
if start is None:
start = 0
first = start
if limit is not None:
last = start + limit
if last < first:
first, last = last, first
if first < 0:
first = 0
if last < 0:
last = 0
else:
last = None
i = 0
result = []
append = result.append
for row in rows:
if last is not None and i >= last:
break
success = vfltr(resource, row, virtual=True)
if success or success is None:
if i >= first:
append(row)
i += 1
return Rows(rows.db, result,
colnames=rows.colnames, compact=False)
# -------------------------------------------------------------------------
def count(self, left=None, distinct=False):
"""
Get the total number of matching records
@param left: left outer joins
@param distinct: count only distinct rows
"""
distinct |= self.distinct
resource = self.resource
if resource is None:
return 0
table = resource.table
vfltr = self.get_filter()
if vfltr is None and not distinct:
tablename = table._tablename
ijoins = S3Joins(tablename, self.get_joins(left=False))
ljoins = S3Joins(tablename, self.get_joins(left=True))
ljoins.add(left)
join = ijoins.as_list(prefer=ljoins)
left = ljoins.as_list()
cnt = table._id.count()
row = current.db(self.query).select(cnt,
join=join,
left=left).first()
if row:
return row[cnt]
else:
return 0
else:
data = resource.select([table._id.name],
# We don't really want to retrieve
# any rows but just count, hence:
limit=1,
count=True)
return data["numrows"]
# -------------------------------------------------------------------------
def __repr__(self):
""" String representation of the instance """
resource = self.resource
left_joins = self.get_joins(left=True)
if left_joins:
left = S3Joins(resource.tablename, left_joins)
joins = ", ".join([str(j) for j in left.as_list()])
else:
left = None
joins = None
vfltr = self.get_filter()
if vfltr:
vfltr = vfltr.represent(resource)
else:
vfltr = None
represent = "<S3ResourceFilter %s, " \
"query=%s, " \
"left=[%s], " \
"distinct=%s, " \
"filter=%s>" % (
resource.tablename,
self.get_query(),
joins,
self.distinct,
vfltr
)
return represent
# -------------------------------------------------------------------------
def serialize_url(self):
"""
Serialize this filter as URL query
@return: a Storage of URL GET variables
"""
resource = self.resource
url_vars = Storage()
for f in self.filters:
sub = f.serialize_url(resource=resource)
url_vars.update(sub)
return url_vars
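    # Usage sketch (illustrative):
    #
    #   url_vars = resource.rfilter.serialize_url()
    #   # => Storage of GET vars reproducing the current URL filters,
    #   #    suitable for appending to a request URL to re-apply them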
# =============================================================================
class S3ResourceData(object):
""" Class representing data in a resource """
def __init__(self,
resource,
fields,
start=0,
limit=None,
left=None,
orderby=None,
groupby=None,
distinct=False,
virtual=True,
count=False,
getids=False,
as_rows=False,
represent=False,
show_links=True,
raw_data=False):
"""
Constructor, extracts (and represents) data from a resource
@param resource: the resource
@param fields: the fields to extract (selector strings)
@param start: index of the first record
@param limit: maximum number of records
@param left: additional left joins required for custom filters
@param orderby: orderby-expression for DAL
@param groupby: fields to group by (overrides fields!)
@param distinct: select distinct rows
@param virtual: include mandatory virtual fields
@param count: include the total number of matching records
@param getids: include the IDs of all matching records
@param as_rows: return the rows (don't extract/represent)
            @param represent: render field value representations
            @param show_links: allow representation functions to render
                               links as HTML
            @param raw_data: include raw data in the result
@note: as_rows / groupby prevent automatic splitting of
large multi-table joins, so use with care!
@note: with groupby, only the groupby fields will be returned
(i.e. fields will be ignored), because aggregates are
not supported (yet)
"""
# The resource
self.resource = resource
self.table = table = resource.table
# Dict to collect accessible queries for differential
# field authorization (each joined table is authorized
# separately)
self.aqueries = aqueries = {}
# Joins (inner/left)
tablename = table._tablename
self.ijoins = ijoins = S3Joins(tablename)
self.ljoins = ljoins = S3Joins(tablename)
# The query
master_query = query = resource.get_query()
# Joins from filters
# @note: in components, rfilter is None until after get_query!
rfilter = resource.rfilter
filter_tables = set(ijoins.add(rfilter.get_joins(left=False)))
filter_tables.update(ljoins.add(rfilter.get_joins(left=True)))
# Left joins from caller
master_tables = set(ljoins.add(left))
filter_tables.update(master_tables)
resolve = resource.resolve_selectors
# Virtual fields and extra fields required by filter
virtual_fields = rfilter.get_fields()
vfields, vijoins, vljoins, d = resolve(virtual_fields, show=False)
extra_tables = set(ijoins.extend(vijoins))
extra_tables.update(ljoins.extend(vljoins))
distinct |= d
# Display fields (fields to include in the result)
if fields is None:
fields = [f.name for f in resource.readable_fields()]
dfields, dijoins, dljoins, d = resolve(fields, extra_fields=False)
ijoins.extend(dijoins)
ljoins.extend(dljoins)
distinct |= d
# Initialize field data and effort estimates
if not groupby or as_rows:
self.init_field_data(dfields)
else:
self.field_data = self.effort = None
# Resolve ORDERBY
orderby, orderby_aggr, orderby_fields, tables = self.resolve_orderby(orderby)
if tables:
filter_tables.update(tables)
# Virtual fields filter and limitby
vfltr = resource.get_filter()
if vfltr is None:
limitby = resource.limitby(start=start, limit=limit)
else:
# Skip start/limit in master query if we filter by virtual
# fields: we need to extract all matching rows first, then
# filter by virtual fields, then apply page limits
limitby = None
# Filter Query:
# If we need to determine the number and/or ids of all matching
# records, but not to extract all records, then we run a
# separate query here to extract just this information:
# Joins for filter query
filter_ijoins = ijoins.as_list(tablenames=filter_tables,
aqueries=aqueries,
prefer=ljoins)
filter_ljoins = ljoins.as_list(tablenames=filter_tables,
aqueries=aqueries)
ids = page = totalrows = None
if getids or count or ljoins or ijoins:
if not groupby and \
not vfltr and \
(count or limitby or extra_tables != filter_tables):
# Execute the filter query
totalrows, ids = self.filter_query(query,
join=filter_ijoins,
left=filter_ljoins,
getids=getids or ljoins or ijoins,
orderby=orderby_aggr)
if ids is not None:
if limitby:
page = ids[limitby[0]:limitby[1]]
else:
page = ids
# Once we have the ids, we don't need to apply the
# filter query (and the joins it requires) again,
# but can use a simplified master query:
master_query = table._id.belongs(page)
# Order and limits are also determined by the page
# (which is an ordered list of record IDs), so we
# do not need to retain them (and join orderby
# fields in subsequent queries) either.
orderby = None
limitby = None
# If we don't use a simplified master_query, we must include
# all necessary joins for filter and orderby (=filter_tables) in
# the master query
if ids is None and (filter_ijoins or filter_ljoins):
master_tables = filter_tables
# Determine fields in master query
if not groupby:
master_tables.update(extra_tables)
tables, qfields, mfields, groupby = self.master_fields(dfields,
vfields,
master_tables,
as_rows=as_rows,
groupby=groupby)
# Additional tables to join?
if tables:
master_tables.update(tables)
# ORDERBY settings
pkey = str(table._id)
if groupby:
distinct = False
orderby = orderby_aggr
has_id = pkey in qfields
else:
if distinct and orderby:
# With DISTINCT, ORDERBY-fields must appear in SELECT
# (required by postgresql?)
for orderby_field in orderby_fields:
fn = str(orderby_field)
if fn not in qfields:
qfields[fn] = orderby_field
# Make sure we have the primary key in SELECT
if pkey not in qfields:
qfields[pkey] = resource._id
has_id = True
# Joins for master query
master_ijoins = ijoins.as_list(tablenames=master_tables,
aqueries=aqueries,
prefer=ljoins)
master_ljoins = ljoins.as_list(tablenames=master_tables,
aqueries=aqueries)
# Suspend (mandatory) virtual fields if so requested
if not virtual:
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Execute master query
db = current.db
rows = db(master_query).select(join=master_ijoins,
left=master_ljoins,
distinct=distinct,
groupby=groupby,
orderby=orderby,
limitby=limitby,
cacheable=not as_rows,
*qfields.values())
# Restore virtual fields
if not virtual:
osetattr(table, "virtualfields", vf)
# Apply virtual fields filter
if rows and vfltr is not None:
if count:
rows = rfilter(rows)
totalrows = len(rows)
if limit and start is None:
start = 0
if start is not None and limit is not None:
rows = Rows(db,
records=rows.records[start:start+limit],
colnames=rows.colnames,
compact=False)
elif start is not None:
rows = Rows(db,
records=rows.records[start:],
colnames=rows.colnames,
compact=False)
else:
rows = rfilter(rows, start=start, limit=limit)
if (getids or ljoins or ijoins) and has_id:
ids = self.getids(rows, pkey)
totalrows = len(ids)
# Build the result
self.rfields = dfields
self.numrows = 0 if totalrows is None else totalrows
self.ids = ids
if groupby or as_rows:
# Just store the rows, no further queries or extraction
self.rows = rows
elif not rows:
# No rows found => empty list
self.rows = []
else:
# Extract the data from the master rows
records = self.extract(rows,
pkey,
list(mfields),
join = hasattr(rows[0], tablename),
represent = represent)
# Extract the page record IDs if we don't have them yet
if page is None:
if ids is None:
self.ids = ids = self.getids(rows, pkey)
page = ids
# Execute any joined queries
joined_fields = self.joined_fields(dfields, qfields)
joined_query = table._id.belongs(page)
for jtablename, jfields in joined_fields.items():
records = self.joined_query(jtablename,
joined_query,
jfields,
records,
represent=represent)
# Re-combine and represent the records
results = {}
field_data = self.field_data
NONE = current.messages["NONE"]
render = self.render
for dfield in dfields:
if represent:
# results = {RecordID: {ColumnName: Representation}}
results = render(dfield,
results,
none=NONE,
raw_data=raw_data,
show_links=show_links)
else:
# results = {RecordID: {ColumnName: Value}}
colname = dfield.colname
fdata = field_data[colname]
frecords = fdata[1]
list_type = fdata[3]
for record_id in records:
if record_id not in results:
result = results[record_id] = Storage()
else:
result = results[record_id]
data = frecords[record_id].keys()
if len(data) == 1 and not list_type:
data = data[0]
result[colname] = data
self.rows = [results[record_id] for record_id in page]
# -------------------------------------------------------------------------
def init_field_data(self, rfields):
"""
Initialize field data and effort estimates for representation
Field data: allow representation per unique value (rather than
record by record), together with bulk-represent this
can reduce the total lookup effort per field to a
single query
Effort estimates: if no bulk-represent is available for a
list:reference, then a lookup per unique value
is only faster if the number of unique values
is significantly lower than the number of
extracted rows (and the number of values per
row), otherwise a per-row lookup is more
efficient.
E.g. 5 rows with 2 values each,
10 unique values in total
=> row-by-row lookup more efficient
(5 queries vs 10 queries)
but: 5 rows with 2 values each,
2 unique values in total
=> value-by-value lookup is faster
(5 queries vs 2 queries)
However: 15 rows with 15 values each,
20 unique values in total
=> value-by-value lookup faster
                                       (15 queries with 15 values each vs.
                                        20 queries with 1 value each)!
The required effort is estimated
during the data extraction, and then used to
determine the lookup strategy for the
representation.
@param rfields: the fields to extract ([S3ResourceField])
"""
table = self.resource.table
tablename = table._tablename
pkey = str(table._id)
field_data = {pkey: ({}, {}, False, False, False, False)}
effort = {pkey: 0}
for dfield in rfields:
colname = dfield.colname
effort[colname] = 0
ftype = dfield.ftype[:4]
field_data[colname] = ({}, {},
dfield.tname != tablename,
ftype == "list",
dfield.virtual,
ftype == "json",
)
self.field_data = field_data
self.effort = effort
return
# -------------------------------------------------------------------------
def resolve_orderby(self, orderby):
"""
Resolve the ORDERBY expression.
@param orderby: the orderby expression from the caller
@return: tuple (expr, aggr, fields, tables):
expr: the orderby expression (resolved into Fields)
aggr: the orderby expression with aggregations
fields: the fields in the orderby
tables: the tables required for the orderby
@note: for GROUPBY id (e.g. filter query), all ORDERBY fields
must appear in aggregation functions, otherwise ORDERBY
can be ambiguous => use aggr instead of expr
"""
table = self.resource.table
tablename = table._tablename
pkey = str(table._id)
ljoins = self.ljoins
ijoins = self.ijoins
tables = set()
if orderby:
db = current.db
items = self.resolve_expression(orderby)
expr = []
aggr = []
fields = []
for item in items:
expression = None
if type(item) is Expression:
f = item.first
op = item.op
if op == db._adapter.AGGREGATE:
# Already an aggregation
expression = item
elif isinstance(f, Field) and op == db._adapter.INVERT:
direction = "desc"
else:
# Other expression - not supported
continue
elif isinstance(item, Field):
direction = "asc"
f = item
elif isinstance(item, str):
fn, direction = (item.strip().split() + ["asc"])[:2]
tn, fn = ([tablename] + fn.split(".", 1))[-2:]
try:
f = db[tn][fn]
except (AttributeError, KeyError):
continue
else:
continue
fname = str(f)
tname = fname.split(".", 1)[0]
if tname != tablename:
if tname in ljoins or tname in ijoins:
tables.add(tname)
else:
# No join found for this field => skip
continue
fields.append(f)
if expression is None:
expression = f if direction == "asc" else ~f
expr.append(expression)
direction = direction.strip().lower()[:3]
if fname != pkey:
expression = f.min() if direction == "asc" else ~(f.max())
else:
expr.append(expression)
aggr.append(expression)
else:
expr = None
aggr = None
fields = None
return expr, aggr, fields, tables
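    # Usage sketch (illustrative; assumes org_office is among the joined
    # tables, otherwise the field would be skipped):
    #
    #   expr, aggr, fields, tables = self.resolve_orderby("org_office.name desc")
    #   # expr:   [~org_office.name]          (plain ORDERBY expression)
    #   # aggr:   [~(org_office.name.max())]  (aggregated variant for GROUPBY id)
    #   # fields: [org_office.name]
    #   # tables: set of additional table names required for the ORDERBY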
# -------------------------------------------------------------------------
def filter_query(self, query,
join=None,
left=None,
getids=False,
orderby=None):
"""
Execute a query to determine the number/record IDs of all
matching rows
@param query: the query to execute
@param join: the inner joins for this query
@param left: the left joins for this query
            @param getids: also extract the IDs of all matching records
@param orderby: ORDERBY expression for this query
@return: tuple of (TotalNumberOfRecords, RecordIDs)
"""
db = current.db
table = self.table
if getids:
field = table._id
distinct = False
groupby = field
else:
field = table._id.count()
distinct = True
groupby = None
# Temporarily deactivate virtual fields
vf = table.virtualfields
osetattr(table, "virtualfields", [])
# Extract the data
rows = db(query).select(field,
join=join,
left=left,
distinct=distinct,
orderby=orderby,
groupby=groupby,
cacheable=True)
# Restore the virtual fields
osetattr(table, "virtualfields", vf)
if getids:
pkey = str(table._id)
ids = [row[pkey] for row in rows]
totalrows = len(ids)
else:
ids = None
totalrows = rows.first()[field]
return totalrows, ids
# -------------------------------------------------------------------------
def master_fields(self,
dfields,
vfields,
joined_tables,
as_rows=False,
groupby=None):
"""
Find all tables and fields to retrieve in the master query
@param dfields: the requested fields (S3ResourceFields)
@param vfields: the virtual filter fields
@param joined_tables: the tables joined in the master query
@param as_rows: whether to produce web2py Rows
@param groupby: the GROUPBY expression from the caller
@return: tuple (tables, fields, extract, groupby):
tables: the tables required to join
fields: the fields to retrieve
extract: the fields to extract from the result
groupby: the GROUPBY expression (resolved into Fields)
"""
db = current.db
tablename = self.resource.table._tablename
# Names of additional tables to join
tables = set()
# Fields to retrieve in the master query, as dict {ColumnName: Field}
fields = {}
# Column names of fields to extract from the master rows
extract = set()
if groupby:
# Resolve the groupby into Fields
items = self.resolve_expression(groupby)
groupby = []
groupby_append = groupby.append
for item in items:
# Identify the field
tname = None
if isinstance(item, Field):
f = item
elif isinstance(item, str):
fn = item.strip()
tname, fn = ([tablename] + fn.split(".", 1))[-2:]
try:
f = db[tname][fn]
except (AttributeError, KeyError):
continue
else:
continue
groupby_append(f)
# Add to fields
fname = str(f)
if not tname:
tname = f.tablename
fields[fname] = f
# Do we need to join additional tables?
if tname == tablename:
# no join required
continue
else:
# Get joins from dfields
tnames = None
for dfield in dfields:
if dfield.colname == fname:
tnames = self.rfield_tables(dfield)
break
if tnames:
tables |= tnames
else:
# Join at least the table that holds the fields
tables.add(tname)
# Only extract GROUPBY fields (as we don't support aggregates)
extract = set(fields.keys())
else:
rfields = dfields + vfields
for rfield in rfields:
# Is the field in a joined table?
tname = rfield.tname
joined = tname == tablename or tname in joined_tables
if as_rows or joined:
colname = rfield.colname
if rfield.show:
# If show => add to extract
extract.add(colname)
if rfield.field:
# If real field => add to fields
fields[colname] = rfield.field
if not joined:
# Not joined yet? => add all required tables
tables |= self.rfield_tables(rfield)
return tables, fields, extract, groupby
# -------------------------------------------------------------------------
def joined_fields(self, all_fields, master_fields):
"""
Determine which fields in joined tables haven't been
retrieved in the master query
@param all_fields: all requested fields (list of S3ResourceFields)
@param master_fields: all fields in the master query, a dict
{ColumnName: Field}
@return: a nested dict {TableName: {ColumnName: Field}},
additionally required left joins are stored per
table in the inner dict as "_left"
"""
resource = self.resource
table = resource.table
tablename = table._tablename
fields = {}
for rfield in all_fields:
colname = rfield.colname
if colname in master_fields or rfield.tname == tablename:
continue
tname = rfield.tname
if tname not in fields:
sfields = fields[tname] = {}
left = rfield.left
joins = S3Joins(table)
if left:
[joins.add(left[tn]) for tn in left]
sfields["_left"] = joins
else:
sfields = fields[tname]
if colname not in sfields:
sfields[colname] = rfield.field
return fields
# -------------------------------------------------------------------------
def joined_query(self, tablename, query, fields, records, represent=False):
"""
Extract additional fields from a joined table: if there are
fields in joined tables which haven't been extracted in the
master query, then we perform a separate query for each joined
table (this is faster than building a multi-table-join)
@param tablename: name of the joined table
@param query: the Query
@param fields: the fields to extract
@param records: the output dict to update, structure:
{RecordID: {ColumnName: RawValues}}
@param represent: store extracted data (self.field_data) for
fast representation, and estimate lookup
efforts (self.effort)
@return: the output dict
"""
s3db = current.s3db
ljoins = self.ljoins
table = self.resource.table
pkey = str(table._id)
# Get the extra fields for subtable
sresource = s3db.resource(tablename)
efields, ejoins, l, d = sresource.resolve_selectors([])
# Get all left joins for subtable
tnames = ljoins.extend(l) + list(fields["_left"].tables)
sjoins = ljoins.as_list(tablenames=tnames,
aqueries=self.aqueries)
if not sjoins:
return records
del fields["_left"]
# Get all fields for subtable query
extract = fields.keys()
for efield in efields:
fields[efield.colname] = efield.field
sfields = [f for f in fields.values() if f]
if not sfields:
sfields.append(sresource._id)
sfields.insert(0, table._id)
# Retrieve the subtable rows
rows = current.db(query).select(left=sjoins,
distinct=True,
cacheable=True,
*sfields)
# Extract and merge the data
records = self.extract(rows,
pkey,
extract,
records=records,
join=True,
represent=represent)
return records
# -------------------------------------------------------------------------
def extract(self,
rows,
pkey,
columns,
join=True,
records=None,
represent=False):
"""
Extract the data from rows and store them in self.field_data
@param rows: the rows
@param pkey: the primary key
@param columns: the columns to extract
@param join: the rows are the result of a join query
@param records: the records dict to merge the data into
@param represent: collect unique values per field and estimate
representation efforts for list:types
"""
field_data = self.field_data
effort = self.effort
if records is None:
records = {}
def get(key):
t, f = key.split(".", 1)
if join:
return lambda row, t=t, f=f: ogetattr(ogetattr(row, t), f)
else:
return lambda row, f=f: ogetattr(row, f)
getkey = get(pkey)
getval = [get(c) for c in columns]
from itertools import groupby
for k, g in groupby(rows, key=getkey):
group = list(g)
record = records.get(k, {})
for idx, col in enumerate(columns):
fvalues, frecords, joined, list_type, virtual, json_type = field_data[col]
values = record.get(col, {})
lazy = False
for row in group:
try:
value = getval[idx](row)
except AttributeError:
_debug("Warning S3Resource.extract: column %s not in row" % col)
value = None
if lazy or callable(value):
# Lazy virtual field
value = value()
lazy = True
if virtual and not list_type and type(value) is list:
# Virtual field that returns a list
list_type = True
if list_type and value is not None:
if represent and value:
effort[col] += 30 + len(value)
for v in value:
if v not in values:
values[v] = None
if represent and v not in fvalues:
fvalues[v] = None
elif json_type:
# Returns unhashable types
value = json.dumps(value)
if value not in values:
values[value] = None
if represent and value not in fvalues:
fvalues[value] = None
else:
if value not in values:
values[value] = None
if represent and value not in fvalues:
fvalues[value] = None
record[col] = values
if k not in frecords:
frecords[k] = record[col]
records[k] = record
return records
# -------------------------------------------------------------------------
def render(self,
rfield,
results,
none="-",
raw_data=False,
show_links=True):
"""
Render the representations of the values for rfield in
all records in the result
@param rfield: the field (S3ResourceField)
@param results: the output dict to update with the representations,
structure: {RecordID: {ColumnName: Representation}},
the raw data will be a special item "_row" in the
inner dict holding a Storage of the raw field values
@param none: default representation of None
@param raw_data: retain the raw data in the output dict
@param show_links: allow representation functions to render
links as HTML
"""
colname = rfield.colname
field_data = self.field_data
fvalues, frecords, joined, list_type, virtual, json_type = field_data[colname]
# Get the renderer
renderer = rfield.represent
if not callable(renderer):
# @ToDo: Don't convert unformatted numbers to strings
renderer = lambda v: s3_unicode(v) if v is not None else none
# Deactivate linkto if so requested
if not show_links and hasattr(renderer, "show_link"):
show_link = renderer.show_link
renderer.show_link = False
else:
show_link = None
per_row_lookup = list_type and \
self.effort[colname] < len(fvalues) * 30
# Render all unique values
if hasattr(renderer, "bulk") and not list_type:
per_row_lookup = False
fvalues = renderer.bulk(fvalues.keys(), list_type=False)
elif not per_row_lookup:
for value in fvalues:
try:
text = renderer(value)
except:
text = s3_unicode(value)
fvalues[value] = text
# Write representations into result
for record_id in frecords:
if record_id not in results:
results[record_id] = Storage() \
if not raw_data \
else Storage(_row=Storage())
record = frecords[record_id]
result = results[record_id]
# List type with per-row lookup?
if per_row_lookup:
value = record.keys()
if None in value and len(value) > 1:
value = [v for v in value if v is not None]
try:
text = renderer(value)
except:
text = s3_unicode(value)
result[colname] = text
if raw_data:
result["_row"][colname] = value
# Single value (master record)
elif len(record) == 1 or \
not joined and not list_type:
value = record.keys()[0]
result[colname] = fvalues[value] \
if value in fvalues else none
if raw_data:
result["_row"][colname] = value
continue
# Multiple values (joined or list-type)
else:
vlist = []
for value in record:
if value is None and not list_type:
continue
value = fvalues[value] \
if value in fvalues else none
vlist.append(value)
# Concatenate multiple values
if any([hasattr(v, "xml") for v in vlist]):
data = TAG[""](
list(
chain.from_iterable(
[(v, ", ") for v in vlist])
)[:-1]
)
else:
data = ", ".join([s3_unicode(v) for v in vlist])
result[colname] = data
if raw_data:
result["_row"][colname] = record.keys()
# Restore linkto
if show_link is not None:
renderer.show_link = show_link
return results
# -------------------------------------------------------------------------
def __getitem__(self, key):
"""
Helper method to access the results as dict items, for
backwards-compatibility
@param key: the key
@todo: migrate use-cases to .<key> notation, then deprecate
"""
if key in ("rfields", "numrows", "ids", "rows"):
return getattr(self, key)
else:
raise AttributeError
# -------------------------------------------------------------------------
def getids(self, rows, pkey):
"""
            Extract all unique record IDs from rows, preserving their
            order of first appearance
@param rows: the Rows
@param pkey: the primary key
@return: list of unique record IDs
"""
x = set()
seen = x.add
result = []
append = result.append
for row in rows:
row_id = row[pkey]
if row_id not in x:
seen(row_id)
append(row_id)
return result
# -------------------------------------------------------------------------
@staticmethod
def rfield_tables(rfield):
"""
Get the names of all tables that need to be joined for a field
@param rfield: the field (S3ResourceField)
@return: a set of tablenames
"""
left = rfield.left
if left:
# => add all left joins required for that table
tablenames = set(j.first._tablename
for tn in left for j in left[tn])
else:
# => we don't know any further left joins,
# but as a minimum we need to add this table
tablenames = set([rfield.tname])
return tablenames
# -------------------------------------------------------------------------
@staticmethod
def resolve_expression(expr):
"""
Resolve an orderby or groupby expression into its items
@param expr: the orderby/groupby expression
"""
if isinstance(expr, str):
items = expr.split(",")
elif not isinstance(expr, (list, tuple)):
items = [expr]
else:
items = expr
return items
# END =========================================================================
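# -----------------------------------------------------------------------------
# Illustrative sketch (added for clarity; not part of the original module):
# extract() above walks the joined rows grouped by the master record's primary
# key and collects one de-duplicated set of raw values per column. The
# hypothetical standalone snippet below reproduces that grouping step with
# plain dicts instead of web2py Rows, so the pattern can be read in isolation.
if __name__ == "__main__":
    from itertools import groupby
    rows = [{"id": 1, "tag": "a"},
            {"id": 1, "tag": "b"},
            {"id": 2, "tag": "c"}]
    records = {}
    for key, group in groupby(rows, key=lambda row: row["id"]):
        # one dict of unique values per column, keyed by the record id
        values = records.setdefault(key, {}).setdefault("tag", {})
        for row in group:
            values[row["tag"]] = None
    assert records == {1: {"tag": {"a": None, "b": None}},
                       2: {"tag": {"c": None}}}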
|
[
"dominic@nursix.org"
] |
dominic@nursix.org
|
f3dd96f1082ad540ddc6630f148e84b403cb2e0d
|
651c5a5b3abdb220cd20c33db9dfb5447c973917
|
/events/utils/reports.py
|
b22dcdc497a611005dc5f4e7a92a0b25509d85cd
|
[] |
no_license
|
Frankie-Figz/capital-nature-ingest
|
cc8b568f7af32d700b6caf603a1f0465bbc49b74
|
a33d4fc86f500729a88b1041d4f5abf8e9b74712
|
refs/heads/master
| 2020-12-22T00:49:28.706241
| 2020-10-11T22:05:52
| 2020-10-11T22:05:52
| 236,619,317
| 0
| 0
| null | 2020-01-27T23:35:30
| 2020-01-27T23:35:29
| null |
UTF-8
|
Python
| false
| false
| 15,177
|
py
|
import csv
from datetime import datetime
from io import StringIO
import os
import re
import pandas as pd
from pandas.errors import EmptyDataError
from .event_source_map import event_source_map
from .aws_utils import get_matching_s3_keys, object_key_exists, \
read_and_delete_object, put_object
BUCKET = os.getenv('BUCKET_NAME')
if BUCKET:
import boto3
S3 = boto3.resource('s3')
def events_to_csv(events, out_dir='data', bucket=BUCKET):
'''
Write events to csv, either locally or to an S3 bucket.
Parameters:
events (list): a list of dicts, with each representing a single event.
out_dir (str): dir to write file.
bucket (str or None): the name of the S3 bucket. None by default
Returns:
scrape_file: location of file written to.
'''
scrape_date = datetime.now().strftime("%m-%d-%Y")
filename = f'cap-nature-events-scraped-{scrape_date}.csv'
fieldnames = {
'Do Not Import', 'Event Name', 'Event Description', 'Event Excerpt',
'Event Start Date', 'Event Start Time', 'Event End Date',
'Event End Time', 'Timezone', 'All Day Event',
'Hide Event From Event Listings', 'Event Sticky in Month View',
'Feature Event', 'Event Venue Name',
'Event Organizers', 'Event Show Map Link', 'Event Show Map',
'Event Cost', 'Event Currency Symbol', 'Event Currency Position',
'Event Category', 'Event Tags', 'Event Website',
'Event Featured Image', 'Allow Comments',
'Event Allow Trackbacks and Pingbacks'
}
if bucket:
key = f'{out_dir}/{filename}'
with StringIO() as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for event in events:
writer.writerow(event)
data = f.getvalue()
put_object(data, key)
return scrape_date
else:
out_path = os.path.join(os.getcwd(), out_dir, filename)
if not os.path.exists(os.path.join(os.getcwd(), out_dir)):
os.mkdir(os.path.join(os.getcwd(), out_dir))
with open(out_path, mode='w', encoding='utf-8', errors='ignore') as f:
writer = csv.DictWriter(f, fieldnames=fieldnames)
writer.writeheader()
for event in events:
writer.writerow(event)
return scrape_date
def get_past_venues(out_dir='data', bucket=BUCKET):
'''
    Returns a set of event venues from the current venue csv in the data
    directory (if it exists) and then deletes that file, as it will soon be
    replaced by a new, more up-to-date one.
Parameters:
out_dir (str): dir to write file.
bucket (str or None): the name of the S3 bucket. None by default
Returns:
past_venues (set): set of venues, or an empty set if there are none
'''
venues = []
if bucket:
try:
venue_key = next(get_matching_s3_keys(
prefix='data/cap-nature-venues')
)
except StopIteration:
return set()
venue_file = read_and_delete_object(venue_key)
with venue_file as f:
reader = csv.reader(f)
for i in reader:
venue = i[0]
venues.append(venue)
else:
data_path = os.path.join(os.getcwd(), out_dir)
if not os.path.exists(data_path):
os.mkdir(data_path)
data_files = []
for f in os.listdir(data_path):
if os.path.isfile(os.path.join(data_path, f)) and 'venues-' in f:
data_files.append(os.path.join(data_path, f))
try:
venue_file = data_files[0]
except IndexError:
# because there's no past file, so no past venues
return set()
with open(venue_file, errors='ignore') as f:
reader = csv.reader(f)
for i in reader:
venue = i[0]
venues.append(venue)
os.remove(venue_file)
past_venues = set(venues)
past_venues.remove('VENUE NAME')
return past_venues
def venues_to_csv(events, out_dir='data', bucket=BUCKET):
'''
Void function that writes unique event venues to csv, either locally or to
an S3 bucket.
Parameters:
events (list): a list of dicts, with each representing a single event.
out_dir (str): dir to write file.
bucket (str or None): the name of the S3 bucket. None by default
'''
venues = []
for event in events:
event_venue = event['Event Venue Name']
venues.append(event_venue)
past_venues = get_past_venues()
unique_venues = set(venues) | past_venues
now = datetime.now().strftime("%m-%d-%Y")
filename = f'cap-nature-venues-scraped-{now}.csv'
if bucket:
with StringIO() as f:
writer = csv.writer(f)
venues_to_write = list(unique_venues)
venues_to_write.insert(0, 'VENUE NAME')
for venue in venues_to_write:
writer.writerow([venue])
data = f.getvalue()
key = f'{out_dir}/{filename}'
put_object(data, key)
else:
out_path = os.path.join(os.getcwd(), out_dir, filename)
if not os.path.exists(os.path.join(os.getcwd(), out_dir)):
os.mkdir(os.path.join(os.getcwd(), out_dir))
with open(out_path, mode='w', encoding='utf-8', errors='ignore') as f:
writer = csv.writer(f)
venues_to_write = list(unique_venues)
venues_to_write.insert(0, 'VENUE NAME')
for venue in venues_to_write:
writer.writerow([venue])
def get_past_organizers(out_dir='data', bucket=BUCKET):
'''
    Returns a set of event organizers from the current organizer csv in the
    data directory (if it exists) and then deletes that file, as it will soon
    be replaced by a new, more up-to-date one.
Parameters:
out_dir (str): dir to write file.
bucket (str or None): the name of the S3 bucket. None by default
Returns:
past_organizers (set): set of organizers, or an empty set if none
'''
organizers = []
if bucket:
try:
org_key = next(get_matching_s3_keys(
prefix='data/cap-nature-organizer')
)
except StopIteration:
return set()
organizer_file = read_and_delete_object(org_key)
with organizer_file as f:
reader = csv.reader(f)
for i in reader:
organizer = i[0]
organizers.append(organizer)
else:
data_path = os.path.join(os.getcwd(), out_dir)
if not os.path.exists(data_path):
os.mkdir(data_path)
data_files = []
for f in os.listdir(data_path):
if 'organizers-' in f:
data_files.append(os.path.join(data_path, f))
try:
organizer_file = data_files[0]
except IndexError:
# IndexError because there's no past file
return set()
with open(organizer_file) as f:
reader = csv.reader(f)
for i in reader:
organizer = i[0]
organizers.append(organizer)
os.remove(organizer_file)
past_organizers = set(organizers)
past_organizers.remove('Event Organizer Name(s) or ID(s)')
return past_organizers
def organizers_to_csv(events, out_dir='data', bucket=BUCKET):
'''
Void function that writes unique event organizers to csv, either locally
or to an S3 bucket.
Parameters:
events (list): a list of dicts, with each representing a single event.
out_dir (str): dir to write file.
bucket (str or None): the name of the S3 bucket. None by default.
'''
organizers = []
for event in events:
event_organizer = event['Event Organizers']
organizers.append(event_organizer)
past_organizers = get_past_organizers()
unique_organizers = set(organizers) | past_organizers
now = datetime.now().strftime("%m-%d-%Y")
filename = f'cap-nature-organizers-scraped-{now}.csv'
if bucket:
with StringIO() as f:
writer = csv.writer(f)
orgs_to_write = list(unique_organizers)
orgs_to_write.insert(0, 'Event Organizer Name(s) or ID(s)')
for org in orgs_to_write:
writer.writerow([org])
data = f.getvalue()
key = f'{out_dir}/{filename}'
put_object(data, key)
else:
out_path = os.path.join(os.getcwd(), out_dir, filename)
if not os.path.exists(os.path.join(os.getcwd(), out_dir)):
os.mkdir(os.path.join(os.getcwd(), out_dir))
with open(out_path, mode='w', encoding='utf-8', errors='ignore') as f:
writer = csv.writer(f)
orgs_to_write = list(unique_organizers)
orgs_to_write.insert(0, 'Event Organizer Name(s) or ID(s)')
for org in orgs_to_write:
writer.writerow([org])
class ScrapeReport():
def __init__(self, events, scrape_date, bucket=BUCKET):
self.bucket = bucket
self.scrape_df = pd.DataFrame(events)
if bucket:
self.report_path = f'reports/scrape-report-{scrape_date}.csv'
else:
reports_dir = os.path.join(os.getcwd(), 'reports')
if not os.path.exists(reports_dir):
os.mkdir(reports_dir)
self.report_path = os.path.join(
reports_dir,
f'scrape-report-{scrape_date}.csv'
)
self.log_df = ScrapeReport.get_log_df(scrape_date)
@staticmethod
def get_log_df(scrape_date):
log_file = None
global BUCKET
root_dir = '/tmp' if BUCKET else os.getcwd()
log_dir = os.path.join(root_dir, 'logs')
log_dfs = []
for f in os.listdir(log_dir):
if not f.endswith('.csv'):
continue
f_base = os.path.basename(f)
date_index = re.search(r'\d', f_base).start()
log_date = f_base[date_index:].replace(".csv",'')
if log_date == scrape_date:
log_file = os.path.join(log_dir, f)
try:
_log_df = pd.read_csv(log_file)
except EmptyDataError:
# no errors logged in the file so delete it
os.remove(log_file)
continue
log_dfs.append(_log_df)
if log_dfs:
log_df = pd.concat(log_dfs)
else:
# no errors logged in any files
cols = ['Time', 'Level', 'Event Source', 'Message', 'Exc Info']
log_df = pd.DataFrame(columns=cols)
return log_df
@staticmethod
def prep_log_df(log_df):
err_type_count_by_source = pd.DataFrame(
log_df.groupby(
by=['Event Source', 'Level']
)['Time'].count()
).reset_index()
cols = ['Event Organizers', 'Error Level', 'Number of Errors']
err_type_count_by_source.columns = cols
err_df = err_type_count_by_source.pivot(
index='Event Organizers',
columns='Error Level',
values='Number of Errors'
).reset_index()
return err_df
@staticmethod
def prep_scrape_df(scrape_df):
source_count = pd.DataFrame(scrape_df.groupby(
by='Event Organizers')['Event Name'].count()).reset_index()
source_count.columns = ['Event Organizers', 'Number of Events Scraped']
return source_count
@staticmethod
def get_status(row):
'''statuses can include
# - broken
# - a single CRITICAL error
# - any presence in the logs AND no events found
# - operational
# - events found and no errors
# - operational but with errors
# - events found and at least one non-critical error
# - operational but no events found
# - no errors and no events for the event source
'''
try:
is_logged = int(row['Number of Errors'])
except ValueError:
is_logged = 0
try:
n_events = int(row['Number of Events Scraped'])
except ValueError:
n_events = 0
try:
n_crit = int(row['CRITICAL'])
except (KeyError, ValueError):
n_crit = 0
if n_crit >= 1:
return 'Broken'
elif is_logged and not n_events:
return 'Broken'
elif not is_logged and n_events:
return 'Operational'
elif is_logged and n_events:
return 'Operational, but with errors'
else:
return 'Status-determiner is broken'
@staticmethod
def append_nonevents(report_df):
event_organizers = report_df['Event Organizers'].tolist()
data = [report_df]
n_err_cols = len(report_df.columns) - 4
for _, v in event_source_map.items():
if v not in event_organizers:
new_row = [v, 0]
for _ in range(n_err_cols):
new_row.append(0)
new_row.extend([0, 'Operational, but no events found'])
_df = pd.DataFrame(new_row).transpose()
_df.columns = report_df.columns
data.append(_df)
df = pd.concat(data, axis=0).fillna(0)
return df
def make_scrape_report(self):
'''Create an excel report based on data scraped and the logs'''
err_df = ScrapeReport.prep_log_df(self.log_df)
source_count = ScrapeReport.prep_scrape_df(self.scrape_df)
report_df = pd.merge(source_count, err_df, how='outer')
log_levels = ['CRITICAL', 'ERROR', 'WARNING']
err_cols = [x for x in report_df.columns if x in log_levels]
if not err_cols:
report_df['Number of Errors'] = 0
else:
report_df['Number of Errors'] = report_df[err_cols].sum(axis=1)
report_df['Status'] = report_df.apply(ScrapeReport.get_status, axis=1)
df = ScrapeReport.append_nonevents(report_df)
if self.bucket:
csv_buffer = StringIO()
df.to_csv(csv_buffer, index=False)
data = csv_buffer.getvalue()
if object_key_exists(self.report_path):
# Don't put the report if it already exists.
# This makes the lambda idempotent for the lambda listening
# for this PUT:object S3 event.
pass
else:
put_object(data, self.report_path)
else:
df.to_csv(self.report_path, index=False)
return self.log_df
def make_reports(events, bucket=BUCKET):
scrape_date = events_to_csv(events)
organizers_to_csv(events)
venues_to_csv(events)
sr = ScrapeReport(events, scrape_date)
log_df = sr.make_scrape_report()
return log_df
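# -----------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module): with the BUCKET_NAME environment variable unset, events_to_csv()
# falls back to writing data/cap-nature-events-scraped-<date>.csv locally. The
# sample event below only fills a few of the expected columns; DictWriter
# leaves the remaining fields blank.
if __name__ == '__main__':
    sample_events = [{
        'Event Name': 'Sample bird walk',
        'Event Organizers': 'Sample Organizer',
        'Event Venue Name': 'Sample Park',
    }]
    print(events_to_csv(sample_events))  # prints the scrape date used in the filename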
|
[
"noreply@github.com"
] |
Frankie-Figz.noreply@github.com
|
8753a921a15f6a43bf864b793500b8df7df5a232
|
bc437dc74647765b51996f64b35fda3d047daf93
|
/2_Intermediate/day18_The_Hirst_Painting_Project/main.py
|
4c03978fb420c12c9f275227d28b734e5c0a907b
|
[] |
no_license
|
macosta-42/100_days_of_code
|
e06720d57b6ed870a3dd4fa4e6d019296206a08f
|
5b527dc18bae2ef556c26f653ef3c4badf94bb82
|
refs/heads/main
| 2023-05-22T03:26:02.422275
| 2021-06-10T10:31:26
| 2021-06-10T10:31:26
| 328,963,362
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,187
|
py
|
# import colorgram
#
# Extract 30 colors from an image.
# colors = colorgram.extract('image.jpg', 30)
#
# rgb_colors = []
#
# for color in colors:
# r = color.rgb.r
# g = color.rgb.g
# b = color.rgb.b
# new_color = (r, g, b)
# rgb_colors.append(new_color)
#
#
# print(rgb_colors)
import turtle as t
import random
color_list = [
(8, 16, 67),
(63, 8, 28),
(192, 70, 22),
(144, 11, 35),
(248, 237, 242),
(13, 45, 142),
(30, 103, 175),
(123, 162, 201),
(249, 216, 64),
(170, 16, 5),
(204, 71, 124),
(62, 34, 12),
(224, 135, 86),
(12, 45, 32),
(200, 174, 38),
(143, 194, 173),
(213, 74, 55),
(174, 50, 76),
(59, 161, 118),
(252, 206, 0),
(215, 134, 145),
(78, 111, 80),
(82, 111, 199),
(12, 100, 4),
(177, 185, 218),
(231, 166, 180),
(237, 171, 160)
]
tim = t.Turtle()
tim.hideturtle()
tim.speed(0)
t.colormode(255)
tim.penup()
pos_x = -250
pos_y = -250
for pos in range(10):
tim.setpos(pos_x, pos_y)
for dot in range(10):
tim.dot(20, random.choice(color_list))
tim.forward(50)
pos_y += 50
screen = t.Screen()
screen.exitonclick()
|
[
"macosta@student.42.fr"
] |
macosta@student.42.fr
|
7c92061e65b904281c64601cd3cdbc135f8bd3a3
|
bb5a27d39bca1d2bb3eeb752b04a43b71801a110
|
/Python/PostgreTools/export2kml.py
|
b7203bcc15df58a32821126896af370fade4f68a
|
[] |
no_license
|
hulaba/datatools
|
0ed6bb40c6d3c3ca389ede1b80fa8888e760ae66
|
538ddb1d92a3fa8c9d9d87289ae00b9234c2bbef
|
refs/heads/master
| 2021-05-31T09:14:35.105356
| 2011-12-03T03:01:33
| 2011-12-03T03:01:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,740
|
py
|
import os,string,codecs,unicodedata
from psycopg2 import connect
from osgeo import ogr
from xml.dom import minidom
from datetime import *
def export_gl_outline(basin_code,outpath):
kml_file=outpath+'gl_'+basin_code+'.kml'
conn=connect("host=localhost dbname=GGLIS user=postgres password=postgres")
curs=conn.cursor()
sql_str="select a.gid,askml(a.the_geom) from \"HKH_Glacial_Lakes_final\" a, \"HKH_Basin_Pfaf_v3_poly\" b "\
"where st_within(centroid(a.the_geom),b.the_geom) and b.lv3_code='"+basin_code+"' order by a.gid;"
curs.execute(sql_str)
rows=curs.fetchall()
##
impl=minidom.getDOMImplementation()
dom=impl.createDocument(None,'kml',None)
root=dom.documentElement
ns='http://earth.google.com/kml/2.1'
Document_node=dom.createElement('Document')
root.appendChild(Document_node)
Doc_name=dom.createElement('name')
Document_node.appendChild(Doc_name)
##add style
Style_node=dom.createElement('Style')
Document_node.appendChild(Style_node)
Style_node.setAttribute('id','Default_PolyStyle')
LineStyle=dom.createElement('LineStyle')
Style_node.appendChild(LineStyle)
color_node=dom.createElement('color')
LineStyle.appendChild(color_node)
Text=dom.createTextNode('ff0000ff')
color_node.appendChild(Text)
width_node=dom.createElement('width')
LineStyle.appendChild(width_node)
Text=dom.createTextNode('1')
width_node.appendChild(Text)
PolyStyle=dom.createElement('PolyStyle')
Style_node.appendChild(PolyStyle)
color_node=dom.createElement('color')
PolyStyle.appendChild(color_node)
Text=dom.createTextNode('ff0000ff')
color_node.appendChild(Text)
fill_node=dom.createElement('fill')
PolyStyle.appendChild(fill_node)
Text=dom.createTextNode('0')
fill_node.appendChild(Text)
outline_node=dom.createElement('outline')
PolyStyle.appendChild(outline_node)
Text=dom.createTextNode('1')
outline_node.appendChild(Text)
for i in range(len(rows)):
gid=rows[i][0]
kml=rows[i][1]
Placemark_node=dom.createElement('Placemark')
Document_node.appendChild(Placemark_node)
name_node=dom.createElement('name')
Placemark_node.appendChild(name_node)
Text=dom.createTextNode(str(gid))
name_node.appendChild(Text)
kml_dom=minidom.parseString(kml)
kml_node=kml_dom.documentElement
Placemark_node.appendChild(kml_node)
f=open(kml_file,'w')
writer=codecs.lookup('utf8')[3](f)
dom.writexml(writer,encoding='utf8')
writer.close()
curs.close()
conn.close()
print '---glacial lake outline exported!----'
def export_gl_centroid(basin_code,outpath):
kml_file=outpath+'gl_'+basin_code+'_p.kml'
conn=connect("host=localhost dbname=GGLIS user=postgres password=postgres")
curs=conn.cursor()
sql_str="select a.\"Gl_Class\",askml(centroid(a.the_geom)) from \"HKH_Glacial_Lakes_final\" a, \"HKH_Basin_Pfaf_v3_poly\" b "\
"where st_within(centroid(a.the_geom),b.the_geom) and b.lv3_code='"+basin_code+"' order by a.gid;"
curs.execute(sql_str)
rows=curs.fetchall()
##
impl=minidom.getDOMImplementation()
dom=impl.createDocument(None,'kml',None)
root=dom.documentElement
ns='http://earth.google.com/kml/2.1'
Document_node=dom.createElement('Document')
root.appendChild(Document_node)
Doc_name=dom.createElement('name')
Document_node.appendChild(Doc_name)
##add style
Style_node=dom.createElement('Style')
Document_node.appendChild(Style_node)
Style_node.setAttribute('id','Default_PolyStyle')
LineStyle=dom.createElement('LineStyle')
Style_node.appendChild(LineStyle)
color_node=dom.createElement('color')
LineStyle.appendChild(color_node)
Text=dom.createTextNode('ff0000ff')
color_node.appendChild(Text)
width_node=dom.createElement('width')
LineStyle.appendChild(width_node)
Text=dom.createTextNode('1')
width_node.appendChild(Text)
PolyStyle=dom.createElement('PolyStyle')
Style_node.appendChild(PolyStyle)
color_node=dom.createElement('color')
PolyStyle.appendChild(color_node)
Text=dom.createTextNode('ff0000ff')
color_node.appendChild(Text)
fill_node=dom.createElement('fill')
PolyStyle.appendChild(fill_node)
Text=dom.createTextNode('0')
fill_node.appendChild(Text)
outline_node=dom.createElement('outline')
PolyStyle.appendChild(outline_node)
Text=dom.createTextNode('1')
outline_node.appendChild(Text)
for i in range(len(rows)):
gid=rows[i][0]
kml=rows[i][1]
Placemark_node=dom.createElement('Placemark')
Document_node.appendChild(Placemark_node)
name_node=dom.createElement('name')
Placemark_node.appendChild(name_node)
Text=dom.createTextNode(str(gid))
name_node.appendChild(Text)
kml_dom=minidom.parseString(kml)
kml_node=kml_dom.documentElement
Placemark_node.appendChild(kml_node)
f=open(kml_file,'w')
writer=codecs.lookup('utf8')[3](f)
dom.writexml(writer,encoding='utf8')
writer.close()
curs.close()
conn.close()
print '---glacial lake centroid exported!----'
if __name__=='__main__':
basin_code='Ir91'
outpath='C:\\gl_class\\'
export_gl_outline(basin_code,outpath)
export_gl_centroid(basin_code,outpath)
print 'end'
|
[
"wulizong@18d743b0-f649-0410-97d6-f906a9d518df"
] |
wulizong@18d743b0-f649-0410-97d6-f906a9d518df
|
d923942c79de930927cb13329ae09b97438abfd4
|
b7536e7d366949a34902da4426e0693293b3b99d
|
/projs/scrapy/demobot/spiders/imgsp.py
|
b30c6a894d956137a6e60508d6b5ff6c9f6d7c91
|
[
"MIT"
] |
permissive
|
peacexie/python
|
55fe4220e7d92e989bd6b76082bd3c1ebd8e010a
|
de776979030fdd537a0a9ec6d6933bb8b6da46eb
|
refs/heads/master
| 2021-01-22T00:45:27.614636
| 2019-11-13T00:59:43
| 2019-11-13T00:59:43
| 102,193,239
| 2
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 777
|
py
|
# -*- coding:utf-8 -*-
import scrapy
from demobot.items import ImagespiderItem
class ImgspiderSpider(scrapy.Spider):
name = 'imgsp'
custom_settings = {
'ITEM_PIPELINES': {'demobot.pipelines.ImagespiderPipeline':300}
}
allowed_domains = ['lab.scrapyd.cn']
start_urls = [
'http://lab.scrapyd.cn/archives/55.html',
'http://lab.scrapyd.cn/archives/57.html',
]
def parse(self, response):
        item = ImagespiderItem()  # instantiate the item
        # note: this is a collection, i.e. multiple image URLs
        item['imgurl'] = response.css(".post img::attr(src)").extract()
        # use the post title as the name of the image set
item['imgname'] = response.css(".post-title a::text").extract_first()
yield item
pass
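# Added note: assuming this spider lives inside the `demobot` Scrapy project it
# imports from, it can be run from the project root with the standard command
# `scrapy crawl imgsp`; the custom ITEM_PIPELINES setting above routes the
# scraped items through ImagespiderPipeline.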
|
[
"xpigeon@163.com"
] |
xpigeon@163.com
|
2f1e39e5de40e2d9c364cac742d86ea64786c212
|
9886b515b02e595fbcea77ccd27e43d2c6c5261a
|
/tools/apigee-analytics-load-generator-demo/v2/load-generator/backend/services/loyalty/loyalty.py
|
acb7d6bd0c0e19b6b30e0729a97ffc9322451488
|
[
"Apache-2.0"
] |
permissive
|
igalonso/devrel
|
783b2dcd6c01d6f1517a0ad333666bc6c4c18afa
|
aecd1d281bc21aedb9d88611a2a61ed46a30f3ed
|
refs/heads/main
| 2023-06-16T03:18:25.113785
| 2021-07-06T14:51:32
| 2021-07-06T14:51:32
| 383,504,044
| 0
| 0
|
Apache-2.0
| 2021-07-06T14:50:02
| 2021-07-06T14:50:02
| null |
UTF-8
|
Python
| false
| false
| 1,191
|
py
|
import time
import random
def getTotalLoyaltyPoints(id):
sleepTime = random.randint(1,50)
print("random sleep: ",sleepTime)
if id == 1:
points = {
"total_rewards_points":43526244,
"healthy_choice_points":665446,
"transaction_id":"234099-324234-4324532"
}
elif id == 2:
points = {
"total_rewards_points":53465,
"healthy_choice_points":665,
"transaction_id":"234099-324234-4324532"
}
elif id == 3:
points = {
"total_rewards_points":4356,
"healthy_choice_points":54,
"transaction_id":"234099-324234-4324532"
}
elif id == 4:
points = {
"total_rewards_points":788769,
"healthy_choice_points":23,
"transaction_id":"234099-324234-4324532"
}
elif id == 5:
points = {
"total_rewards_points":8987087,
"healthy_choice_points":543,
"transaction_id":"234099-324234-4324532"
}
else:
return False
if sleepTime == id:
print("sleeping")
time.sleep (random.randint(0,7))
return points
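# Hypothetical local check (added for illustration; not part of the demo
# service): ids 1-5 return canned point totals, anything else returns False,
# and a call may occasionally sleep for a few seconds by design.
if __name__ == "__main__":
    print(getTotalLoyaltyPoints(2))
    print(getTotalLoyaltyPoints(99))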
|
[
"igalonso@gmail.com"
] |
igalonso@gmail.com
|
d5a21b5befbc6ab95420d33922549f98623497f7
|
7e7db897707bdf2f76467b640f89630ba6dd96c2
|
/network.py
|
dd2f59f69b391112be783a2df0cc08adfc717694
|
[
"MIT"
] |
permissive
|
sk364/handwritten-digit-recognizer
|
f72439ad95396bdf704dea08f5e2eab5dc92bc56
|
ece86d7f6b78bfaad69028b30879a6e92b1e50e2
|
refs/heads/master
| 2021-01-19T19:57:38.855010
| 2017-04-27T11:09:13
| 2017-04-27T11:09:13
| 88,470,675
| 0
| 1
| null | 2017-04-27T11:09:14
| 2017-04-17T04:41:31
|
Python
|
UTF-8
|
Python
| false
| false
| 7,722
|
py
|
import os
import numpy as np
import random
from activations import sigmoid, sigmoid_prime
class NeuralNetwork(object):
def __init__(self, sizes=list(), learning_rate=0.8, mini_batch_size=10,
epochs=10, lmbda=5.0):
"""Initialize a Neural Network model.
Parameters
----------
sizes : list, optional
            A list of integers specifying the number of neurons in each layer.
            Not required if a pretrained model is used.
        learning_rate : float, optional
            Learning rate for gradient descent optimization. Defaults to 0.8.
        mini_batch_size : int, optional
            Size of each mini batch of training examples as used by Stochastic
            Gradient Descent. Denotes after how many examples the weights
            and biases would be updated. Default size is 10.
"""
# Input layer is layer 0, followed by hidden layers layer 1, 2, 3...
self.sizes = sizes
self.num_layers = len(sizes)
# First term corresponds to layer 0 (input layer). No weights enter the
# input layer and hence self.weights[0] is redundant.
self.weights = [np.array([0])] + [np.random.randn(y, x) for y, x in
zip(sizes[1:], sizes[:-1])]
# Input layer does not have any biases. self.biases[0] is redundant.
self.biases = [np.random.randn(y, 1) for y in sizes]
# Input layer has no weights, biases associated. Hence z = wx + b is not
# defined for input layer. self.zs[0] is redundant.
self._zs = [np.zeros(bias.shape) for bias in self.biases]
# Training examples can be treated as activations coming out of input
# layer. Hence self.activations[0] = (training_example).
self._activations = [np.zeros(bias.shape) for bias in self.biases]
self.mini_batch_size = mini_batch_size
self.epochs = epochs
self.eta = learning_rate
self.lmbda = lmbda
def fit(self, training_data, validation_data=None):
"""Fit (train) the Neural Network on provided training data. Fitting is
carried out using Stochastic Gradient Descent Algorithm.
Parameters
----------
training_data : list of tuple
A list of tuples of numpy arrays, ordered as (image, label).
validation_data : list of tuple, optional
Same as `training_data`, if provided, the network will display
validation accuracy after each epoch.
"""
for epoch in range(self.epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k + self.mini_batch_size] for k in
range(0, len(training_data), self.mini_batch_size)]
for mini_batch in mini_batches:
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
for x, y in mini_batch:
self._forward_prop(x)
delta_nabla_b, delta_nabla_w = self._back_prop(x, y)
nabla_b = [nb + dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw + dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [
(1-self.eta*(self.lmbda/len(training_data)))*w-(self.eta/len(mini_batch))*dw
for w, dw in zip(self.weights, nabla_w)]
self.biases = [
b-(self.eta/len(mini_batch))*db
for b, db in zip(self.biases, nabla_b)]
if validation_data:
                # NOTE: dividing by 100.0 only yields a percentage when the
                # validation set holds 10,000 examples (as with MNIST).
                accuracy = self.validate(validation_data) / 100.0
print("Epoch {0}, accuracy {1} %.".format(epoch + 1, accuracy))
else:
print("Processed epoch {0}.".format(epoch))
def validate(self, validation_data):
"""Validate the Neural Network on provided validation data. It uses the
number of correctly predicted examples as validation accuracy metric.
Parameters
----------
validation_data : list of tuple
Returns
-------
int
Number of correctly predicted images.
"""
validation_results = [(self.predict(x) == y) for x, y in validation_data]
return sum(result for result in validation_results)
def predict(self, x):
"""Predict the label of a single test example (image).
Parameters
----------
x : numpy.array
Returns
-------
int
Predicted label of example (image).
"""
self._forward_prop(x)
return np.argmax(self._activations[-1])
def _forward_prop(self, x):
self._activations[0] = x
for i in range(1, self.num_layers):
self._zs[i] = (
self.weights[i].dot(self._activations[i - 1]) + self.biases[i]
)
self._activations[i] = sigmoid(self._zs[i])
def _back_prop(self, x, y):
nabla_b = [np.zeros(bias.shape) for bias in self.biases]
nabla_w = [np.zeros(weight.shape) for weight in self.weights]
#error = (self._activations[-1] - y) * sigmoid_prime(self._zs[-1])
error = (self._activations[-1] - y)
nabla_b[-1] = error
nabla_w[-1] = error.dot(self._activations[-2].transpose())
for l in range(self.num_layers - 2, 0, -1):
error = np.multiply(
self.weights[l + 1].transpose().dot(error),
sigmoid_prime(self._zs[l])
)
nabla_b[l] = error
nabla_w[l] = error.dot(self._activations[l - 1].transpose())
return nabla_b, nabla_w
def load(self, filename='model.npz'):
"""Prepare a neural network from a compressed binary containing weights
and biases arrays. Size of layers are derived from dimensions of
numpy arrays.
Parameters
----------
filename : str, optional
Name of the ``.npz`` compressed binary in models directory.
"""
npz_members = np.load(os.path.join(os.curdir, 'models', filename))
self.weights = list(npz_members['weights'])
self.biases = list(npz_members['biases'])
# Bias vectors of each layer has same length as the number of neurons
# in that layer. So we can build `sizes` through biases vectors.
self.sizes = [b.shape[0] for b in self.biases]
self.num_layers = len(self.sizes)
# These are declared as per desired shape.
self._zs = [np.zeros(bias.shape) for bias in self.biases]
self._activations = [np.zeros(bias.shape) for bias in self.biases]
# Other hyperparameters are set as specified in model. These were cast
# to numpy arrays for saving in the compressed binary.
self.mini_batch_size = int(npz_members['mini_batch_size'])
self.epochs = int(npz_members['epochs'])
self.eta = float(npz_members['eta'])
def save(self, filename='model.npz'):
"""Save weights, biases and hyperparameters of neural network to a
compressed binary. This ``.npz`` binary is saved in 'models' directory.
Parameters
----------
filename : str, optional
Name of the ``.npz`` compressed binary in to be saved.
"""
np.savez_compressed(
file=os.path.join(os.curdir, 'models', filename),
weights=self.weights,
biases=self.biases,
mini_batch_size=self.mini_batch_size,
epochs=self.epochs,
eta=self.eta
)
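# -----------------------------------------------------------------------------
# Hypothetical usage sketch (added for illustration; not part of the original
# module): the class expects training examples as (input, label) tuples of
# column-shaped numpy arrays, e.g. 784x1 image vectors and 10x1 one-hot labels
# for MNIST-style digits. The data below is random noise and only demonstrates
# the call sequence.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    fake_data = [(rng.rand(784, 1), np.eye(10)[:, [rng.randint(10)]])
                 for _ in range(32)]
    net = NeuralNetwork(sizes=[784, 30, 10], epochs=1, mini_batch_size=8)
    net.fit(fake_data)
    print(net.predict(fake_data[0][0]))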
|
[
"skad5455[at]gmail.com"
] |
skad5455[at]gmail.com
|
4fd1ad6f5adbd7ceebecd3beab071b89dc4786e0
|
4da2982f763d784b3e42d9968b3e726b7e70d06b
|
/Program Fungsi 5.py
|
6f673fdff6775308a4177d7841ccf1165edd7594
|
[] |
no_license
|
adelliamaharanip/Python-X2-AdelliaMaharaniPutri
|
acb5c2ade4a5a2de2e1d89e5fac3c2337722b168
|
23e48c14a5b67afaa89583777aa9541ef2376917
|
refs/heads/master
| 2023-04-19T16:31:27.152809
| 2021-05-06T23:38:58
| 2021-05-06T23:38:58
| 335,598,686
| 3
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 448
|
py
|
# Function definitions
def penjumlahan( *vartuple ):
    print ("Jumlahnya adalah: ")  # "The total is: "
jumlah = 0
for var in vartuple:
jumlah = jumlah + var
print(jumlah)
def rata(*vartuple):
print("Rata-ratanya adalah : ")
rerata = 0
tot = 0
for var in vartuple:
tot = tot + var
rerata = tot / len(vartuple)
print(rerata)
# Four arguments
penjumlahan( 10, 30, 50, 70 )
rata( 10, 30, 50, 70 )
|
[
"noreply@github.com"
] |
adelliamaharanip.noreply@github.com
|
41e72a68572be0fbf05ea4f8cca49a634c8412f1
|
101afac6526d9756905dc90467cef894fff3ab40
|
/rpa_basic/1_excel/1_create_file.py
|
ee2cd8040c44038f170e7d2a29a1a25ebed522a5
|
[] |
no_license
|
IMRaccoon/Python-study
|
a1353a6b6fdbe3ea66db667a9199e43f1582e4fb
|
e60a021d2968f04d6098acf751c20283ea193bb4
|
refs/heads/main
| 2023-07-09T22:18:04.527124
| 2021-08-11T19:10:33
| 2021-08-11T19:10:33
| 320,856,555
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 219
|
py
|
from openpyxl import Workbook
wb = Workbook()  # create a new workbook
ws = wb.active  # get the currently active sheet
ws.title = "NadoSheet"  # rename the sheet
wb.save('sample.xlsx')
wb.close()
|
[
"whdies816@gmail.com"
] |
whdies816@gmail.com
|
7a3726afe8b02197bef84696ceada8f8fae392c1
|
ea68fad8ce90c488fe9846d6fe636ef94d35fc8c
|
/pyiele/fetchFunctionData.py
|
497f218a1d575941d87368f2eee3ad8259c60928
|
[
"NCSA",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
runtimeverification/iele-semantics
|
03eb1bf28c1afbf682bb83f86f269f36f374adaf
|
e030f7592753ee3dd6452757a5fdcfe3baacf037
|
refs/heads/master
| 2023-08-04T15:02:32.863523
| 2023-07-20T13:54:42
| 2023-07-20T13:54:42
| 105,186,520
| 131
| 39
|
NOASSERTION
| 2023-07-18T16:39:18
| 2017-09-28T18:44:13
|
HTML
|
UTF-8
|
Python
| false
| false
| 2,453
|
py
|
#!/usr/bin/python3
import sys
import os
import json
import re
from Crypto.Hash import keccak
def generate_signature(name: str, inputs: list) -> str:
    ''' A function signature looks like `name(arg_type1,arg_type2)` '''
    args = "(" + ','.join([x["type"] for x in inputs]) + ")"
    return name + args
def process_file(file: str) -> dict:
''' Creates a dict containing information about the names and inputs of the constructor and functions '''
result = []
with open(file, "r") as in_file:
in_dict = json.load(in_file)
for (contract_key, contract_dict) in in_dict["contracts"].items():
out_dict = {}
contract_name = contract_key
out_dict["contractName"] = contract_name
out_dict["contractConstructor"] = [
{
"name": contract_name,
"input": [
{"name": y["name"], "type": y["type"]} for y in x["inputs"]
]
}
for x in contract_dict["abi"]
if x["type"] == "constructor"
]
out_dict["functionData"] = [
{
"name": x["name"],
"signatureHash": generate_signature(x["name"], x["inputs"]),
"input": [
{"name": y["name"], "type": y["type"]} for y in x["inputs"]
],
"output": [
{"name": y["name"], "type": y["type"]} for y in x["outputs"]
],
}
for x in contract_dict["abi"]
if x["type"] == "function"
]
result.append(out_dict)
return result
def process_folder(folder_path: str) -> list:
''' Runs process_file on all files from a folder '''
result = []
for file_name in os.listdir(folder_path):
        if not os.path.isfile(os.path.join(folder_path, file_name)):
            continue
result.append(process_file(os.path.join(folder_path, file_name)))
return result
def fetch_function_data(arg: str) -> dict:
''' Returns information about all the functions and constructors of Solidity Contracts '''
if os.path.isdir(arg):
result = process_folder(arg)
elif os.path.isfile(arg):
result = process_file(arg)
else:
raise Exception("Invalid input", arg)
sys.exit(2)
return result
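# Hypothetical command-line style usage (added for illustration; not part of
# the original module): point fetch_function_data() at a compiler-artifact
# JSON file (a top-level "contracts" map with per-contract "abi" lists) or at
# a folder of such files.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        print(json.dumps(fetch_function_data(sys.argv[1]), indent=2))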
|
[
"noreply@github.com"
] |
runtimeverification.noreply@github.com
|
15b0244d6e97ea5157fc8b3fca78e83c8ad29fa7
|
08d4d33f7f44751199dcd6e9c6cbe940c8d20e6b
|
/movie_review.py
|
df748e0e45481f0df2c9264c3a80002da6628c37
|
[] |
no_license
|
yogesh-kamble/kaggle-submission
|
385428da580430a7aef40e03aec004340967620c
|
e970c789a304e78b34f8c69e5dbe870428d8d771
|
refs/heads/master
| 2021-03-12T20:10:49.040115
| 2015-01-18T16:28:31
| 2015-01-18T16:28:31
| 29,432,153
| 0
| 5
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,510
|
py
|
import csv
#from textblob.classifiers import NaiveBayesClassifier
#from nltk import NaiveBayesClassifier, classify
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsRestClassifier, OneVsOneClassifier
import numpy as np
def get_features(item):
    # NOTE: relies on a `docclass` helper module that is not imported in this
    # file; this function is not used by process_csv() below.
    words = docclass.getwords(item)
    return dict((word, 'true') for word in words)
def get_accuracy(cl, test_set):
success = 0
for feature, label in test_set:
guess = cl.classify(feature)
if guess == label:
success+=1
    return float(success) / len(test_set)  # avoid Python 2 integer division
def process_csv():
# Train the classifier
train_set = []
test_set = []
##############################TRAIN DATA#####################
with open('train.tsv', 'r') as f:
sentimentreader = csv.reader(f, delimiter='\t')
header = sentimentreader.next()
cnt = 0
for row in sentimentreader:
sentence = row[2]
sentiment = row[3]
if (cnt < 30):
test_set.append((sentence, sentiment))
elif (cnt > 30):
#cl.train(sentence, sentiment)
train_set.append((sentence, sentiment))
cnt += 1
sentence_set = [sentence for sentence, label in train_set]
label_set = [int(label) for sentence, label in train_set]
test_sentence = [sentence for sentence, label in test_set]
label_test = [int(label) for sentence, label in test_set]
text_clf = scikit_learn(sentence_set, label_set)
#Predict the test data
#predicted = text_clf.predict(test_sentence)
#print np.mean(predicted == np.asarray(label_test))
#for doc, category in zip(test_sentence, predicted):
# print('%r => %s' % (doc, category))
#cl = NaiveBayesClassifier.train(train_set)
#############################TEST DATA##################
# Read test data and predict phrase based on train set
with open('test.tsv', 'r') as f:
testreader = csv.reader(f, delimiter='\t')
submission = open('scikit_submission.csv', 'w')
csvwriter = csv.writer(submission, delimiter=',')
csvwriter.writerow(['PhraseId', 'Sentiment'])
header = testreader.next()
phraseid_list = []
phrase_list = []
for row in testreader:
phraseid = row[0]
phrase = row[2]
phraseid_list.append(phraseid)
phrase_list.append(phrase)
#rating = cl.classify(phrase, default='0')
#write_row = [str(phraseid), str(rating)]
#csvwriter.writerow(write_row)
predicted = text_clf.predict(np.asarray(phrase_list))
for i in range(len(predicted)):
sentiment_label = predicted[i]
phraseid = phraseid_list[i]
csvwriter.writerow([str(phraseid), str(sentiment_label)])
return
def scikit_learn(train_set, train_labels):
text_clf = Pipeline([('vect', CountVectorizer()),
('tfidf', TfidfTransformer()),
('clf', OneVsOneClassifier(LinearSVC())),
])
X_train = np.asarray(train_set)
text_clf = text_clf.fit(X_train, np.asarray(train_labels))
return text_clf
if __name__ == '__main__':
process_csv()
#scikit_learn(train_set)
|
[
"yogesh.kamble102@gmail.com"
] |
yogesh.kamble102@gmail.com
|
cc9d70872b0b7736cd51c99e19f150a06d3ed609
|
2a94e94c4e3f75aa1e75a6a4287def0d886dc2f3
|
/accounts/migrations/0002_order_product.py
|
a8d54fe9216f678e13075c89b0434259b511d539
|
[] |
no_license
|
zero-one-0/CRM
|
bf721d456eca9da4003b948483589cc382959dfc
|
c30f777c8d3413d98d53ec9d12fe4c2924e9dfd7
|
refs/heads/main
| 2023-05-09T04:54:41.039244
| 2021-06-03T14:50:42
| 2021-06-03T14:50:42
| 373,542,911
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,359
|
py
|
# Generated by Django 3.2.3 on 2021-06-02 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
('status', models.CharField(choices=[('Pending', 'Pending'), ('Out for delivery', 'Out for delivery'), ('Delivered', 'Delivered')], max_length=200, null=True)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=200, null=True)),
('price', models.FloatField(null=True)),
('category', models.CharField(choices=[('Indoor', 'Indoor'), ('Out Door', 'Out Door')], max_length=200, null=True)),
('description', models.CharField(max_length=200, null=True)),
('date_created', models.DateTimeField(auto_now_add=True, null=True)),
],
),
]
|
[
"zeros_ones@pop-os.localdomain"
] |
zeros_ones@pop-os.localdomain
|
63a3859655163da8d1b68d6100318174b51087b3
|
fa69eadde7b449647ebd976214d2f99886b6db18
|
/FireHydrant/common/enum/task/type.py
|
da3d9f0df70ac9947f6d9e55b12919b3b7ed67be
|
[] |
no_license
|
shoogoome/FireHydrant
|
0da1d6e06aa9e853837f6435a30ac4ef73118764
|
7467cd66e1fc91f0b3a264f8fc9b93f22f09fe7b
|
refs/heads/master
| 2020-06-21T01:29:25.711595
| 2019-12-18T00:31:01
| 2019-12-18T00:31:01
| 197,309,304
| 4
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 265
|
py
|
# -*- coding: utf-8 -*-
# coding:utf-8
from common.core.dao.enumBase import EnumBase
class TaskTypeEnum(EnumBase):
PERSONAL = 0
TEAM = 1
__default__ = PERSONAL
    __desc__ = {
        'PERSONAL': '个人任务',  # "personal task"
        'TEAM': '团队任务',  # "team task"
    }
|
[
"shoogoome@sina.com"
] |
shoogoome@sina.com
|
3ae079ee27ae66672e7f2c9fb20860d317f0af8e
|
3e6c00aa1be93f36e2fc674ccdf6773f076424a9
|
/blogapp/migrations/0002_alter_blog_id.py
|
be8890f29e3ddf6cca398ba49191e17df92e771c
|
[] |
no_license
|
eunji0221/0625
|
2da984a7f3a1a2baf3abe734d827f3671663752a
|
a3afb277d0d54725bfe08296f3fabf495b8a54a8
|
refs/heads/main
| 2023-06-10T05:39:11.160829
| 2021-07-02T08:29:18
| 2021-07-02T08:29:18
| 382,278,568
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 430
|
py
|
# Generated by Django 3.2.2 on 2021-06-25 10:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blogapp', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='blog',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
[
"{em309@naver.com}"
] |
{em309@naver.com}
|
bf5759cafd97195415fe90bc53d0e1a21911ab09
|
63a7376693125bfeb9f58961539bb516e3102206
|
/prob50.py
|
1027ab724c5ae172da5be61fde484505b8a5d8c1
|
[] |
no_license
|
guruprasadpr/Euler_Python
|
8108283ab5eb4ca12491ae62d613c34bb5696c3c
|
208604206f41fddf37f2cd4564eba439aaa23a5e
|
refs/heads/master
| 2021-01-01T17:33:37.406142
| 2013-03-27T07:51:48
| 2013-03-27T07:51:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 77
|
py
|
#!/usr/bin/python
from gprfuncs import *
lprm=getPrimeBel(100)
print lprm
|
[
"guruprasadpr@yahoo.com"
] |
guruprasadpr@yahoo.com
|
de3d50a87a32fe3546e5f17888e1fe94e450ebf7
|
3a23439178cb24d0b4985db17623227d09711535
|
/optdemo.py
|
1cc20a576a405094a9cc0c0a5e213c7ecff454bb
|
[
"MIT"
] |
permissive
|
raeidsaqur/ml-gradientdescent
|
4ff6f4825e305069d0de8fc30877db70abe181f9
|
a061423c8e9c131ad05970d7cbaf2f42f646db1c
|
refs/heads/master
| 2021-07-01T16:27:34.131577
| 2017-09-16T04:30:26
| 2017-09-16T04:30:26
| 103,724,491
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,713
|
py
|
import numpy as np
import pylab
from scipy.optimize import line_search
def steepest_descent(grad_fun,params,num_iters, *varargs):
## Learning Rates
#eta = 0.1
eta = 2
#eta = 3
## Momentum
alpha=0.7
momentum=True
d = np.ones(params.shape)
d = d / np.linalg.norm(d)
mom = np.zeros(params.shape)
for i in range(num_iters):
grad = grad_fun(params,*varargs)
params_old = params
if momentum:
# Add momentum to the update
mom = -eta*grad + alpha*mom
else:
# Just use the gradient
mom = -eta*grad
params = params + mom
pylab.plot([params_old[0],params[0]],[params_old[1],params[1]],'-k',lw=2)
raw_input("Press Enter to continue...")
def ls_fun(params,A):
return ls(params,A)[0]
def ls_grad(params,A):
return ls(params,A)[1]
def ls(params,A):
f = 0.5*np.dot(params,A).dot(params)
df = np.dot(A,params)
return f,df
def ls_contour(X,Y,A):
x = X.ravel()[:,None]
y = Y.ravel()[:,None]
data = np.hstack((x,y))
z = 0.5*(np.dot(data,A)*data).sum(1)
return z.reshape(X.shape)
if __name__ == '__main__':
np.random.seed(0)
A = np.random.randn(2,2)
A = np.dot(A.T,A)
A = np.dot(A.T,A)
A = A / np.linalg.norm(A)
x = np.linspace(-5,5,100)
X,Y = np.meshgrid(x,x)
Z = ls_contour(X,Y,A)
#Z = rosenbrock_contour(x)
pylab.ion()
pylab.contour(X,Y,Z,100)
pylab.show()
init_params = np.array([4,-4])
#init_params = np.array([-3,-4])
pylab.plot(init_params[0],init_params[1],'.r',ms=25)
raw_input("Press Enter to continue...")
steepest_descent(ls_grad,init_params,1000,A)
|
[
"raeidsaqur@gmail.com"
] |
raeidsaqur@gmail.com
|
f988ab25a0b885c399e49b122f64041b27982268
|
d477a7c859a49b2bcb7e23a8f24f6f1cb715643c
|
/lib/colorPuzzle.py
|
1def83aaaffe068a8df071e533db7913f7ff0d44
|
[
"MIT"
] |
permissive
|
scriptslay3r/MyRose
|
3e691f09bce695a75c5229d64db7893d943a7f83
|
b716969b6bb424be3125c6370b0c9f450cf76151
|
refs/heads/master
| 2022-10-01T23:40:49.038235
| 2020-06-09T12:53:47
| 2020-06-09T12:53:47
| 259,688,194
| 1
| 0
|
MIT
| 2020-06-09T12:53:48
| 2020-04-28T16:12:09
|
HTML
|
UTF-8
|
Python
| false
| false
| 4,972
|
py
|
import pygame, sys, random
from pygame.locals import *
pygame.init()
tickTock = pygame.time.Clock()
FPS = 30
WINLENGTH = 500
WINBREADTH = 500
BOXSIZE = 40
GAPSIZE = 7
BOARDLENGTH = 5
BOARDBREADTH = 5
XMARGIN = (WINLENGTH - (BOXSIZE + GAPSIZE) * BOARDLENGTH) / 2
YMARGIN = (WINBREADTH - (BOXSIZE + GAPSIZE) * BOARDBREADTH) / 2
assert XMARGIN > 0 and YMARGIN > 0, 'The margins can never be negative'
#Colors---------------------( R, G, B)
RED = pygame.Color(255, 0, 0)
LIGHTRED = pygame.Color(255, 138, 138)
GREEN = pygame.Color( 0, 255, 0)
LIGHTGREEN = pygame.Color(138, 255, 138)
BLUE = pygame.Color( 0, 0, 255)
LIGHTBLUE = pygame.Color(138, 138, 255)
BKGD = pygame.Color(255, 255, 255)  # the background color (white)
ALLCOLORS = (RED, GREEN, BLUE, LIGHTRED, LIGHTGREEN, LIGHTBLUE)
R = G = B = 0
def getFuckingBoard():
global R, G, B
COLORS = ALLCOLORS[:3]
result = []
for x in range(BOARDLENGTH):
col = []
for y in range(BOARDBREADTH):
rand = random.randint(0, 1000) % 3
col.append(COLORS[rand])
if rand == 0:
R += 1
elif rand == 1:
G += 1
else:
B += 1
result.append(col)
return result
def showFuckingBoard():
for x in range(BOARDLENGTH):
for y in range(BOARDBREADTH):
COORDINATE = getXYofBox(x, y)
pygame.draw.rect(DISPLAY, BOARD[x][y], (COORDINATE[0], COORDINATE[1], BOXSIZE, BOXSIZE))
def getXYofBox(x, y):
return (XMARGIN + x * (BOXSIZE + GAPSIZE), YMARGIN + y * (BOXSIZE + GAPSIZE))
def getBoxAtPixel(mousex, mousey):
for x in range(BOARDLENGTH):
for y in range(BOARDBREADTH):
fuckinRect = pygame.Rect(XMARGIN + x * (BOXSIZE + GAPSIZE), YMARGIN + y * (BOXSIZE + GAPSIZE), BOXSIZE, BOXSIZE)
if fuckinRect.collidepoint((mousex, mousey)):
return (x, y)
return (None, None)
def highLightBox(BOXX, BOXY):
if BOARD[BOXX][BOXY] == RED:
BOARD[BOXX][BOXY] = LIGHTRED
elif BOARD[BOXX][BOXY] == GREEN:
BOARD[BOXX][BOXY] = LIGHTGREEN
elif BOARD[BOXX][BOXY] == BLUE:
BOARD[BOXX][BOXY] = LIGHTBLUE
def changeFuckingColor(BOXX, BOXY):
if BOARD[BOXX][BOXY] == LIGHTRED:
BOARD[BOXX][BOXY] = LIGHTGREEN
elif BOARD[BOXX][BOXY] == LIGHTGREEN:
BOARD[BOXX][BOXY] = LIGHTBLUE
elif BOARD[BOXX][BOXY] == LIGHTBLUE:
BOARD[BOXX][BOXY] = LIGHTRED
### Mouse hover currently indicates which color to print
### Reuse this program to have three basic squares that show their color and speak it aloud
def resetFuckingBoard():
for BOXX in range(BOARDLENGTH):
for BOXY in range(BOARDBREADTH):
if BOARD[BOXX][BOXY] == LIGHTRED:
BOARD[BOXX][BOXY] = RED
print("The Color was Red")
elif BOARD[BOXX][BOXY] == LIGHTGREEN:
BOARD[BOXX][BOXY] = GREEN
print("The Color was green")
elif BOARD[BOXX][BOXY] == LIGHTBLUE:
BOARD[BOXX][BOXY] = BLUE
print("The Color Is Blue")
def hasWon():
BASE = BOARD[0][0]
for x in range(BOARDLENGTH):
for y in range(BOARDBREADTH):
if BASE != BOARD[x][y]:
return False
return True
def wonAnimation():
pass
def predatorTry():
stepsR = 2 * G + B
stepsG = 2 * B + R
stepsB = 2 * R + G
steps = [stepsR, stepsG, stepsB]
steps.sort()
return steps[0]
def main():
global BOARD, DISPLAY
DISPLAY = pygame.display.set_mode((WINLENGTH, WINBREADTH))
pygame.display.set_caption("Color Puzzle")
DISPLAY.fill(BKGD)
BOARD = getFuckingBoard()
predator_try = predatorTry()
showFuckingBoard()
pygame.display.update()
PREVIOUS = (None, None)
mousex, mousey = 0, 0
while True:
CLICKED = False
DISPLAY.fill(BKGD)
for event in pygame.event.get():
if event.type == QUIT or (event.type == KEYUP and event.key == K_ESCAPE):
pygame.quit()
sys.exit()
elif event.type == MOUSEMOTION:
mousex, mousey = event.pos
elif event.type == MOUSEBUTTONUP:
mousex, mousey = event.pos
CLICKED = True
BOXX, BOXY = getBoxAtPixel(mousex, mousey)
if BOXX != None and BOXY != None:
PREVIOUS = (BOXX, BOXY)
highLightBox(BOXX, BOXY)
if CLICKED:
changeFuckingColor(BOXX, BOXY)
highLightBox(BOXX, BOXY)
else:
resetFuckingBoard()
showFuckingBoard()
pygame.display.update()
if hasWon():
wonAnimation()
print ('You Won!!!, The Predator would have done it in just %d tries' %(predator_try))
BOARD = getFuckingBoard()
if __name__ == '__main__':
main()
|
[
"noreply@github.com"
] |
scriptslay3r.noreply@github.com
|
43daa47323e7acbe7a228bde6ae71b319b1d9a2d
|
549069741a2ab84f9dea7b2a95b5e1024ab9c1e5
|
/RecklessPilot.py
|
2d3154f878c4b65adc2b6ae3a0b1cdba8317bf78
|
[] |
no_license
|
fitzscott/BattleCruisers
|
869a7938f7130c85b9fc485054e64c988373b7b5
|
26c2dd3deca3a9064459dd3cab91f3a8d12602a3
|
refs/heads/master
| 2020-05-22T15:19:39.757344
| 2017-04-20T13:14:52
| 2017-04-20T13:14:52
| 84,698,576
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,816
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 11 22:05:57 2017
@author: bushnelf
"""
import Card
class RecklessPilot(Card.Card):
"""
Reckless Pilot - discard a card, get 4 VP
"""
def __init__(self):
Card.Card.__init__(self, "Reckless Pilot", 11)
self.add_symbol(self.Symbols[2])
def main_effect(self, game, pbidx):
"""
Discard 1 card + get 4 VP
"""
myboard = game.playerboards[pbidx]
# cards that prevent main effects from causing discards
# also cause this card's main effect not to discard itself.
if not myboard.defense(game, pbidx, ["card_discard"]):
card = myboard.player.choosecardtodiscard(game, pbidx,
["hand", "recovery"])
myboard.discard(card, ["hand", "recovery"])
myboard.victorypoints += 4
def clash_effect(self, game, pbidx):
myboard = game.playerboards[pbidx]
card = myboard.player.choosecardtodiscard(game, pbidx,
["hand", "inplay"])
myboard.discard(card, ["hand", "inplay"])
def end_of_turn_effect(self, game, pbidx):
pass
if __name__ == '__main__':
rp = RecklessPilot()
print("Created " + rp.title + " card.")
if "People" in rp.symbols:
print("Contains People symbol.")
import Game
import RandomComputerPlayer
g = Game.Game(3)
zp = RandomComputerPlayer.RandomComputerPlayer("Zero to hero")
g.playerboards[0].player = zp
g.playerboards[1].player = zp
g.playerboards[2].player = zp
c1 = Card.Card("No such card", 88)
c2 = Card.Card("Still no such card", 89)
c3 = Card.Card("Again no such card", 90)
g.addtocardlist(rp)
g.addtocardlist(c1)
g.addtocardlist(c2)
g.addtocardlist(c3)
g.sendcardlisttoboards()
g.playerboards[2].readytoplay(rp)
# g.playallcards()
g.playcards()
print("After 1 reckless pilot:")
print(g.playerboards[2])
g.playerboards[0].readytoplay(rp)
g.playerboards[1].readytoplay(rp)
# manually move the 2nd player's hand to RZ
tomv = []
for card in g.playerboards[1].hand:
# print("Checking card " + card.title)
if card.title != "Reckless Pilot": # shouldn't be
tomv.append(card)
for card in tomv:
# print("Moving " + card.title + " from player 1 to RZ")
g.playerboards[1].recoveryzone.append(card)
g.playerboards[1].hand.remove(card)
print("Before 3 reckless pilots:")
print(g.playerboards[0])
print(g.playerboards[1])
print(g.playerboards[2])
# g.playallcards()
g.playcards()
print("After 3 reckless pilots:")
print(g.playerboards[0])
print(g.playerboards[1])
print(g.playerboards[2])
|
[
"bushnelf@gmail.com"
] |
bushnelf@gmail.com
|
aea3a9a0fed5a05744f4626fdc556ebdf7532d35
|
7ad2cca42292067c2707f6f49722fe9b568c0539
|
/no-hashing-methods/CMDN_IJCAI2016-master/deepnet-master/deepnet/fx_util.py
|
d12e60f52900eac2a7b8d44e42165d9593d5c2a2
|
[
"BSD-3-Clause"
] |
permissive
|
qiqi545/gcn-cross-modal
|
4de16de800620ac2ed96fc5d8ca7409d20255b25
|
29e9b0d0e9abebc0d359c6a727d180d57f45832b
|
refs/heads/master
| 2021-05-20T16:57:30.113968
| 2020-03-21T14:28:03
| 2020-03-21T14:28:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,149
|
py
|
import time
import sys
import os
import numpy
import gzip
import zipfile
import cPickle
import random
import PIL.Image
try:
import magic
ms = magic.open(magic.MAGIC_NONE)
ms.load()
except ImportError: # no magic module
ms = None
class fx_UnpickleError(Exception):
pass
def fx_pickle(filename, data, compress=False):
if compress:
fo = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED, allowZip64=True)
fo.writestr('data', cPickle.dumps(data, -1))
else:
fo = open(filename, "wb")
cPickle.dump(data, fo, protocol=cPickle.HIGHEST_PROTOCOL)
fo.close()
def fx_unpickle(filename):
if not os.path.exists(filename):
raise fx_UnpickleError("Path '%s' does not exist." % filename)
if ms is not None and ms.file(filename).startswith('gzip'):
fo = gzip.open(filename, 'rb')
dict = cPickle.load(fo)
elif ms is not None and ms.file(filename).startswith('Zip'):
fo = zipfile.ZipFile(filename, 'r', zipfile.ZIP_DEFLATED)
dict = cPickle.loads(fo.read('data'))
else:
fo = open(filename, 'rb')
dict = cPickle.load(fo)
fo.close()
return dict
def fx_squre_distant(p, q, pSOS=None, qSOS=None):
    # squared Euclidean distances between the rows of p and the rows of q
    if pSOS is None:
pSOS = (p**2).sum(axis=1)
qSOS = (q**2).sum(axis=1)
return pSOS.reshape(-1,1) + qSOS - 2 * numpy.dot(p, q.T)
def fx_cos_distant(p, q):
dist = numpy.dot(p, q.T) / numpy.sqrt((p ** 2).sum(axis=1)).reshape((p.shape[0],1))
return dist / numpy.sqrt((q ** 2).sum(axis=1))
def fx_distant(p, q, type='L1'):
    # only the L1 (Manhattan) distance is implemented; other values of `type`
    # fall through and return None
    if type == 'L1':
prows, pcols = p.shape
qrows, qcols = q.shape
res = numpy.zeros((prows, qrows))
for i in range(qrows):
res[:,i] = (numpy.abs(p - q[i,:])).sum(axis=1)
return res
def fx_scale_to_unit_interval(ndar, eps=1e-8):
''' Scales all values in the ndarray ndar to be between 0 and 1 '''
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def fx_tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
'''
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
:type X: a 2-D ndarray or a tuple of 4 channels, elements of which can
be 2-D ndarrays or None;
:param X: a 2-D array in which every row is a flattened image.
:type img_shape: tuple; (height, width)
:param img_shape: the original shape of each image
:type tile_shape: tuple; (rows, cols)
:param tile_shape: the number of images to tile (rows, cols)
:param output_pixel_vals: if output should be pixel values (i.e. int8
values) or floats
:param scale_rows_to_unit_interval: if the values need to be scaled before
being plotted to [0,1] or not
:returns: array suitable for viewing as an image.
(See:`PIL.Image.fromarray`.)
:rtype: a 2-d array with same dtype as X.
'''
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output numpy ndarray to store the image
if output_pixel_vals:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = numpy.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
#colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = numpy.zeros(out_shape,
dtype=dt) + channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
                out_array[:, :, i] = fx_tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = numpy.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = fx_scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
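# --- Editor's usage sketch (not part of the original module) ---
# Tile 100 flattened 28x28 rows into a 10x10 mosaic and save it with PIL;
# the random matrix stands in for real weights or image rows.
def _demo_tile(path='tiles_demo.png'):
    data = numpy.random.rand(100, 28 * 28)
    mosaic = fx_tile_raster_images(X=data, img_shape=(28, 28),
                                   tile_shape=(10, 10), tile_spacing=(1, 1))
    PIL.Image.fromarray(mosaic).save(path)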
|
[
"hbzhang@vt.edu"
] |
hbzhang@vt.edu
|
7db647d25a21499083092c001e5dbe7f68539f5a
|
f07a42f652f46106dee4749277d41c302e2b7406
|
/Data Set/bug-fixing-5/506ae8f067379afa4417a57db5814487ea198a23-<_ldflags>-fix.py
|
e74dc5c62be559f47e3819254ac49089008a296f
|
[] |
no_license
|
wsgan001/PyFPattern
|
e0fe06341cc5d51b3ad0fe29b84098d140ed54d1
|
cc347e32745f99c0cd95e79a18ddacc4574d7faa
|
refs/heads/main
| 2023-08-25T23:48:26.112133
| 2021-10-23T14:11:22
| 2021-10-23T14:11:22
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,311
|
py
|
@utils.memoize
def _ldflags(ldflags_str, libs, flags, libs_dir, include_dir):
    """Extract list of compilation flags from a string.

    Depending on the options, different type of flags will be kept.

    Parameters
    ----------
    ldflags_str : string
        The string to process. Typically, this will be the content of
        `theano.config.blas.ldflags`.
    libs : bool
        Extract flags starting with "-l".
    flags: bool
        Extract all the other flags.
    libs_dir: bool
        Extract flags starting with "-L".
    include_dir: bool
        Extract flags starting with "-I".

    Returns
    -------
    list of strings
        Extracted flags.

    """
rval = []
if libs_dir:
found_dyn = False
dirs = [x[2:] for x in ldflags_str.split() if x.startswith('-L')]
l = _ldflags(ldflags_str=ldflags_str, libs=True, flags=False, libs_dir=False, include_dir=False)
for d in dirs:
for f in os.listdir(d.strip('"')):
if (f.endswith('.so') or f.endswith('.dylib') or f.endswith('.dll')):
if any([(f.find(ll) >= 0) for ll in l]):
found_dyn = True
if ((not found_dyn) and dirs):
            _logger.warning('We did not find a dynamic library in the library_dir of the library we use for blas. If you use ATLAS, make sure to compile it with dynamic libraries.')
for t in ldflags_str.split():
if ((t.startswith("'") and t.endswith("'")) or (t.startswith('"') and t.endswith('"'))):
t = t[1:(- 1)]
try:
(t0, t1, t2) = t[0:3]
assert (t0 == '-')
except Exception:
raise ValueError(('invalid token "%s" in ldflags_str: "%s"' % (t, ldflags_str)))
if (libs_dir and (t1 == 'L')):
rval.append(t[2:])
elif (include_dir and (t1 == 'I')):
raise ValueError('Include dirs are not used for blas. We disable this as this can hide other headers and this is not wanted.', t)
rval.append(t[2:])
elif (libs and (t1 == 'l')):
rval.append(t[2:])
elif (flags and (t1 not in ['L', 'I', 'l'])):
rval.append(t)
elif (flags and (t1 == 'L')):
rval.append(('-Wl,-rpath,' + t[2:]))
return rval
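# --- Editor's illustrative sketch (not part of the original module) ---
# The branching above routes "-L", "-I", "-l" and remaining tokens into
# separate buckets; a minimal standalone approximation, without the
# filesystem checks or memoization:
def _split_ldflags_sketch(ldflags_str):
    libs, lib_dirs, other = [], [], []
    for tok in ldflags_str.split():
        if tok.startswith('-l'):
            libs.append(tok[2:])
        elif tok.startswith('-L'):
            lib_dirs.append(tok[2:])
        else:
            other.append(tok)
    return libs, lib_dirs, other
# _split_ldflags_sketch('-L/usr/lib -lblas -lm -O3')
# -> (['blas', 'm'], ['/usr/lib'], ['-O3'])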
|
[
"dg1732004@smail.nju.edu.cn"
] |
dg1732004@smail.nju.edu.cn
|
48139347ca38ff413663fe2c88ee25ff2ed40945
|
c1c4dfdb8df223e19b811b18cce709ff47afe336
|
/node_modules/gulp-sass/node_modules/node-sass/build/config.gypi
|
438b9289b86a205279ccc357b01c9c83d5168d18
|
[
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
hgagliardi/artapp
|
a4bb7664ecfb091c20f95921453ae86e8c4548cb
|
d27ac93000c9dcf065ce585dc9c611551f687a27
|
refs/heads/master
| 2021-05-06T13:27:12.188706
| 2017-12-13T12:24:43
| 2017-12-13T12:24:43
| 113,225,847
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 4,323
|
gypi
|
# Do not edit. File was generated by node-gyp's "configure" step
{
"target_defaults": {
"cflags": [],
"default_configuration": "Release",
"defines": [],
"include_dirs": [],
"libraries": []
},
"variables": {
"asan": 0,
"coverage": "false",
"debug_devtools": "node",
"debug_http2": "false",
"debug_nghttp2": "false",
"force_dynamic_crt": 0,
"host_arch": "x64",
"icu_gyp_path": "tools/icu/icu-system.gyp",
"icu_small": "false",
"llvm_version": 0,
"node_byteorder": "little",
"node_enable_d8": "false",
"node_enable_v8_vtunejit": "false",
"node_install_npm": "false",
"node_module_version": 57,
"node_no_browser_globals": "false",
"node_prefix": "/usr/local/Cellar/node/8.7.0",
"node_release_urlbase": "",
"node_shared": "false",
"node_shared_cares": "false",
"node_shared_http_parser": "false",
"node_shared_libuv": "false",
"node_shared_openssl": "false",
"node_shared_zlib": "false",
"node_tag": "",
"node_use_bundled_v8": "true",
"node_use_dtrace": "true",
"node_use_etw": "false",
"node_use_lttng": "false",
"node_use_openssl": "true",
"node_use_perfctr": "false",
"node_use_v8_platform": "true",
"node_without_node_options": "false",
"openssl_fips": "",
"openssl_no_asm": 0,
"shlib_suffix": "57.dylib",
"target_arch": "x64",
"uv_parent_path": "/deps/uv/",
"uv_use_dtrace": "true",
"v8_enable_gdbjit": 0,
"v8_enable_i18n_support": 1,
"v8_enable_inspector": 1,
"v8_no_strict_aliasing": 1,
"v8_optimized_debug": 0,
"v8_promise_internal_field_count": 1,
"v8_random_seed": 0,
"v8_trace_maps": 0,
"v8_use_snapshot": "true",
"want_separate_host_toolset": 0,
"xcode_version": "9.0",
"nodedir": "/Users/DISENO-03/.node-gyp/8.7.0",
"standalone_static_library": 1,
"libsass_ext": "",
"libsass_cflags": "",
"libsass_ldflags": "",
"libsass_library": "",
"save_dev": "",
"viewer": "man",
"browser": "",
"rollback": "true",
"usage": "",
"globalignorefile": "/Applications/MAMP/htdocs/mbajour/mbajour/etc/npmignore",
"shell": "/bin/bash",
"maxsockets": "50",
"init_author_url": "",
"shrinkwrap": "true",
"parseable": "",
"init_license": "ISC",
"if_present": "",
"sign_git_tag": "",
"init_author_email": "",
"cache_max": "Infinity",
"long": "",
"local_address": "",
"git_tag_version": "true",
"cert": "",
"registry": "https://registry.npmjs.org/",
"npat": "",
"fetch_retries": "2",
"versions": "",
"message": "%s",
"key": "",
"globalconfig": "/Applications/MAMP/htdocs/mbajour/mbajour/etc/npmrc",
"loaded": "",
"always_auth": "",
"spin": "true",
"cache_lock_retries": "10",
"heading": "npm",
"proprietary_attribs": "true",
"fetch_retry_mintimeout": "10000",
"json": "",
"access": "",
"https_proxy": "",
"engine_strict": "",
"description": "true",
"userconfig": "/Users/DISENO-03/.npmrc",
"init_module": "/Users/DISENO-03/.npm-init.js",
"user": "501",
"node_version": "8.7.0",
"save": "",
"editor": "vi",
"tag": "latest",
"global": "",
"optional": "true",
"force": "",
"bin_links": "true",
"searchopts": "",
"depth": "Infinity",
"searchsort": "name",
"rebuild_bundle": "true",
"unicode": "true",
"fetch_retry_maxtimeout": "60000",
"tag_version_prefix": "v",
"strict_ssl": "true",
"save_prefix": "^",
"ca": "",
"save_exact": "",
"group": "20",
"fetch_retry_factor": "10",
"dev": "",
"version": "",
"cache_lock_stale": "60000",
"cache_min": "10",
"searchexclude": "",
"cache": "/Users/DISENO-03/.npm",
"color": "true",
"save_optional": "",
"ignore_scripts": "",
"user_agent": "npm/2.15.12 node/v8.7.0 darwin x64",
"cache_lock_wait": "10000",
"production": "",
"save_bundle": "",
"umask": "0022",
"init_version": "1.0.0",
"scope": "",
"init_author_name": "",
"git": "git",
"unsafe_perm": "true",
"tmp": "/var/folders/v3/tw6mc4qx3qx9_vmbjwpg084w0000gn/T",
"onload_script": "",
"link": "",
"prefix": "/Applications/MAMP/htdocs/mbajour/mbajour"
}
}
|
[
"DISENO-03@macMiniHernan.local"
] |
DISENO-03@macMiniHernan.local
|
1ae172ee905717997bd46629d8738ca156c1ed5c
|
afc8d5a9b1c2dd476ea59a7211b455732806fdfd
|
/Configurations/EFT/ggH/tools/2017_MakeAnaTemplates.py
|
6a081846b8ae6c05a502216a36fb9d23aaf6807a
|
[] |
no_license
|
latinos/PlotsConfigurations
|
6d88a5ad828dde4a7f45c68765081ed182fcda21
|
02417839021e2112e740607b0fb78e09b58c930f
|
refs/heads/master
| 2023-08-18T20:39:31.954943
| 2023-08-18T09:23:34
| 2023-08-18T09:23:34
| 39,819,875
| 10
| 63
| null | 2023-08-10T14:08:04
| 2015-07-28T07:36:50
|
Python
|
UTF-8
|
Python
| false
| false
| 31,671
|
py
|
import sys
import ROOT
import numpy as np
import shutil
import math
import os
ROOT.gStyle.SetOptStat(0)
ROOT.gStyle.SetOptTitle(0)
############# Couplings of mixed samples (g)
cons = {"VBF_H0M" : 0.29797901870, "VBF_H0PH" : 0.27196538, "VBF_H0L1" : -2158.21307286,
"WH_H0M" : 0.1236136, "WH_H0PH" : 0.0998956, "WH_H0L1" : -525.274,
"ZH_H0M" : 0.144057, "ZH_H0PH" : 0.112481, "ZH_H0L1" : -517.788,
"H0M" : 1.76132, "H0PH" : 1.133582, "H0L1" : -13752.22 }
############### Matrix of couplings for H(g1, gi) hypotheses - Ewk H (2 Vertices)
Gai = np.array(([1**4,0,0,0,0],
[0,0,0,0,1**4],
[1,.25,.25**2,.25**3,.25**4],
[1,.5, .5**2, .5**3, .5**4],
[1,.75,.75**2,.75**3,.75**4] ))
l1s = -10000
Gl1 = np.array(([1**4,0,0,0,0],
[0,0,0,0,(1*l1s)**4],
[1,.25*l1s,(.25*l1s)**2,(.25*l1s)**3,(.25*l1s)**4],
[1,.5*l1s, ( .5*l1s)**2,( .5*l1s)**3,( .5*l1s)**4],
[1,.75*l1s,(.75*l1s)**2,(.75*l1s)**3,(.75*l1s)**4] ))
###### test params #####
Fai = 0.5
Mu = 2.0
Fa1 = 1-abs(Fai)
MuSc = 1.0
MuSc2D = [0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.]
Scan = [-1, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
forQuickPlotter = False
if forQuickPlotter:
print("WARNING!!! TEMPLATES NOT PRE-SCALED. USE FOR PLOTTING ONLY")
###################################################################
#2017
src = "rootFile_ggH2017_allSignals_fullNuis_withCR/plots_ggH2017_allSignals_fullNuis_withCR.root"
dst = "rootFile_ggH2017_allSignals_fullNuis_withCR/plots_ggH2017_allSignals_fullNuis_Ana_withCR.root"
if os.path.exists(dst):
os.remove(dst)
print "- For Ewk H (2V) need templates : T1 -(4,0), T2 -(3,1), T3 -(2,2), T4 -(1,3), T5 -(0,4)"
print "Get from SM-BSM mixture hypotheses : SM(1,0), M0(0,1), M1(1,.25), M2(1,.5), M3(1,.75) "
print "and G matrices ", Gai, Gl1
print "- Will create new file : "+dst+" with analysis templates"
print " "
print "- For ggH (1V) need templates T1 -(2,0), T2 -(1,1), T3 -(0,2)"
print "Get from SM-BSM MC : SM(1,0), BSM(0,1), M1(1,gMix) "
print "- Will create new file : "+dst+" with analysis templates"
print " "
###################################################
def AddOtherTemplates(Cat, Var, AC):
print " "
print " ", Cat, Var
print " "
f = ROOT.TFile.Open(''+src+'', 'read')
HistList = ROOT.TObjArray()
f.cd("hww2l2v_13TeV_"+Cat+"/"+Var+"/")
d = ROOT.gDirectory
for ih in d.GetListOfKeys():
h = ih.ReadObj()
if "H0" not in h.GetName() :
h.SetDirectory(0)
HistList.Add(h)
f.Close()
fout = ROOT.TFile.Open(''+dst+'', 'update')
fout.cd("hww2l2v_13TeV_"+Cat+"/var"+AC+"/")
HistList.Write()
d = ROOT.gDirectory
for ih in d.GetListOfKeys():
h = ih.ReadObj()
if "Up" not in h.GetName() and "Down" not in h.GetName() :
print h.GetName()
fout.Close()
############################################
def getSumOfRWSamples(f, BaseN, Hyp, Sys):
if Hyp == "H0PM" : H1 = f.Get(''+BaseN+'H0PM'+Sys+'')
else : H1 = f.Get(''+BaseN+'H0PM_'+Hyp+Sys+'')
if Hyp == "H0M" : H2 = f.Get(''+BaseN+'H0M'+Sys+'')
else : H2 = f.Get(''+BaseN+'H0M_'+Hyp+Sys+'')
if Hyp == "H0PH" : H3 = f.Get(''+BaseN+'H0PH'+Sys+'')
else : H3 = f.Get(''+BaseN+'H0PH_'+Hyp+Sys+'')
if Hyp == "H0L1" : H4 = f.Get(''+BaseN+'H0L1'+Sys+'')
else : H4 = f.Get(''+BaseN+'H0L1_'+Hyp+Sys+'')
if Hyp == "H0Mf05" : H5 = f.Get(''+BaseN+'H0Mf05'+Sys+'')
else : H5 = f.Get(''+BaseN+'H0Mf05_'+Hyp+Sys+'')
if Hyp == "H0PHf05" : H6 = f.Get(''+BaseN+'H0PHf05'+Sys+'')
else : H6 = f.Get(''+BaseN+'H0PHf05_'+Hyp+Sys+'')
if Hyp == "H0L1f05" : H7 = f.Get(''+BaseN+'H0L1f05'+Sys+'')
else : H7 = f.Get(''+BaseN+'H0L1f05_'+Hyp+Sys+'')
H1.SetDirectory(0)
H2.SetDirectory(0)
H3.SetDirectory(0)
H4.SetDirectory(0)
H5.SetDirectory(0)
H6.SetDirectory(0)
H7.SetDirectory(0)
Sum = H1.Clone()
Sum.SetDirectory(0)
H1.SetBit(ROOT.TH1.kIsAverage)
H2.SetBit(ROOT.TH1.kIsAverage)
H3.SetBit(ROOT.TH1.kIsAverage)
H4.SetBit(ROOT.TH1.kIsAverage)
H5.SetBit(ROOT.TH1.kIsAverage)
H6.SetBit(ROOT.TH1.kIsAverage)
H7.SetBit(ROOT.TH1.kIsAverage)
H1.Add(H2,1)
H1.Add(H3,1)
H1.Add(H4,1)
H1.Add(H5,1)
H1.Add(H6,1)
H1.Add(H7,1)
for i in range(1, H1.GetXaxis().GetNbins()+1):
n = H1.GetBinContent(i)
e = H1.GetBinError(i)
Sum.SetBinContent(i, n)
Sum.SetBinError(i, e)
return Sum
#########################################################
def create2VIntTemplates(Cat, Var, Prod, AC, Sys, Test):
print " "
print " ", Cat, Var, Prod, AC, Sys, Test
print " "
f = ROOT.TFile.Open(''+src+'', 'read')
BaseN = "hww2l2v_13TeV_"+Cat+"/"+Var+"/histo_"+Prod+""
SM = getSumOfRWSamples(f, BaseN, "H0PM", Sys)
M0 = getSumOfRWSamples(f, BaseN, AC+"_M0",Sys)
M1 = getSumOfRWSamples(f, BaseN, AC+"_M1",Sys)
M2 = getSumOfRWSamples(f, BaseN, AC+"_M2",Sys)
M3 = getSumOfRWSamples(f, BaseN, AC+"_M3",Sys)
SM_Org = f.Get(''+BaseN+'H0PM'+Sys+'')
BSM_Org = f.Get(''+BaseN+AC+Sys+'')
f05_Org = f.Get(''+BaseN+AC+'f05'+Sys+'')
SM_Org.SetDirectory(0)
BSM_Org.SetDirectory(0)
f05_Org.SetDirectory(0)
f.Close()
T1 = SM.Clone() # 4,0
T2 = SM.Clone() # 3,1
T3 = SM.Clone() # 2,2
T4 = SM.Clone() # 1,3
T5 = SM.Clone() # 0,4
T1.SetDirectory(0)
T2.SetDirectory(0)
T3.SetDirectory(0)
T4.SetDirectory(0)
T5.SetDirectory(0)
InvG = np.linalg.inv(Gai)
if AC == "H0L1" : InvG = np.linalg.inv(Gl1)
T1.Scale( InvG[0][0])
T1.Add(M0,InvG[0][1])
T1.Add(M1,InvG[0][2])
T1.Add(M2,InvG[0][3])
T1.Add(M3,InvG[0][4])
T2.Scale( InvG[1][0])
T2.Add(M0,InvG[1][1])
T2.Add(M1,InvG[1][2])
T2.Add(M2,InvG[1][3])
T2.Add(M3,InvG[1][4])
T3.Scale( InvG[2][0])
T3.Add(M0,InvG[2][1])
T3.Add(M1,InvG[2][2])
T3.Add(M2,InvG[2][3])
T3.Add(M3,InvG[2][4])
T4.Scale( InvG[3][0])
T4.Add(M0,InvG[3][1])
T4.Add(M1,InvG[3][2])
T4.Add(M2,InvG[3][3])
T4.Add(M3,InvG[3][4])
T5.Scale( InvG[4][0])
T5.Add(M0,InvG[4][1])
T5.Add(M1,InvG[4][2])
T5.Add(M2,InvG[4][3])
T5.Add(M3,InvG[4][4])
####################################
if Test == True :
G = cons[Prod+AC]
N1 = Mu**2*Fa1**2
N2 = Mu**2*np.sign(Fai)*math.sqrt(abs(Fai))*(math.sqrt(Fa1)**3)*G
N3 = Mu**2*abs(Fai)*Fa1*G**2
N4 = Mu**2*np.sign(Fai)*(math.sqrt(abs(Fai))**3)*math.sqrt(Fa1)*G**3
N5 = Mu**2*Fai**2*G**4
f05T = T1.Clone()
f05T.SetDirectory(0)
f05T.Scale( N1)
f05T.Add(T2, N2)
f05T.Add(T3, N3)
f05T.Add(T4, N4)
f05T.Add(T5, N5)
SM_Org.SetLineColor(ROOT.kBlack)
BSM_Org.SetLineColor(ROOT.kBlack)
f05_Org.SetLineColor(ROOT.kBlack)
SM_Org.SetLineWidth(2)
BSM_Org.SetLineWidth(2)
f05_Org.SetLineWidth(2)
f05T.SetLineColor(ROOT.kRed)
f05T.SetFillColor(ROOT.kRed)
T1.SetLineColor(ROOT.kRed)
T1.SetFillColor(ROOT.kRed)
T2.SetLineColor(ROOT.kOrange)
T2.SetFillColor(ROOT.kOrange)
T3.SetLineColor(ROOT.kCyan)
T3.SetFillColor(ROOT.kCyan)
T4.SetLineColor(ROOT.kBlue)
T4.SetFillColor(ROOT.kBlue)
T5.SetLineColor(ROOT.kGreen)
T5.SetFillColor(ROOT.kGreen)
T1.SetLineWidth(2)
T2.SetLineWidth(2)
T3.SetLineWidth(2)
T4.SetLineWidth(2)
T5.SetLineWidth(2)
canvasf05 = ROOT.TCanvas('canvasf05', '', 500, 500)
f05T.SetMinimum(0.001)
f05T.SetMaximum(1.5*f05T.GetMaximum())
f05T.GetXaxis().SetTitle(""+Var+"")
f05T.Draw("e")
f05_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(f05_Org,"SM-BSM Mix MC ","l")
legend.AddEntry(f05T,"T1-T5 combination","f")
legend.Draw()
canvasf05.SaveAs("plot_ggF/f05_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasf05.SaveAs("plot_ggF/f05_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasT1 = ROOT.TCanvas('canvasT1', '', 500, 500)
T1.SetMinimum(0.001)
T1.SetMaximum(1.5*T1.GetMaximum())
T1.GetXaxis().SetTitle(""+Var+"")
T1.Draw("e")
SM_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(SM_Org,"pure SM MC","l")
legend.AddEntry(T1,"T1 template","f")
legend.Draw()
canvasT1.SaveAs("plot_ggF/T1_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasT1.SaveAs("plot_ggF/T1_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasT2 = ROOT.TCanvas('canvasT2', '', 500, 500)
T2.GetXaxis().SetTitle(""+Var+"")
T2.Draw("e")
legend = ROOT.TLegend(0.3,0.8,0.7,0.9)
legend.AddEntry(T2,"T2 template","f")
legend.SetTextSize(.04)
legend.Draw()
canvasT2.SaveAs("plot_ggF/T2_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasT2.SaveAs("plot_ggF/T2_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasT3 = ROOT.TCanvas('canvasT3', '', 500, 500)
T3.GetXaxis().SetTitle(""+Var+"")
T3.Draw("e")
legend = ROOT.TLegend(0.3,0.8,0.7,0.9)
legend.AddEntry(T3,"T3 template","f")
legend.SetTextSize(.04)
legend.Draw()
canvasT3.SaveAs("plot_ggF/T3_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasT3.SaveAs("plot_ggF/T3_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasT4 = ROOT.TCanvas('canvasT4', '', 500, 500)
T4.GetXaxis().SetTitle(""+Var+"")
T4.Draw("e")
legend = ROOT.TLegend(0.3,0.8,0.7,0.9)
legend.AddEntry(T4,"T4 template","f")
legend.SetTextSize(.04)
legend.Draw()
canvasT4.SaveAs("plot_ggF/T4_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasT4.SaveAs("plot_ggF/T4_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasT5 = ROOT.TCanvas('canvasT5', '', 500, 500)
T5.SetMinimum(0.001)
T5.SetMaximum(1.5*T5.GetMaximum())
T5.GetXaxis().SetTitle(""+Var+"")
T5.Draw("e")
BSM_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(BSM_Org,"pure BSM MC","l")
legend.AddEntry(T5,"T5 template","f")
legend.Draw()
canvasT5.SaveAs("plot_ggF/T5_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasT5.SaveAs("plot_ggF/T5_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
######### Template tricks for combine #######
Gsc = cons[AC] # Fai in terms of WW decay vertex
if not forQuickPlotter:
T2.Scale(Gsc)
T3.Scale(Gsc**2)
T4.Scale(Gsc**3)
T5.Scale(Gsc**4)
if AC == "H0M" :
print "--------- Force H0M T2 and T4 = 0"
T2.Scale(0)
T4.Scale(0)
if AC == "H0PH" and (Prod == "WH_" or Prod == "ZH_") :
print "--------- Force VH H0PH T2 positive - Compensate in model! "
T2.Scale(-1)
if AC == "H0PH" and (Prod == "VBF_") :
if "0j" in Cat:
print "--------- Force VBF H0PH T4 = 0 "
T4.Scale(0) #KELLO extra to prevent negative yield
if AC == "H0L1" and (Prod == "WH_" or Prod == "ZH_") :
print "--------- Force VH H0L1 T2 and T4 positive - Compensate in model! "
T2.Scale(-1)
T4.Scale(-1)
if AC == "H0L1" and (Prod == "VBF_") :
print "--------- Force VBF H0L1 T4 positive - Compensate in model! "
T4.Scale(-1)
if "0j" in Cat:
print "--------- Force VBF H0L1 T2 = 0 "
print "--------- Force VBF H0L1 T3 = 0 "
T2.Scale(0) #KELLO extra to prevent negative yield
T3.Scale(0) #KELLO extra to prevent negative yield
if Test == True :
gr = ROOT.TGraph(len(Scan))
gr2D = ROOT.TGraph2D(len(Scan)*len(MuSc2D))
gr.SetLineColor(ROOT.kRed)
gr.SetLineWidth(2)
for i in range(len(Scan)):
FaiSc = Scan[i]
Fa1Sc = 1-abs(FaiSc)
N1Sc = MuSc**2*Fa1Sc**2
N2Sc = MuSc**2*np.sign(FaiSc)*math.sqrt(abs(FaiSc))*(math.sqrt(Fa1Sc)**3)
N3Sc = MuSc**2*abs(FaiSc)*Fa1Sc
N4Sc = MuSc**2*np.sign(FaiSc)*(math.sqrt(abs(FaiSc))**3)*math.sqrt(Fa1Sc)
N5Sc = MuSc**2*FaiSc**2
if AC == "H0PH" and (Prod == "WH_" or Prod == "ZH_") :
N2Sc = N2Sc*-1
if AC == "H0L1" and (Prod == "WH_" or Prod == "ZH_") :
N4Sc = N4Sc*-1
N2Sc = N2Sc*-1
if AC == "H0L1" and (Prod == "VBF_") :
N4Sc = N4Sc*-1
f05TSc = T1.Clone()
f05TSc.SetDirectory(0)
f05TSc.Scale( N1Sc)
f05TSc.Add(T2, N2Sc)
f05TSc.Add(T3, N3Sc)
f05TSc.Add(T4, N4Sc)
f05TSc.Add(T5, N5Sc)
gr.SetPoint(i, Scan[i], f05TSc.Integral())
for j in range(len(MuSc2D)):
N1Sc2D = MuSc2D[j]**2*Fa1Sc**2
N2Sc2D = MuSc2D[j]**2*np.sign(FaiSc)*math.sqrt(abs(FaiSc))*(math.sqrt(Fa1Sc)**3)
N3Sc2D = MuSc2D[j]**2*abs(FaiSc)*Fa1Sc
N4Sc2D = MuSc2D[j]**2*np.sign(FaiSc)*(math.sqrt(abs(FaiSc))**3)*math.sqrt(Fa1Sc)
N5Sc2D = MuSc2D[j]**2*FaiSc**2
if AC == "H0PH" and (Prod == "WH_" or Prod == "ZH_") :
N2Sc2D = N2Sc2D*-1
if AC == "H0L1" and (Prod == "WH_" or Prod == "ZH_") :
N4Sc2D = N4Sc2D*-1
N2Sc2D = N2Sc2D*-1
if AC == "H0L1" and (Prod == "VBF_") :
N4Sc2D = N4Sc2D*-1
f05TSc2D = T1.Clone()
f05TSc2D.SetDirectory(0)
f05TSc2D.Scale( N1Sc2D)
f05TSc2D.Add(T2, N2Sc2D)
f05TSc2D.Add(T3, N3Sc2D)
f05TSc2D.Add(T4, N4Sc2D)
f05TSc2D.Add(T5, N5Sc2D)
gr2D.SetPoint(i+j*len(Scan),Scan[i],MuSc2D[j],f05TSc2D.Integral())
canvasFinal = ROOT.TCanvas('canvasFinal', '', 500, 500)
canvasFinal.Divide(3,2)
canvasFinal.cd(1)
T1.Draw("hist")
canvasFinal.cd(2)
T2.Draw("hist")
canvasFinal.cd(3)
T3.Draw("hist")
canvasFinal.cd(4)
T4.Draw("hist")
canvasFinal.cd(5)
T5.Draw("hist")
canvasFinal.cd(6)
legend = ROOT.TLegend(0.2,0.2,1.0,1.0)
legend.AddEntry(T1,"T1","f")
legend.AddEntry(T2,"T2","f")
legend.AddEntry(T3,"T3","f")
legend.AddEntry(T4,"T4","f")
legend.AddEntry(T5,"T5","f")
legend.Draw()
canvasFinal.SaveAs("plot_ggF/FinalT_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasFinal.SaveAs("plot_ggF/FinalT_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasScan = ROOT.TCanvas('canvasScan', '', 500, 500)
gr.Draw("ALP")
# canvasScan.SetLogy()
canvasScan.SaveAs("plot_ggF/FinalS_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasScan.SaveAs("plot_ggF/FinalS_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
canvasScan2D = ROOT.TCanvas('canvasScan2D', '', 500, 500)
gr2D.Draw("COLZ")
canvasScan2D.SaveAs("plot_ggF/FinalS2D_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".pdf")
canvasScan2D.SaveAs("plot_ggF/FinalS2D_"+Cat+"_"+Var+"_"+Prod+AC+Sys+".png")
fout = ROOT.TFile.Open(''+dst+'', 'update')
ROOT.gDirectory.mkdir("hww2l2v_13TeV_"+Cat+"/var"+AC+"/")
fout.cd("hww2l2v_13TeV_"+Cat+"/var"+AC+"/")
fyield = open("plot_ggF/Yields_"+Prod+"_"+Cat+"_"+Var+"_"+AC+".txt","a+")
fyield.write(Sys+"@T1: "+str(T1.Integral())+"\n")
fyield.write(Sys+"@T2: "+str(T2.Integral())+"\n")
fyield.write(Sys+"@T3: "+str(T3.Integral())+"\n")
fyield.write(Sys+"@T4: "+str(T4.Integral())+"\n")
fyield.write(Sys+"@T5: "+str(T5.Integral())+"\n")
T1.SetName("histo_"+Prod+"T1"+Sys+"")
T2.SetName("histo_"+Prod+"T2"+Sys+"")
T3.SetName("histo_"+Prod+"T3"+Sys+"")
T4.SetName("histo_"+Prod+"T4"+Sys+"")
T5.SetName("histo_"+Prod+"T5"+Sys+"")
T1.Write()
T2.Write()
T3.Write()
T4.Write()
T5.Write()
fout.Close()
##########################################################
def create1VIntTemplates(Cat, Var, AC, Sys, Test):
print " "
print " ", Cat, Var, AC, Sys, Test
print " "
f = ROOT.TFile.Open(''+src+'', 'read')
BaseN = "hww2l2v_13TeV_"+Cat+"/"+Var+"/histo_"
SM = getSumOfRWSamples(f, BaseN, "H0PM", Sys)
BSM = getSumOfRWSamples(f, BaseN, AC, Sys)
f05 = getSumOfRWSamples(f, BaseN, AC+"f05",Sys)
SM_Org = f.Get(''+BaseN+'H0PM'+Sys+'')
BSM_Org = f.Get(''+BaseN+AC+Sys+'')
f05_Org = f.Get(''+BaseN+AC+'f05'+Sys+'')
SM_Org.SetDirectory(0)
BSM_Org.SetDirectory(0)
f05_Org.SetDirectory(0)
f.Close()
G = cons[AC]
T1 = SM.Clone() # 2,0
T2 = SM.Clone() # 1,1
T3 = BSM.Clone() # 0,2
T1.SetDirectory(0)
T2.SetDirectory(0)
T3.SetDirectory(0)
T2.Scale(-1/G)
T2.Add(BSM, -G)
T2.Add(f05, 1/G)
if Test == True :
N1 = Mu*Fa1
N2 = Mu*np.sign(Fai)*math.sqrt(abs(Fai)*Fa1)*G
N3 = Mu*abs(Fai)*G**2
f05T = T1.Clone()
f05T.SetDirectory(0)
f05T.Scale( N1)
f05T.Add(T2, N2)
f05T.Add(T3, N3)
T1.SetLineColor(ROOT.kRed)
T1.SetFillColor(ROOT.kRed)
T2.SetLineColor(ROOT.kBlue)
T2.SetFillColor(ROOT.kBlue)
T3.SetLineColor(ROOT.kGreen)
T3.SetFillColor(ROOT.kGreen)
T1.SetLineWidth(3)
T2.SetLineWidth(3)
T3.SetLineWidth(3)
SM_Org.SetLineColor(ROOT.kBlack)
BSM_Org.SetLineColor(ROOT.kBlack)
f05_Org.SetLineColor(ROOT.kBlack)
SM_Org.SetLineWidth(2)
BSM_Org.SetLineWidth(2)
f05_Org.SetLineWidth(2)
f05T.SetLineColor(ROOT.kRed)
f05T.SetFillColor(ROOT.kRed)
f05T.SetLineWidth(2)
canvasf05 = ROOT.TCanvas('canvasf05', '', 500, 500)
f05T.SetMinimum(0.001)
f05T.SetMaximum(1.5*f05T.GetMaximum())
f05T.GetXaxis().SetTitle(""+Var+"")
f05T.Draw("e")
f05_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(f05_Org,"SM-BSM Mix MC ","l")
legend.AddEntry(f05T,"T1-T3 combination","f")
legend.Draw()
canvasf05.SaveAs("plot_ggF/f05_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasf05.SaveAs("plot_ggF/f05_"+Cat+"_"+Var+"_"+AC+Sys+".png")
canvasT1 = ROOT.TCanvas('canvasT1', '', 500, 500)
T1.SetMinimum(0.001)
T1.SetMaximum(1.5*T1.GetMaximum())
T1.GetXaxis().SetTitle(""+Var+"")
T1.Draw("e")
SM_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(SM_Org,"pure SM MC","l")
legend.AddEntry(T1,"T1 template","f")
legend.Draw()
canvasT1.SaveAs("plot_ggF/T1_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasT1.SaveAs("plot_ggF/T1_"+Cat+"_"+Var+"_"+AC+Sys+".png")
canvasT2 = ROOT.TCanvas('canvasT2', '', 500, 500)
T2.GetXaxis().SetTitle(""+Var+"")
T2.Draw("e")
legend = ROOT.TLegend(0.3,0.8,0.7,0.9)
legend.AddEntry(T2,"T2 template","f")
legend.SetTextSize(.04)
legend.Draw()
canvasT2.SaveAs("plot_ggF/T2_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasT2.SaveAs("plot_ggF/T2_"+Cat+"_"+Var+"_"+AC+Sys+".png")
canvasT3 = ROOT.TCanvas('canvasT3', '', 500, 500)
T3.SetMinimum(0.001)
T3.SetMaximum(1.5*BSM.GetMaximum())
T3.GetXaxis().SetTitle(""+Var+"")
T3.Draw("e")
BSM_Org.Draw("same e")
legend = ROOT.TLegend(0.3,0.75,0.7,0.9)
legend.AddEntry(BSM_Org,"pure BSM MC","l")
legend.AddEntry(T3,"T3 template","f")
legend.Draw()
canvasT3.SaveAs("plot_ggF/T3_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasT3.SaveAs("plot_ggF/T3_"+Cat+"_"+Var+"_"+AC+Sys+".png")
######### Template tricks for combine #######
Gsc = cons[AC] # Fai in terms of WW decay vertex
if not forQuickPlotter:
T2.Scale(Gsc)
T3.Scale(Gsc**2)
if AC == "H0M" :
print "--------- Force H0M T2 = 0"
T2.Scale(0)
if AC == "H0L1" :
print "--------- Force H0L1 T2 positive - Compensate in model! "
T2.Scale(-1)
if Test == True :
gr = ROOT.TGraph(len(Scan))
gr2D = ROOT.TGraph2D(len(Scan)*len(MuSc2D))
gr.SetLineColor(ROOT.kRed)
gr.SetLineWidth(2)
for i in range(len(Scan)):
FaiSc = Scan[i]
Fa1Sc = 1-abs(FaiSc)
N1Sc = MuSc*Fa1Sc
N2Sc = MuSc*np.sign(FaiSc)*math.sqrt(abs(FaiSc)*Fa1Sc)
N3Sc = MuSc*abs(FaiSc)
if AC == "H0L1" :
N2Sc = N2Sc*-1
f05TSc = T1.Clone()
f05TSc.SetDirectory(0)
f05TSc.Scale( N1Sc)
f05TSc.Add(T2, N2Sc)
f05TSc.Add(T3, N3Sc)
gr.SetPoint(i, Scan[i], f05TSc.Integral())
for j in range(len(MuSc2D)):
N1Sc2D = MuSc2D[j]*Fa1Sc
N2Sc2D = MuSc2D[j]*np.sign(FaiSc)*math.sqrt(abs(FaiSc)*Fa1Sc)
N3Sc2D = MuSc2D[j]*abs(FaiSc)
if AC == "H0L1" :
N2Sc2D = N2Sc2D*-1
f05TSc2D = T1.Clone()
f05TSc2D.SetDirectory(0)
f05TSc2D.Scale( N1Sc2D)
f05TSc2D.Add(T2, N2Sc2D)
f05TSc2D.Add(T3, N3Sc2D)
gr2D.SetPoint(i+j*len(Scan),Scan[i],MuSc2D[j],f05TSc2D.Integral())
canvasFinal = ROOT.TCanvas('canvasFinal', '', 500, 500)
canvasFinal.Divide(2,2)
canvasFinal.cd(1)
T1.Draw("hist")
canvasFinal.cd(2)
T2.Draw("hist")
canvasFinal.cd(3)
T3.Draw("hist")
canvasFinal.cd(4)
legend = ROOT.TLegend(0.2,0.2,1.0,1.0)
legend.AddEntry(T1,"T1","f")
legend.AddEntry(T2,"T2","f")
legend.AddEntry(T3,"T3","f")
legend.Draw()
canvasFinal.SaveAs("plot_ggF/FinalT_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasFinal.SaveAs("plot_ggF/FinalT_"+Cat+"_"+Var+"_"+AC+Sys+".png")
canvasScan = ROOT.TCanvas('canvasScan', '', 500, 500)
gr.Draw("ALP")
# canvasScan.SetLogy()
canvasScan.SaveAs("plot_ggF/FinalS_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasScan.SaveAs("plot_ggF/FinalS_"+Cat+"_"+Var+"_"+AC+Sys+".png")
canvasScan2D = ROOT.TCanvas('canvasScan2D', '', 500, 500)
gr2D.Draw("COLZ")
canvasScan2D.SaveAs("plot_ggF/FinalS2D_"+Cat+"_"+Var+"_"+AC+Sys+".pdf")
canvasScan2D.SaveAs("plot_ggF/FinalS2D_"+Cat+"_"+Var+"_"+AC+Sys+".png")
fout = ROOT.TFile.Open(''+dst+'', 'update')
ROOT.gDirectory.mkdir("hww2l2v_13TeV_"+Cat+"/var"+AC+"/")
fout.cd("hww2l2v_13TeV_"+Cat+"/var"+AC+"/")
fyield = open("plot_ggF/Yields_ggH_"+Cat+"_"+Var+"_"+AC+".txt","a+")
fyield.write(Sys+"@T1: "+str(T1.Integral())+"\n")
fyield.write(Sys+"@T2: "+str(T2.Integral())+"\n")
fyield.write(Sys+"@T3: "+str(T3.Integral())+"\n")
T1.SetName("histo_ggH_T1"+Sys+"")
T2.SetName("histo_ggH_T2"+Sys+"")
T3.SetName("histo_ggH_T3"+Sys+"")
T1.Write()
T2.Write()
T3.Write()
fout.Close()
##########################################################
#2017 (with JER JES)
Systematics = ["THU_ggH_qmtop", "CMS_eff_m_2017", "CMS_eff_e_2017", "THU_ggH_Mig12", "CMS_scale_JESEC2", "CMS_btag_lf", "CMS_scale_JESFlavorQCD", "THU_qqH_Mjj1000", "CMS_eff_hwwtrigger_2017", "CMS_scale_JESEC2_2017", "CMS_scale_JESAbsolute", "THU_qqH_EWK", "CMS_scale_JESAbsolute_2017", "CMS_scale_JESRelativeBal", "THU_qqH_Mjj1500", "CMS_scale_JESBBEC1", "THU_qqH_Mjj60", "CMS_PU_2017", "CMS_btag_hfstats1_2017", "CMS_btag_hfstats2_2017", "CMS_scale_met_2017", "THU_qqH_PTH200", "THU_qqH_JET01", "CMS_scale_m_2017", "CMS_scale_JESHF_2017", "THU_ggH_VBF3j", "CMS_btag_lfstats1_2017", "CMS_btag_lfstats2_2017", "PS_FSR", "CMS_scale_e_2017", "CMS_res_j_2017", "THU_ggH_Res", "PS_ISR", "THU_qqH_YIELD", "CMS_btag_hf", "THU_ggH_PT60", "THU_ggH_Mu", "THU_qqH_Mjj700", "THU_qqH_Mjj120", "THU_qqH_PTH25", "CMS_btag_jes", "CMS_scale_JESRelativeSample_2017", "CMS_scale_JESBBEC1_2017", "CMS_eff_prefiring_2017", "THU_ggH_PT120", "CMS_btag_cferr2", "CMS_btag_cferr1", "THU_ggH_VBF2j", "CMS_PUID_2017", "THU_ggH_Mig01", "CMS_scale_JESHF", "THU_qqH_Mjj350"]
'''
VBFConfig = [ ("0j", "mllVSmthExt", "VBF_", "H0M"),
("0j", "mllVSmthExt", "VBF_", "H0PH"),
("0j", "mllVSmthExt", "VBF_", "H0L1"),
("1j", "mllVSmthExt", "VBF_", "H0M"),
("1j", "mllVSmthExt", "VBF_", "H0PH"),
("1j", "mllVSmthExt", "VBF_", "H0L1"),
]
'''
VBFConfig = [ ("0j", "mllVSmth", "VBF_", "H0M"),
("0j", "mllVSmth", "VBF_", "H0PH"),
("0j", "mllVSmth", "VBF_", "H0L1"),
("dytt_0j", "mllVSmth", "VBF_", "H0M"),
("dytt_0j", "mllVSmth", "VBF_", "H0PH"),
("dytt_0j", "mllVSmth", "VBF_", "H0L1"),
("top_0j", "mllVSmth", "VBF_", "H0M"),
("top_0j", "mllVSmth", "VBF_", "H0PH"),
("top_0j", "mllVSmth", "VBF_", "H0L1"),
("1j", "mllVSmth", "VBF_", "H0M"),
("1j", "mllVSmth", "VBF_", "H0PH"),
("1j", "mllVSmth", "VBF_", "H0L1"),
("dytt_1j", "mllVSmth", "VBF_", "H0M"),
("dytt_1j", "mllVSmth", "VBF_", "H0PH"),
("dytt_1j", "mllVSmth", "VBF_", "H0L1"),
("top_1j", "mllVSmth", "VBF_", "H0M"),
("top_1j", "mllVSmth", "VBF_", "H0PH"),
("top_1j", "mllVSmth", "VBF_", "H0L1"),
]
'''
VBFConfig = [ ("0j", "mllExt", "VBF_", "H0M"),
("0j", "mllExt", "VBF_", "H0PH"),
("0j", "mllExt", "VBF_", "H0L1"),
("1j", "mllExt", "VBF_", "H0M"),
("1j", "mllExt", "VBF_", "H0PH"),
("1j", "mllExt", "VBF_", "H0L1"),
]
VBFConfig = [ ("0j", "mthExt", "VBF_", "H0M"),
("0j", "mthExt", "VBF_", "H0PH"),
("0j", "mthExt", "VBF_", "H0L1"),
("1j", "mthExt", "VBF_", "H0M"),
("1j", "mthExt", "VBF_", "H0PH"),
("1j", "mthExt", "VBF_", "H0L1"),
]
WHConfig = [ ("0j", "mllVSmthExt", "WH_", "H0M"),
("0j", "mllVSmthExt", "WH_", "H0PH"),
("0j", "mllVSmthExt", "WH_", "H0L1"),
("1j", "mllVSmthExt", "WH_", "H0M"),
("1j", "mllVSmthExt", "WH_", "H0PH"),
("1j", "mllVSmthExt", "WH_", "H0L1"),
]
'''
WHConfig = [ ("0j", "mllVSmth", "WH_", "H0M"),
("0j", "mllVSmth", "WH_", "H0PH"),
("0j", "mllVSmth", "WH_", "H0L1"),
("dytt_0j", "mllVSmth", "WH_", "H0M"),
("dytt_0j", "mllVSmth", "WH_", "H0PH"),
("dytt_0j", "mllVSmth", "WH_", "H0L1"),
("top_0j", "mllVSmth", "WH_", "H0M"),
("top_0j", "mllVSmth", "WH_", "H0PH"),
("top_0j", "mllVSmth", "WH_", "H0L1"),
("1j", "mllVSmth", "WH_", "H0M"),
("1j", "mllVSmth", "WH_", "H0PH"),
("1j", "mllVSmth", "WH_", "H0L1"),
("dytt_1j", "mllVSmth", "WH_", "H0M"),
("dytt_1j", "mllVSmth", "WH_", "H0PH"),
("dytt_1j", "mllVSmth", "WH_", "H0L1"),
("top_1j", "mllVSmth", "WH_", "H0M"),
("top_1j", "mllVSmth", "WH_", "H0PH"),
("top_1j", "mllVSmth", "WH_", "H0L1"),
]
'''
WHConfig = [ ("0j", "mllExt", "WH_", "H0M"),
("0j", "mllExt", "WH_", "H0PH"),
("0j", "mllExt", "WH_", "H0L1"),
("1j", "mllExt", "WH_", "H0M"),
("1j", "mllExt", "WH_", "H0PH"),
("1j", "mllExt", "WH_", "H0L1"),
]
WHConfig = [ ("0j", "mthExt", "WH_", "H0M"),
("0j", "mthExt", "WH_", "H0PH"),
("0j", "mthExt", "WH_", "H0L1"),
("1j", "mthExt", "WH_", "H0M"),
("1j", "mthExt", "WH_", "H0PH"),
("1j", "mthExt", "WH_", "H0L1"),
]
ZHConfig = [ ("0j", "mllVSmthExt", "ZH_", "H0M"),
("0j", "mllVSmthExt", "ZH_", "H0PH"),
("0j", "mllVSmthExt", "ZH_", "H0L1"),
("1j", "mllVSmthExt", "ZH_", "H0M"),
("1j", "mllVSmthExt", "ZH_", "H0PH"),
("1j", "mllVSmthExt", "ZH_", "H0L1"),
]
'''
ZHConfig = [ ("0j", "mllVSmth", "ZH_", "H0M"),
("0j", "mllVSmth", "ZH_", "H0PH"),
("0j", "mllVSmth", "ZH_", "H0L1"),
("dytt_0j", "mllVSmth", "ZH_", "H0M"),
("dytt_0j", "mllVSmth", "ZH_", "H0PH"),
("dytt_0j", "mllVSmth", "ZH_", "H0L1"),
("top_0j", "mllVSmth", "ZH_", "H0M"),
("top_0j", "mllVSmth", "ZH_", "H0PH"),
("top_0j", "mllVSmth", "ZH_", "H0L1"),
("1j", "mllVSmth", "ZH_", "H0M"),
("1j", "mllVSmth", "ZH_", "H0PH"),
("1j", "mllVSmth", "ZH_", "H0L1"),
("dytt_1j", "mllVSmth", "ZH_", "H0M"),
("dytt_1j", "mllVSmth", "ZH_", "H0PH"),
("dytt_1j", "mllVSmth", "ZH_", "H0L1"),
("top_1j", "mllVSmth", "ZH_", "H0M"),
("top_1j", "mllVSmth", "ZH_", "H0PH"),
("top_1j", "mllVSmth", "ZH_", "H0L1"),
]
'''
ZHConfig = [ ("0j", "mllExt", "ZH_", "H0M"),
("0j", "mllExt", "ZH_", "H0PH"),
("0j", "mllExt", "ZH_", "H0L1"),
("1j", "mllExt", "ZH_", "H0M"),
("1j", "mllExt", "ZH_", "H0PH"),
("1j", "mllExt", "ZH_", "H0L1"),
]
ZHConfig = [ ("0j", "mthExt", "ZH_", "H0M"),
("0j", "mthExt", "ZH_", "H0PH"),
("0j", "mthExt", "ZH_", "H0L1"),
("1j", "mthExt", "ZH_", "H0M"),
("1j", "mthExt", "ZH_", "H0PH"),
("1j", "mthExt", "ZH_", "H0L1"),
]
'''
SigConfig2V = ZHConfig + WHConfig + VBFConfig
'''
ggHConfig = [ ("0j", "mllVSmthExt", "H0M"),
("0j", "mllVSmthExt", "H0PH"),
("0j", "mllVSmthExt", "H0L1"),
("1j", "mllVSmthExt", "H0M"),
("1j", "mllVSmthExt", "H0PH"),
("1j", "mllVSmthExt", "H0L1"),
]
'''
ggHConfig = [ ("0j", "mllVSmth", "H0M"),
("0j", "mllVSmth", "H0PH"),
("0j", "mllVSmth", "H0L1"),
("dytt_0j", "mllVSmth", "H0M"),
("dytt_0j", "mllVSmth", "H0PH"),
("dytt_0j", "mllVSmth", "H0L1"),
("top_0j", "mllVSmth", "H0M"),
("top_0j", "mllVSmth", "H0PH"),
("top_0j", "mllVSmth", "H0L1"),
("1j", "mllVSmth", "H0M"),
("1j", "mllVSmth", "H0PH"),
("1j", "mllVSmth", "H0L1"),
("dytt_1j", "mllVSmth", "H0M"),
("dytt_1j", "mllVSmth", "H0PH"),
("dytt_1j", "mllVSmth", "H0L1"),
("top_1j", "mllVSmth", "H0M"),
("top_1j", "mllVSmth", "H0PH"),
("top_1j", "mllVSmth", "H0L1"),
]
'''
ggHConfig = [ ("0j", "mllExt", "H0M"),
("0j", "mllExt", "H0PH"),
("0j", "mllExt", "H0L1"),
("1j", "mllExt", "H0M"),
("1j", "mllExt", "H0PH"),
("1j", "mllExt", "H0L1"),
]
ggHConfig = [ ("0j", "mthExt", "H0M"),
("0j", "mthExt", "H0PH"),
("0j", "mthExt", "H0L1"),
("1j", "mthExt", "H0M"),
("1j", "mthExt", "H0PH"),
("1j", "mthExt", "H0L1"),
]
'''
for cat, var, prod, sig in SigConfig2V :
create2VIntTemplates(cat, var, prod, sig, "", True)
for sys in Systematics :
if sys == "CMS_PU_2017" and ("WH" in prod or "ZH" in prod): continue
if "THU_ggH" in sys: continue
if "THU_qqH" in sys and ("WH" in prod or "ZH" in prod): continue
create2VIntTemplates(cat, var, prod, sig, "_"+sys+"Up", False)
create2VIntTemplates(cat, var, prod, sig, "_"+sys+"Down", False)
for cat, var, sig in ggHConfig :
create1VIntTemplates(cat, var, sig, "", True)
for sys in Systematics :
if "THU_qqH" in sys: continue
create1VIntTemplates(cat, var, sig, "_"+sys+"Up", False)
create1VIntTemplates(cat, var, sig, "_"+sys+"Down", False)
'''
AddOtherTemplates("0j", "mllVSmthExt", "H0M"),
AddOtherTemplates("0j", "mllVSmthExt", "H0PH"),
AddOtherTemplates("0j", "mllVSmthExt", "H0L1"),
AddOtherTemplates("1j", "mllVSmthExt", "H0M"),
AddOtherTemplates("1j", "mllVSmthExt", "H0PH"),
AddOtherTemplates("1j", "mllVSmthExt", "H0L1"),
'''
AddOtherTemplates("0j", "mllVSmth", "H0M"),
AddOtherTemplates("0j", "mllVSmth", "H0PH"),
AddOtherTemplates("0j", "mllVSmth", "H0L1"),
AddOtherTemplates("dytt_0j", "mllVSmth", "H0M"),
AddOtherTemplates("dytt_0j", "mllVSmth", "H0PH"),
AddOtherTemplates("dytt_0j", "mllVSmth", "H0L1"),
AddOtherTemplates("top_0j", "mllVSmth", "H0M"),
AddOtherTemplates("top_0j", "mllVSmth", "H0PH"),
AddOtherTemplates("top_0j", "mllVSmth", "H0L1"),
AddOtherTemplates("1j", "mllVSmth", "H0M"),
AddOtherTemplates("1j", "mllVSmth", "H0PH"),
AddOtherTemplates("1j", "mllVSmth", "H0L1"),
AddOtherTemplates("dytt_1j", "mllVSmth", "H0M"),
AddOtherTemplates("dytt_1j", "mllVSmth", "H0PH"),
AddOtherTemplates("dytt_1j", "mllVSmth", "H0L1"),
AddOtherTemplates("top_1j", "mllVSmth", "H0M"),
AddOtherTemplates("top_1j", "mllVSmth", "H0PH"),
AddOtherTemplates("top_1j", "mllVSmth", "H0L1"),
'''
AddOtherTemplates("0j", "mllExt", "H0M"),
AddOtherTemplates("0j", "mllExt", "H0PH"),
AddOtherTemplates("0j", "mllExt", "H0L1"),
AddOtherTemplates("1j", "mllExt", "H0M"),
AddOtherTemplates("1j", "mllExt", "H0PH"),
AddOtherTemplates("1j", "mllExt", "H0L1"),
AddOtherTemplates("0j", "mthExt", "H0M"),
AddOtherTemplates("0j", "mthExt", "H0PH"),
AddOtherTemplates("0j", "mthExt", "H0L1"),
AddOtherTemplates("1j", "mthExt", "H0M"),
AddOtherTemplates("1j", "mthExt", "H0PH"),
AddOtherTemplates("1j", "mthExt", "H0L1"),
'''
|
[
"tomas.kello@cern.ch"
] |
tomas.kello@cern.ch
|
67892bec91a5b51722b84ad0ed1c65ff578271b5
|
29511e6344136ab5b2e921dd10e48a2c85efc0e9
|
/days/46-48-beautifulsoup4/github-trending-scraper/scraper.py
|
2fe077498b6ca351a82a76ad8384d3158530d221
|
[
"MIT"
] |
permissive
|
michaelstrefeler/100daysofcode-with-python-course
|
29e695ece3366d2d60d9f3dab50580f2af619d3b
|
a332ac1bf23105904c987cdf02611b78282dd054
|
refs/heads/master
| 2020-04-02T12:47:42.531554
| 2019-01-31T17:01:50
| 2019-01-31T17:01:50
| 154,452,154
| 0
| 0
| null | 2018-10-24T06:45:49
| 2018-10-24T06:45:48
| null |
UTF-8
|
Python
| false
| false
| 595
|
py
|
from requests import get
from bs4 import BeautifulSoup
URL = "https://github.com/trending"
def pull_site():
raw_site_page = get(URL)
raw_site_page.raise_for_status()
return raw_site_page
def scrape(site):
soup = BeautifulSoup(site.text, 'html.parser')
repos = soup.find_all('li', 'col-12 d-block width-full py-4 border-bottom')
print("Trending repos on GitHub")
for tag in repos:
link = 'https://github.com' + tag.a.get("href")
name = tag.a.getText()
print(name, link)
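# --- Editor's illustrative variant (not in the original script) ---
# scrape() prints its results; returning (name, link) pairs instead keeps the
# data reusable elsewhere. Same URL and CSS classes as above are assumed.
def scrape_to_list(site):
    soup = BeautifulSoup(site.text, 'html.parser')
    repos = soup.find_all('li', 'col-12 d-block width-full py-4 border-bottom')
    return [(tag.a.getText().strip(), 'https://github.com' + tag.a.get('href'))
            for tag in repos]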
if __name__ == "__main__":
site = pull_site()
scrape(site)
|
[
"mstrefeler@me.com"
] |
mstrefeler@me.com
|
ada5076670ac2df02c7e357dcab945a4c4e346ff
|
5c7e1f689acf9e6602fcd71e9b35611e3f7b4f68
|
/flaskr/blog.py
|
477fcf8c55ed094b7e208573fec94e58ac284834
|
[] |
no_license
|
luisfdresch/flask-tutorial
|
d65da628fa6710c4a5185864dbb7181c9e8cb5c6
|
31ca9bda8d8ac253fa443d726a3c2d1838c3c1b0
|
refs/heads/main
| 2023-01-12T13:10:40.568332
| 2020-11-06T21:06:14
| 2020-11-06T21:06:14
| 310,701,254
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,653
|
py
|
from flask import Blueprint, flash, g, redirect, render_template, request, url_for
from werkzeug.exceptions import abort
from flaskr.auth import login_required
from flaskr.db import get_db
bp = Blueprint('blog', __name__)
@bp.route('/')
def index():
db = get_db()
posts = db.execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' ORDER BY created DESC'
).fetchall()
return render_template('blog/index.html', posts=posts)
@bp.route('/create', methods=('GET', 'POST'))
@login_required
def create():
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'INSERT INTO post (title, body, author_id)'
' VALUES (?, ?, ?)',
(title, body, g.user['id'])
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/create.html')
def get_post(id, check_author=True):
post = get_db().execute(
'SELECT p.id, title, body, created, author_id, username'
' FROM post p JOIN user u ON p.author_id = u.id'
' WHERE p.id = ?',
(id,)
).fetchone()
if post is None:
abort(404, "Post {0} doesn't exist.".format(id))
if check_author and post['author_id'] != g.user['id']:
abort(403)
return post
@bp.route('/<int:id>/update', methods=('GET', 'POST'))
@login_required
def update(id):
post = get_post(id)
if request.method == 'POST':
title = request.form['title']
body = request.form['body']
error = None
if not title:
error = 'Title is required'
if error is not None:
flash(error)
else:
db = get_db()
db.execute(
'UPDATE post SET title = ?, body = ?'
' WHERE id = ?',
(title, body, id)
)
db.commit()
return redirect(url_for('blog.index'))
return render_template('blog/update.html', post=post)
@bp.route('/<int:id>/delete', methods=('POST',))
@login_required
def delete(id):
get_post(id)
db = get_db()
db.execute('DELETE FROM post WHERE id = ?', (id,))
db.commit()
return redirect(url_for('blog.index'))
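# --- Editor's usage sketch (not part of the tutorial file) ---
# The blueprint is normally registered inside the application factory in
# flaskr/__init__.py; a minimal, assumed version of that step:
def register_blog_sketch(app):
    app.register_blueprint(bp)
    # expose the blueprint's '/' view under the plain endpoint name 'index'
    app.add_url_rule('/', endpoint='index')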
|
[
"66801762+luisfdresch@users.noreply.github.com"
] |
66801762+luisfdresch@users.noreply.github.com
|
f071dd3ab7d5003922d1cca53b99fbfe3d628dc9
|
84d7b74bf290ff73a7a1322900841628caf501c8
|
/9.py
|
44a143ce64257a64954ca0754edfc59f638f5e17
|
[] |
no_license
|
glen-s-abraham/sem3record
|
351d03dd7d20acc4167be57b73ef67ef45bdf313
|
18d109f3a56481a2d31ba79f82c4582e4260106d
|
refs/heads/master
| 2023-01-07T21:40:27.509805
| 2020-11-11T14:38:13
| 2020-11-11T14:38:13
| 308,214,178
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 558
|
py
|
"""List operations"""
uset=set()
i=0
while i!=6:
print("\n1.Add to set\n2.Remove from set\n3.Print set\n4.Count Elements of set\n5.Find element in set\n6.Clear set and Exit")
i=int(input("Enter Option\n"))
if i==1:
element=input("EnterElement:")
uset.add(element)
elif i==2:
print(uset)
pos=input("Enter element to delete")
uset.discard(pos)
elif i==3:
print(uset)
elif i==4:
print(len(uset))
elif i==5:
pos=input("Enter element to find in set")
if pos in uset:
print("Element found")
elif i==6:
uset.clear()
break
|
[
"glenprojects666@gmail.com"
] |
glenprojects666@gmail.com
|
58b227ae000cdf636e95f70367240f5ccb8b65c8
|
a7e86b6fdd072ba3b78fbf8ff0adc1f5dba8d8bb
|
/2. Linear Regression/DecisionTreeRegression/Untitled.py
|
55a0bc0a8f127ca4e3187eb089bf840aa78855a8
|
[] |
no_license
|
TheRochVoices/UdemyML
|
28cddbd5ed4ac9b8e453dcbbc11b0127293c5ba6
|
b5f8565f8fdc28d2c59d91b2ccd333eef992d9ec
|
refs/heads/master
| 2020-03-18T05:56:31.808020
| 2018-06-15T08:41:55
| 2018-06-15T08:41:55
| 134,368,535
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 466
|
py
|
# coding: utf-8
# In[1]:
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
dataSet = pd.read_csv('Position_Salaries.csv')
lvl = dataSet.iloc[:, 1:2].values
slry = dataSet.iloc[:, 2].values
# In[2]:
from sklearn.tree import DecisionTreeRegressor
regressor = DecisionTreeRegressor()
regressor.fit(lvl, slry)
# In[4]:
print(regressor.predict(lvl))
# In[5]:
# DTR takes the average of the dependent values in the splits that it has made.
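# In[6]: (editor's illustrative addition, not in the original notebook export)
# Plotting on a fine grid makes the piecewise-constant tree predictions visible.
X_grid = np.arange(lvl.min(), lvl.max() + 1, 0.01).reshape(-1, 1)
plt.scatter(lvl, slry, color='red')
plt.plot(X_grid, regressor.predict(X_grid), color='blue')
plt.title('Decision Tree Regression (illustrative)')
plt.xlabel('Position level')
plt.ylabel('Salary')
plt.show()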
|
[
"rochaks001@gmail.com"
] |
rochaks001@gmail.com
|
d51b0b507c16f3480bdd85165672cf7a919454c4
|
9e12f81814b24aa54acccb80fcaf3bf688bd984a
|
/Spiders/youku/parsers/parse_detail_list_page.py
|
0d5dc03cc9424dcec81974f5a428c0d4251a0c13
|
[] |
no_license
|
wangtaihong/mov
|
76793050cdece48894be6433dd407cc918c0bbb3
|
8d394ca9c8c2906a585e58b85f974f68664629d3
|
refs/heads/master
| 2020-03-19T20:02:52.626127
| 2018-07-16T02:40:52
| 2018-07-16T02:40:52
| 136,885,627
| 1
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 7,708
|
py
|
# coding:utf-8
import re
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from lxml import etree
def url_format(url):
"""
//v.youku.com/v_show/id_XMzA5NTA1ODg2MA==.html?s=bc2a0ca1a64b11e6b9bb
http://v.youku.com/v_show/id_XMzA5NTA1ODg2MA==.html
"""
url = re.sub('http:', '', url)
return "http:" + re.sub('(\.html.*)', '.html', url)
def parse_detail(r, url):
try:
page = etree.HTML(r)
except Exception as e:
return False
sss = re.sub(u'\\n', '', r)
data = dict()
stars = []
# title_show = re.search(u'class="p-thumb"><a title="([^"]+?)" href="([^"]+?)" target=',sss)
# v-show: v-show可能没有的
# v_show = re.search(u'class="p-thumb"><a title="([^" href]+?)" href="([^" ]+?)" target=',sss)
v_show = page.xpath(
u'//div[@class="p-post"]/div[@class="yk-pack p-list"]/div[@class="p-thumb"]/a')
if len(v_show) > 0:
data['v_show'] = url_format(v_show[0].get("href"))
    # poster image:
# thumb = re.search(u'^(?=.*(http\://\w+\d+\.\w+\.com/(\w*\d*)+)").*$',sss).group(1)
thumb = page.xpath(
u'//div[@class="p-post"]/div[@class="yk-pack p-list"]/div[@class="p-thumb"]/img')
if len(thumb) > 0:
data['thumb'] = [{"url": url_format(thumb[0].get(
"src")), "title":thumb[0].get("alt"), "width":200, "height":300}]
data['title'] = thumb[0].get("alt")
# category:
# category = page.xpath('//div[@class="p-base"]/ul/li[@class="p-row p-title"]')[0].find('a')
category = page.xpath(
'//div[@class="p-base"]/ul/li[@class="p-row p-title"]/a')
if len(category) > 0:
data['category'] = category[0].text
# category_url = category.get('href')
    # year: may be missing
year = page.xpath(
'//div[@class="p-base"]/ul/li[@class="p-row p-title"]/span[@class="sub-title"]')
if len(year) > 0:
data['year'] = year[0].text
    # alias: may be missing
alias = page.xpath('//div[@class="p-base"]/ul/li[@class="p-alias"]')
if len(alias) > 0:
data['alias'] = alias[0].get("title")
    # theatrical release date: may be missing
published_at = re.search(u'>上映:</label>(\w+-\d+-\d+)*</span>', sss)
if published_at != None:
data['published_at'] = published_at.group(1)
    # Youku release date: may be missing
yk_published_at = re.search(u'>优酷上映:</label>(\w+-\d+-\d+)*</span>', sss)
if yk_published_at != None:
data['yk_published_at'] = yk_published_at.group(1)
    # Youku rating: may be missing
youku_score = page.xpath(
'//div[@class="p-base"]/ul/li[@class="p-score"]/span[@class="star-num"]')
if len(youku_score) > 0:
data['youku_score'] = youku_score[0].text
    # Douban rating: may be missing
douban_score = re.search(u'<span class="db-bignum">(\d+\.\d*)</span>', sss)
if douban_score != None:
data['douban_score'] = douban_score.group(1)
    # Douban review count, may be missing
douban_cm_num = re.search(u'<span class="db-cm-num">(\d*)评价</span>', sss)
if douban_cm_num != None:
data['douban_cm_num'] = douban_cm_num.group(1)
    # main cast: may be missing
actors = page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]')
if len(actors) > 0:
data['actors'] = actors[0].get('title')
data['actor_list'] = []
for x in page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]/a'):
print(x)
data['actor_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
    # episode count
renew = page.xpath(
'//div[@class="p-base"]/ul/li[@class="p-row p-renew"]')
if len(renew) > 0:
data['renew'] = renew[0].text
    # links to the main cast: may be missing
actors_a = page.xpath(
'//div[@class="p-base"]/ul/li[@class="p-performer"]/a')
if len(actors_a) > 1:
for x in actors_a:
# actor_url = url_format(x.get('href'))
actor_name = x.text
stars.append(url_format(x.get('href')))
            # rd.sadd(config.yk_star_task, url_format(x.get('href'))) # star crawl queue; the redis set deduplicates
# //list.youku.com/star/show/uid_UODY0MjQ=.html
    # directors: iterate over the links
# directed = page.xpath('//div[@class="p-base"]/ul/li[@class="p-performer"]')[0].getnext().findall('a')
directed = page.xpath(
u'//div[@class="p-base"]/ul/li[contains(text(),"导演:")]/a')
data['director_list'] = []
if len(directed) > 0:
data['directors'] = ''
for x in directed:
# star_url = url_format(x.get("href"))
data['directors'] = data['directors'] + '|' + x.text
stars.append(url_format(x.get('href')))
data['director_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
# rd.sadd(config.yk_star_task, url_format(x.get("href"))) # 明星采集队列,redis set特性去重
    # region, may be missing
area = re.search(
u'>地区:<a href="//list\.youku\.com/category/show/([^\.html]+?)\.html" target="_blank">([^</a></li>]+?)</a>', sss)
if area != None:
data['area'] = area.group(2)
    # genres: iterate over the links
types = page.xpath(
u'//div[@class="p-base"]/ul/li[contains(text(),"类型")]/a')
if len(types) > 0:
data['types'] = ''
for x in types:
data['types'] = data['types'] + ',' + x.text
    # total play count: may be None
plays_num = re.search(u'<li>总播放数:([^</li>]+?)</li>', sss)
if plays_num != None:
data['plays_num'] = plays_num.group(1)
    # comment count: may be None
youku_comments_num = re.search(u'<li>评论:([^</li>]+?)</li>', sss)
if youku_comments_num:
data['youku_comments_num'] = youku_comments_num.group(1)
    # upvotes ("ding"): may be empty
ding = re.search(u'<li>顶:([^</li>]+?)</li>', sss)
if ding:
data['ding'] = ding.group(1)
    # synopsis:
try:
page.xpath(
u'//div[@class="p-base"]/ul/li[@class="p-row p-intro"]/span[@class="intro-more hide"]')[0]
except Exception as e:
print("parse_detail_list_page:", url, str(e), r)
#update_session(proxy)
return False
# sys.exit("die")
summary = page.xpath(
u'//div[@class="p-base"]/ul/li[@class="p-row p-intro"]/span[@class="intro-more hide"]')[0]
if summary != None:
data['summary'] = summary.text
    # suitable age range, may be empty
age = re.search(u'>适用年龄:([^</li>]+?)</li>', sss)
if age:
data['age'] = age.group(1)
peiyin = page.xpath(
u'//div[@class="p-base"]/ul/li[contains(text(),"声优:")]/a')
if len(peiyin) > 0:
data['peiyin'] = ''
data['peiyin_list'] = []
for x in peiyin:
data['peiyin'] = data['peiyin'] + '|' + x.text
stars.append(url_format(x.get('href')))
# data['peiyin_list'].append({"name":x.text,"youkuid":re.search(u"show/(.*)\.html",etree.tostring(x)).group(1)})
data['peiyin_list'].append({"name":x.text,"youkuid":re.search(u"//list\.youku\.com/star/show/(.*)\.html",etree.tostring(x)).group(1)})
    # present for variety shows
presenters = page.xpath(
u'//div[@class="p-base"]/ul/li[contains(text(),"主持人:")]/a')
if len(presenters) > 0:
data['presenters'] = ""
for x in presenters:
data['presenters'] = data['presenters'] + '|' + x.text
stars.append(url_format(x.get('href')))
# rd.sadd(config.yk_star_task, url_format(x.get("href"))) # 明星采集队列,redis set特性去重
return {"data": data, "stars": stars}
|
[
"wangtaihong8@163.com"
] |
wangtaihong8@163.com
|
636a708a2340a9dd663072ed3c62b42dc9dca597
|
c62097f261a152ef849ee1328ce4dae5565e99c6
|
/lec2_branch_loops.py
|
7a69aaf8c485c0a1e543dc8a4685af7716c26dd4
|
[] |
no_license
|
grovemonkey/pyprojects
|
84c4de0576fff90f4d6ea5eb6db34927945aa059
|
4970c8e14b8de72cc30d90277eb8264a039b0aeb
|
refs/heads/master
| 2020-11-24T17:29:18.867645
| 2020-03-29T16:11:27
| 2020-03-29T16:11:27
| 228,272,908
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 3,313
|
py
|
###################
## EXAMPLE: strings
###################
#hi = "hello there"
#name = "ana"
#greet = hi + name
#print(greet)
#greeting = hi + " " + name
#print(greeting)
#silly = hi + (" " + name)*3
#print(silly)
####################
## EXAMPLE: output
####################
#x = 15
#x_str = str(x)
#print("my fav number is", x, ".", "x=", x)
#print("my fav number is", x_str + "." + "x=" + x_str)
#print("my fav number is" + x_str + "." + "x=" + x_str)
#print("my fav number is" + x_str + "." + "x=" + x_str)
####################
## EXAMPLE: input
####################
#text = input("Type anything... ")
#print(5*text)
#num = int(input("Type a number... "))
#print(5*num)
#####################
### EXAMPLE: conditionals/branching
#####################
#####################
### Entered 3 Input Variables so I can begin the program body
#####################
#annual_salary = float(input("Enter Your Annual Salary: "))
#portion_saved = float(input("Enter the percent of your salary to save, as a decimal: "))
#total_cost = float(input("Enter the cost of your dream home: "))
#####################
### Next I need to use those 3 inputs to generate the result
### using 4 other conditions
#####################
#if x == y:
# print("x and y are equal")
# if y != 0:
# print("therefore, x / y is", x/y)
#elif x < y:
# print("x is smaller")
#elif x > y:
# print("y is smaller")
#print("thanks!")
####################
## EXAMPLE: remainder
####################
#num = int(input("Enter a number: "))
#if num % 2 == 0:
# print("number is even")
#else:
# print("number is odd")
####################
## EXAMPLE: while loops
## Try expanding this code to show a sad face if you go right
## twice and flip the table any more times than that.
## Hint: use a counter
####################
#n = input("You are in the Lost Forest\n****************\n****************\n :)\n****************\n****************\nGo left or right? ")
#while n == "right" or n == "Right":
# n = input("You are in the Lost Forest\n****************\n****** ***\n (╯°□°)╯︵ ┻━┻\n****************\n****************\nGo left or right? ")
#print("\nYou got out of the Lost Forest!\n\o/")
#n = 0
#while n < 5:
# print(n)
# n = n+1
####################
## EXAMPLE: for loops
####################
#for n in range(5):
# print(n)
#
#mysum = 0
#for i in range(10):
# mysum += i
#print(mysum)
#
#mysum = 0
#for i in range(7, 10):
# mysum += i
#print(mysum)
##
#mysum = 0
#for i in range(5, 11, 2):
# mysum += i
# if mysum == 5:
# break
# mysum += 1
#print(mysum)
####################
## EXAMPLE: perfect squares
####################
#ans = 0
#neg_flag = False
#x = int(input("Enter an integer: "))
#if x < 0:
# neg_flag = True
#while ans**2 < x:
# ans = ans + 1
#if ans**2 == x:
# print("Square root of", x, "is", ans)
#else:
# print(x, "is not a perfect square")
# if neg_flag:
# print("Just checking... did you mean", -x, "?")
####################
## TEST YOURSELF!
## Modify the perfect squares example to print
## imaginary perfect sqrts if given a negative num.
####################
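####################
## EDITOR'S SKETCH (one possible answer to the exercise above, not part of
## the original course file): report imaginary roots for negative input.
####################
#ans = 0
#x = int(input("Enter an integer: "))
#while ans**2 < abs(x):
#    ans = ans + 1
#if ans**2 == abs(x):
#    if x < 0:
#        print("Square root of", x, "is", str(ans) + "i")
#    else:
#        print("Square root of", x, "is", ans)
#else:
#    print(x, "is not a perfect square")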
|
[
"noreply@github.com"
] |
grovemonkey.noreply@github.com
|
bf4cc1866bdf160e9fef06a248672cd9a1a9bd77
|
a50b27edfb2ae5398169c452731257019d23583d
|
/contrib/pyminer/pyminer.py
|
e09cc81ba1cf59a4ef3e342474e90110c043d468
|
[
"MIT"
] |
permissive
|
medelin/bearcoin
|
b33a93c85372c7af000bb300871a1df9cb603ee5
|
0e35af5b1a1f92594401f3fe1d090a13d963bb0e
|
refs/heads/master
| 2016-08-16T10:54:58.298304
| 2015-07-01T05:06:07
| 2015-07-01T05:06:07
| 34,278,961
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 6,449
|
py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Nautiluscoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class NautiluscoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = NautiluscoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
[
"piotr@medelin.net"
] |
piotr@medelin.net
|
ae3c07417196b04210dbed26d9b1fba5aac5f9ec
|
07ec5a0b3ba5e70a9e0fb65172ea6b13ef4115b8
|
/lib/python3.6/site-packages/numpy/core/tests/test_regression.py
|
39a92211635a6dcc5cd242241cf5f18f0e08b70e
|
[] |
no_license
|
cronos91/ML-exercise
|
39c5cd7f94bb90c57450f9a85d40c2f014900ea4
|
3b7afeeb6a7c87384049a9b87cac1fe4c294e415
|
refs/heads/master
| 2021-05-09T22:02:55.131977
| 2017-12-14T13:50:44
| 2017-12-14T13:50:44
| 118,736,043
| 0
| 0
| null | 2018-01-24T08:30:23
| 2018-01-24T08:30:22
| null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
version https://git-lfs.github.com/spec/v1
oid sha256:2d5a65e7c1da1e87651cabd3481c0012ad15f784275aad1259a1312faf19cfc2
size 81211
|
[
"seokinj@jangseog-in-ui-MacBook-Pro.local"
] |
seokinj@jangseog-in-ui-MacBook-Pro.local
|
98273d8b2de18dfad4d203ed5449358037428885
|
5bdd9737aef29f0341676b21c2df2d371985628f
|
/object_ref_object.py
|
5325909f9582199489f4ec01585200986c4fa51a
|
[] |
no_license
|
lamontu/starter
|
23eb9ceb8f260a7380d4fb2111158f0dc06dd7cb
|
176539fc03508b78da320737eeae43b4e509dbd6
|
refs/heads/master
| 2022-09-25T22:51:16.456495
| 2022-09-10T15:13:48
| 2022-09-10T15:13:48
| 62,473,690
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 337
|
py
|
# -*- coding: utf-8 -*-
class from_obj(object):
def __init__(self, to_obj):
self.to_obj = to_obj
print('b = [1, 2, 3]')
b = [1, 2, 3]
print('a = from_obj(b)')
a = from_obj(b)
print('id(b) >>>>')
print(hex(id(b)))
print()
print('id(a) >>>>')
print(hex(id(a)))
print()
print('id(a.to_obj) >>>>')
print(hex(id(a.to_obj)))
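# Illustrative addition (not part of the original script): a.to_obj and b are
# the same object, so mutating the list through either name is visible
# through the other.
print()
print('b.append(4); a.to_obj >>>>')
b.append(4)
print(a.to_obj)        # [1, 2, 3, 4] -- the same underlying list
print(a.to_obj is b)   # True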
|
[
"lamontyu@163.com"
] |
lamontyu@163.com
|
2b8edfa347b5b9d6a6b2c2d912242611e9907980
|
7b102f9c8f2e3f9240090d1d67af50333a2ba98d
|
/nonfatal_code/hospital/Formatting/001_pre_format_UK_UTLA_fit_models.py
|
a401509726a0ff362b8b717c593c63c90020b098
|
[] |
no_license
|
Nermin-Ghith/ihme-modeling
|
9c8ec56b249cb0c417361102724fef1e6e0bcebd
|
746ea5fb76a9c049c37a8c15aa089c041a90a6d5
|
refs/heads/main
| 2023-04-13T00:26:55.363986
| 2020-10-28T19:51:51
| 2020-10-28T19:51:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,688
|
py
|
# -*- coding: utf-8 -*-
"""
formatting UK UTLA data
"""
import pandas as pd
import numpy as np
import platform
import sys
import statsmodels.formula.api as smf
import statsmodels.api as sm
import time
sys.path.append("FILEPATH")
from hosp_prep import *
# Environment:
if platform.system() == "Linux":
root = "FILEPATH"
else:
root = "FILEPATH"
print("need to incorporate injuries data which are stored in separate files")
################################################
# Use data prepped on the cluster
###############################################
# was too big to merge locally so merged on the cluster and written to FILEPATH
# just read in the merged data from drive
both = pd.read_csv("FILEPATH", compression='gzip')
# both = pd.read_csv("FILEPATH", compression='gzip')
#both = pd.read_csv("FILEPATH", compression='gzip')
# back = both.copy()
# the regional level data needs to be split to include age start 90
# it's breaking the models so I'm gonna subset that age group out
# both = both[both.age_start < 80]
# also drop 2011, 2012
# both = both[both.fiscal_year < 2011]
# drop the rows that don't match (only 2 rows before 2011)
both = both[~both.log_rate.isnull()]
##################################
# FIT THE LINEAR MODELS
###################################
causes = both.cause_code.unique()
# both = both[both.cause_code.isin(causes)]
both['preds'] = np.nan # initialize pred col
# loop over causes and sexes
start = time.time()
counter = 0
counter_denom = causes.size
for cause in causes:
for s in [1, 2]:
# create the mask
mask = (both['cause_code'] == cause) & (both['sex_id'] == s)
if both[mask].log_rate.isnull().sum() == both[mask].shape[0]:
print("there's no data")
continue
# our formula for predictions
formula = "log_rate ~ C(age_start) + C(location_id)"
# fit the model
fit = smf.ols(formula, data=both[mask]).fit()
# exponentiate the predicted values
both.loc[mask, 'preds'] = np.exp(fit.predict(both[mask]))
if s == 1:
counter += 1
if counter % 125 == 0:
print(round((counter / counter_denom) * 100, 1), "% Done")
print("Run time: ", (time.time()-start)/60, " minutes")
print("Done in ", (time.time()-start) / 60, " minutes")
# both.to_csv("FILEPATH")
###################################################
# both = back.copy()
# subtract off the existing cases that we have at utla level
# use a groupby transform to leave the data in same format but create sums of
# known values at the regional level
reg_groups = ['cause_code', 'location_parent_id', 'age_start', 'age_end',
'sex_id', 'fiscal_year']
# fill missing utla level data with zeroes instead of NA so rows will be
# included in groupby
both['value'].fillna(value=0, inplace=True)
# sum the existing utla values up to the regional level
both['utla_val_to_reg'] = both.groupby(reg_groups)['value'].transform('sum')
# split the data
# subset the data to get only rows where utla value was suppressed
pred_df = both[both.utla_log_rate.isnull()].copy()
# drop the rows where utla value was suppressed
both = both[both.utla_log_rate.notnull()]
# subtract the known utla values from the regional values to get
# residual (unknown) values
pred_df['reg_resid_value'] = pred_df['reg_value'] - pred_df['utla_val_to_reg']
# new method
# get into count space
pred_df['pred_counts'] = pred_df['preds'] * pred_df['utla_population']
# sum utla predicted counts to region level
pred_df['utla_pred_to_reg'] = pred_df.groupby(reg_groups)['pred_counts'].\
transform('sum')
# make the weights
pred_df['weight'] = pred_df['reg_resid_value'] / pred_df['utla_pred_to_reg']
# apply weights to predicted values
pred_df['weighted_counts'] = pred_df['pred_counts'] * pred_df['weight']
# now test
reg_compare = pred_df.copy()
# get the sum of values at the regional level
reg_compare = reg_compare[['cause_code', 'location_parent_id', 'age_start',
'age_end', 'sex_id', 'fiscal_year',
'reg_resid_value']]
reg_compare.drop_duplicates(inplace=True)
reg_sum = reg_compare.reg_resid_value.sum()
# get the sum of desuppressed values
pred_df_sum = pred_df.weighted_counts.sum()
# pretty dang close to zero
assert round(reg_sum - pred_df_sum, 5) == 0
# assert residual vals are smaller than regional vals
assert (pred_df.reg_value >= pred_df.reg_resid_value).all()
# concat de-suppressed and un-suppressed data back together
both = pd.concat([both, pred_df])
# merge data that needed to be de-suppressed and data that didn't into same col
# fill value with desuppressed val where value = 0 and desuppressed isn't null
condition = (both['value'] == 0) & (both['weighted_counts'].notnull())
both.loc[condition, 'value'] = both.loc[condition, 'weighted_counts']
# write to a csv for use with a Shiny app
both['rates'] = both['value'] / both['utla_population']
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value',
'weight', 'rates', 'utla_population']].\
to_csv("FILEPATH", index=False)
# write to FILEPATH intermediate data
both[['location_id', 'location_parent_id', 'age_start', 'age_end', 'sex_id',
'fiscal_year', 'cause_code', 'utla_log_rate', 'value', 'preds',
'reg_value', 'reg_resid_value', 'weight']].\
to_csv("FILEPATH", index=False)
|
[
"nsidles@uw.edu"
] |
nsidles@uw.edu
|
b1363d2eeea65f67da9c4da23778667e39565849
|
ee4152e9b5eafa7afafe05de04391a9a3606eea3
|
/client/API/AddRecord.py
|
431bc9058aefc1020df12034d650ed008e3998a5
|
[] |
no_license
|
adibl/password_saver
|
3a06c8c04905d82f01fc14b41b646a6578af2b70
|
2ea73781db92ce750f91039251f2c06e929da7bb
|
refs/heads/master
| 2020-04-09T23:51:34.804870
| 2019-06-16T10:13:42
| 2019-06-16T10:13:42
| 160,665,348
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 1,675
|
py
|
"""
name:
date:
description
"""
import base64
import json
import requests
import os
from .connection import Request
class Passwords(object):
FILE_NAME = 'token.txt'
@classmethod
def handle(cls, url, username, password):
return cls.POST(url, username, password)
@classmethod
def GET(cls):
auto = cls.read_jwt()
if auto is None:
return {'general': 401}
        responce = Request().get_conn().get(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)})
if responce.status_code == 200:
return json.loads(responce.text)
else:
return {'general': responce.status_code}
@classmethod
def POST(cls, url, username, password):
auto = cls.read_jwt()
if auto is None:
return {'general': 401}
        encode_url = base64.urlsafe_b64encode(url)
        responce = Request().get_conn().post(Request.URI + '/passwords', headers={'Authorization': 'Bearer {0}'.format(auto)}
, json={'username': username, 'password': password,
'program_id': encode_url})
if responce.status_code == 200:
return True
elif responce.status_code == 442:
return json.loads(responce.text)
else:
return {'general': 'general error'}
@classmethod
def read_jwt(cls):
if os.path.isfile(cls.FILE_NAME):
            with open(cls.FILE_NAME, 'rb') as handle:
                jwt = handle.read()
return jwt
else:
return None
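# Illustrative usage sketch (not part of the original module): this assumes a
# valid JWT has already been saved to token.txt and that the API behind
# Request.URI is reachable; the URL and credentials below are placeholders.
if __name__ == '__main__':
    print Passwords.handle('https://example.com/login', 'some_user', 'some_password')
    print Passwords.GET()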
|
[
"bleyer23@gmail.com"
] |
bleyer23@gmail.com
|
3020613b94d8ab6d48331de09fbcc650efe92b54
|
1978a9455159b7c2f3286e0ad602652bc5277ffa
|
/exercises/15_module_re/task_15_2a.py
|
ff8cb7e603b04c43d8bed5f08c6262dda11c4009
|
[] |
no_license
|
fortredux/py_net_eng
|
338fd7a80debbeda55b5915dbfba4f5577279ef0
|
61cf0b2a355d519c58bc9f2b59d7e5d224922890
|
refs/heads/master
| 2020-12-03T17:32:53.598813
| 2020-04-08T20:55:45
| 2020-04-08T20:55:45
| 231,409,656
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,102
|
py
|
# -*- coding: utf-8 -*-
'''
Task 15.2a
Create a function convert_to_dict that expects two arguments:
* a list of field names
* a list of tuples with values
The function returns the result as a list of dictionaries, where the keys are taken from the first list
and the values are filled in from the second.
For example, if the function is passed the headers list and the list
[('FastEthernet0/0', 'up', 'up', '10.0.1.1'),
 ('FastEthernet0/1', 'up', 'up', '10.0.2.1')]
the function should return a list of dictionaries like this (the field order may differ):
[{'interface': 'FastEthernet0/0', 'status': 'up', 'protocol': 'up', 'address': '10.0.1.1'},
 {'interface': 'FastEthernet0/1', 'status': 'up', 'protocol': 'up', 'address': '10.0.2.1'}]
Check the function with:
* first argument: the headers list
* second argument: the result returned by the parse_sh_ip_int_br function from task 15.2 when sh_ip_int_br.txt is passed to it as an argument.
The parse_sh_ip_int_br function does not need to be copied.
Restriction: all tasks must be completed using only the topics covered so far.
'''
import re
from task_15_2 import parse_sh_ip_int_br
parsed_sh_ip_int_br = parse_sh_ip_int_br('/home/vagrant/GitHub/pynet_rep/exercises/15_module_re/sh_ip_int_br.txt')
headers = ['interface', 'address', 'status', 'protocol']
def convert_to_dict(list_headers, list_values):
final_list = []
for tup in list_values:
final_list.append(dict(zip(list_headers, tup)))
return final_list
if __name__ == '__main__':
from pprint import pprint
pprint(convert_to_dict(headers, parsed_sh_ip_int_br))
|
[
"fortunaredux@protonmail.com"
] |
fortunaredux@protonmail.com
|
e9ab3371c89c06a80611e79a4dffd4bb44019dfa
|
3718077f1cbbc458fdb55dd7f904baab4b493bde
|
/main.py
|
5417be7d37b58912767d73d230f893f70ce35013
|
[] |
no_license
|
SupersonicCoder18/THE-MOVIE-API-
|
7b65cf9d8caba1f44826f0b03c188c973296155b
|
3a75eda4b0d1a7caf183d518a2b615ff086efbcd
|
refs/heads/main
| 2023-02-21T21:23:01.509582
| 2021-01-25T12:24:28
| 2021-01-25T12:24:28
| 332,738,371
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 2,772
|
py
|
from flask import Flask, jsonify, request
import csv
from storage import all_movies, liked_movies, not_liked_movies, did_not_watch
from Demographic_Filtering import output
from Content_Filtering import get_recommendations
app = Flask(__name__)
@app.route("/get-movie")
def get_movie():
movie_data = {
"title": all_movies[0][19],
"poster_link": all_movies[0][27],
"release_date": all_movies[0][13] or "N/A",
"duration": all_movies[0][15],
"rating": all_movies[0][20],
"overview": all_movies[0][9],
}
return jsonify({
"data": movie_data,
"status": "Success!"
})
@app.route("/liked-movie", methods = ["POST"])
def liked_movie():
    global all_movies
    movie = all_movies[0]
    all_movies = all_movies[1:]
    liked_movies.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/unliked-movie", methods = ["POST"])
def unliked_movie():
    global all_movies
    movie = all_movies[0]
    all_movies = all_movies[1:]
    not_liked_movies.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/did-not-watch-movie", methods = ["POST"])
def did_not_watched_movie():
    global all_movies
    movie = all_movies[0]
    all_movies = all_movies[1:]
    did_not_watch.append(movie)
return jsonify({
"status": "Success"
}), 201
@app.route("/popular-movies")
def popular_movies():
movie_data = []
for movie in output:
_d = {
"title": movie[0],
"poster_link": movie[1],
"release_date": movie[2] or "N/A",
"duration": movie[3],
"rating": movie[4],
"overview": movie[5]
}
movie_data.append(_d)
return jsonify({
"data": movie_data,
"status": "success"
}), 200
@app.route("/recommended-movies")
def recommended_movies():
all_recommended = []
for liked_movie in liked_movies:
output = get_recommendations(liked_movie[19])
for data in output:
all_recommended.append(data)
import itertools
all_recommended.sort()
all_recommended = list(all_recommended for all_recommended,_ in itertools.groupby(all_recommended))
movie_data = []
for recommended in all_recommended:
_d = {
"title": recommended[0],
"poster_link": recommended[1],
"release_date": recommended[2] or "N/A",
"duration": recommended[3],
"rating": recommended[4],
"overview": recommended[5]
}
movie_data.append(_d)
return jsonify({
"data": movie_data,
"status": "success"
}), 200
if __name__ == "__main__":
app.run()
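# Illustrative client sketch (not part of the original app): one way to
# exercise the JSON endpoints above with the requests library, assuming the
# Flask development server is running on its default address. The function is
# only defined here, never called.
def _demo_client(base_url="http://127.0.0.1:5000"):
    import requests
    print(requests.get(base_url + "/get-movie").json())
    print(requests.post(base_url + "/liked-movie").status_code)      # 201 on success
    print(requests.get(base_url + "/recommended-movies").json())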
|
[
"noreply@github.com"
] |
SupersonicCoder18.noreply@github.com
|
b73073e4f2e03c0334d581cd58c370cbb1610117
|
fc74465456f450ecf6570b442cf50c3e8d1660d6
|
/client/utils/anylizer.py
|
6edbc18fefee6ec6417c80c73c61a77dbf30df5a
|
[] |
no_license
|
Kipparis/Colored-Music
|
be9c62dd2f2df51acb3d8d83a12cb39744845494
|
d8f197d3650cbfa8687e787a17e604b3b5b4437d
|
refs/heads/master
| 2021-06-24T18:54:21.868129
| 2021-03-14T09:28:12
| 2021-03-14T09:28:12
| 215,289,291
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 130
|
py
|
if __name__ == "__main__":
print("create instance of class for each song and add callback to detect\
when beat occure")
|
[
"keepintine@gmail.com"
] |
keepintine@gmail.com
|
881b5c0fc9bea295c8d51dcae0942461610bb9c2
|
8c5f1e07333edfd14a58677ea90ea9a8ec24daa7
|
/examples/simple_pendulum/custom_simple_pendulum.py
|
423dcab619d69ba966d9a866ae2b925a8862fb9f
|
[
"MIT"
] |
permissive
|
echoix/pyro
|
52c37b3c14fb3b52977be510545fdc43922dd8f9
|
787920cb14e3669bc65c530fd8f91d4277a24279
|
refs/heads/master
| 2020-09-07T09:08:21.114064
| 2019-11-10T05:59:50
| 2019-11-10T05:59:50
| 220,733,155
| 0
| 0
|
MIT
| 2019-11-10T02:52:39
| 2019-11-10T02:52:38
| null |
UTF-8
|
Python
| false
| false
| 1,412
|
py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 7 12:19:01 2018
@author: nvidia
"""
###############################################################################
import numpy as np
###############################################################################
from pyro.dynamic import pendulum
###############################################################################
###############################################################################
class MyCustomPendulum( pendulum.SinglePendulum ):
"""
"""
###########################################################################
# Only overload functions that are different from base version
###########################################################################
def setparams(self):
""" Set model parameters here """
# kinematic
self.l1 = 3
self.lc1 = 2
# dynamic
self.m1 = 10
self.I1 = 10
self.gravity = 9.81
self.d1 = 50
'''
#################################################################
################## Main ########
#################################################################
'''
if __name__ == "__main__":
""" MAIN TEST """
sys = MyCustomPendulum()
x0 = np.array([0.8,0])
sys.plot_animation( x0 )
|
[
"alx87grd@gmail.com"
] |
alx87grd@gmail.com
|
042afc513c24332f122836a2cec49692b2f77a28
|
7a63ce94e1806a959c9c445c2e0bae95afb760c8
|
/tests/incident/test_resolve.py
|
8ccf653a5dbc4b46fd96837ef309be097512d6e1
|
[
"MIT"
] |
permissive
|
pklauke/pycamunda
|
20b54ceb4a40e836148e84912afd04d78d6ba0ec
|
3faac4037212df139d415ee1a54a6594ae5e9ac5
|
refs/heads/master
| 2023-08-18T10:23:30.503737
| 2022-04-17T18:34:40
| 2022-04-17T18:34:40
| 240,333,835
| 40
| 16
|
MIT
| 2023-09-12T13:29:08
| 2020-02-13T18:37:25
|
Python
|
UTF-8
|
Python
| false
| false
| 1,602
|
py
|
# -*- coding: utf-8 -*-
import unittest.mock
import pytest
import pycamunda.incident
from tests.mock import raise_requests_exception_mock, not_ok_response_mock
def test_resolve_params(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
assert resolve_incident.url == engine_url + '/incident/anId'
assert resolve_incident.query_parameters() == {}
assert resolve_incident.body_parameters() == {}
@unittest.mock.patch('requests.Session.request')
def test_resolve_calls_requests(mock, engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
resolve_incident()
assert mock.called
assert mock.call_args[1]['method'].upper() == 'DELETE'
@unittest.mock.patch('requests.Session.request', raise_requests_exception_mock)
def test_resolve_raises_pycamunda_exception(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
with pytest.raises(pycamunda.PyCamundaException):
resolve_incident()
@unittest.mock.patch('requests.Session.request', not_ok_response_mock)
@unittest.mock.patch('pycamunda.base._raise_for_status')
def test_resolve_raises_for_status(mock, engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
resolve_incident()
assert mock.called
@unittest.mock.patch('requests.Session.request', unittest.mock.MagicMock())
def test_resolve_returns_none(engine_url):
resolve_incident = pycamunda.incident.Resolve(url=engine_url, id_='anId')
result = resolve_incident()
assert result is None
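# Illustrative sketch (not part of the original test module): outside the test
# suite, the class under test would be called directly against a running
# Camunda REST engine; the URL and incident id below are placeholders.
def _resolve_example():
    resolve_incident = pycamunda.incident.Resolve(
        url='http://localhost:8080/engine-rest', id_='anIncidentId'
    )
    resolve_incident()   # returns None on success, raises pycamunda.PyCamundaException otherwise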
|
[
"peter.klauke@tu-dortmund.de"
] |
peter.klauke@tu-dortmund.de
|
b549437484d60e16f3abc854f97caa01baff0c64
|
cfdaf1675a6a6a3c21c163ea48556a82d1f761f7
|
/Actividades/AF04/ventana_principal.py
|
591a0cbb2d104c65690f5adbec995595b09c185e
|
[] |
no_license
|
catalinamusalem/Catalina
|
8114568486f2e6e8b73def164274064de6790bbb
|
e508ccb622e03e543c1a7da6b2c1d4636325b92b
|
refs/heads/master
| 2022-11-28T14:42:55.037042
| 2020-07-05T22:06:01
| 2020-07-05T22:06:01
| 286,140,628
| 0
| 0
| null | null | null | null |
UTF-8
|
Python
| false
| false
| 5,460
|
py
|
import os
import sys
from random import choice
from PyQt5.QtWidgets import QLabel, QWidget, QLineEdit, \
QHBoxLayout, QVBoxLayout, QPushButton
from PyQt5.QtCore import Qt, pyqtSignal
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication
class VentanaPrincipal(QWidget):
    # Here you must create a signal that you will use to send the play to the back-end
senal_enviar_jugada = pyqtSignal(dict)
def __init__(self, *args):
super().__init__(*args)
self.crear_pantalla()
def crear_pantalla(self):
        # Here you must create the empty window.
self.setWindowTitle("DCCuent")
        # That is, create and add the labels for the game data, but without any content
nombre = ""
vict = ""
derrot = ""
self.rutai=""
self.rutar=""
self.rutaa=""
self.pixeles_i= ""
self.pixeles_r= ""
self.pixeles_a= ""
self.nombre_usuario= QLabel(nombre,self)
self.victorias =QLabel(vict,self)
self.derrotas = QLabel(derrot,self)
self.infanteria = QLabel("Q", self)
self.rango=QLabel("W",self)
self.artilleria=QLabel("E",self)
self.logo_i = QLabel(self)
self.logo_r = QLabel(self)
self.logo_a = QLabel(self)
pix_i=QPixmap(self.rutai)
pix_r=QPixmap(self.rutar)
pix_a=QPixmap(self.rutaa)
self.logo_i.setPixmap(pix_i)
self.logo_r.setPixmap(pix_r)
self.logo_a.setPixmap(pix_a)
vlayout1 = QVBoxLayout()
vlayout1.addWidget(self.nombre_usuario)
vlayout1.addWidget(self.infanteria)
vlayout1.addWidget(self.logo_i)
vlayout2 = QVBoxLayout()
vlayout2.addWidget(self.victorias)
vlayout2.addWidget(self.rango)
vlayout2.addWidget(self.logo_r)
vlayout3 = QVBoxLayout()
vlayout3.addWidget(self.derrotas)
vlayout3.addWidget(self.artilleria)
vlayout3.addWidget(self.logo_a)
mainlayout = QHBoxLayout()
mainlayout.addLayout(vlayout1)
mainlayout.addLayout(vlayout2)
mainlayout.addLayout(vlayout3)
self.setLayout(mainlayout)
        # If you use a layout, remember to add the labels to it and finally set the layout
def actualizar(self, datos):
        # This function is in charge of updating the window contents and opening the window
        # It receives the new cards and the current score in a dictionary
nombre = datos["usuario"]
vict = datos["victorias"]
derrot = datos["derrotas"]
self.pixeles_i= datos["infanteria"]
self.pixeles_r= datos["rango"]
self.pixeles_a= datos["artilleria"]
self.rutai=datos["infanteria"]["ruta"]
self.rutar=datos["rango"]["ruta"]
self.rutaa=datos["artilleria"]["ruta"]
        # Finally, the window is shown.
self.show()
def keyPressEvent(self, evento):
        # Here you must capture the pressed key
        # and send the chosen card
if evento.text() == "q":
data= self.pixeles_i
self.senal_enviar_jugada.emit(data)
if evento.text() == "w":
data= self.pixeles_r
self.senal_enviar_jugada.emit(data)
if evento.text() == "e":
data= self.pixeles_a
self.senal_enviar_jugada.emit(data)
class VentanaCombate(QWidget):
    # This signal is used to return to VentanaPrincipal with the updated data
senal_regresar = pyqtSignal(dict)
    # This signal opens the final window with the result of the game
senal_abrir_ventana_final = pyqtSignal(str)
def __init__(self, *args):
super().__init__(*args)
self.crear_pantalla()
def crear_pantalla(self):
self.setWindowTitle("DCCuent")
self.vbox = QVBoxLayout()
self.layout_principal = QHBoxLayout()
self.label_carta_usuario = QLabel()
self.label_victoria = QLabel()
self.label_carta_enemiga = QLabel()
self.boton_regresar = QPushButton("Regresar")
self.layout_principal.addWidget(self.label_carta_usuario)
self.layout_principal.addWidget(self.label_victoria)
self.layout_principal.addWidget(self.label_carta_enemiga)
self.boton_regresar.clicked.connect(self.regresar)
self.vbox.addLayout(self.layout_principal)
self.vbox.addWidget(self.boton_regresar)
self.setLayout(self.vbox)
def mostrar_resultado_ronda(self, datos):
self.datos = datos
mensaje = datos["mensaje"]
carta_enemiga = datos["enemigo"]
carta_jugador = datos["jugador"]
self.label_carta_usuario.setPixmap(QPixmap(carta_jugador["ruta"]).scaled(238,452))
self.label_carta_enemiga.setPixmap(QPixmap(carta_enemiga["ruta"]).scaled(238,452))
self.label_victoria.setText(mensaje)
self.show()
def regresar(self):
resultado = self.datos["resultado"]
if resultado == "victoria" or resultado == "derrota":
self.senal_abrir_ventana_final.emit(resultado)
else:
self.senal_regresar.emit(self.datos)
self.hide()
if __name__ == "__main__":
def hook(type, value, traceback):
print(type)
print(traceback)
sys.__excepthook__ = hook
a = QApplication(sys.argv)
ventana_principal = VentanaPrincipal()
ventana_principal.show()
sys.exit(a.exec())
|
[
"catalina.musalem@uc.cl"
] |
catalina.musalem@uc.cl
|