blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
2
616
content_id
stringlengths
40
40
detected_licenses
listlengths
0
69
license_type
stringclasses
2 values
repo_name
stringlengths
5
118
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringlengths
4
63
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
2.91k
686M
star_events_count
int64
0
209k
fork_events_count
int64
0
110k
gha_license_id
stringclasses
23 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
220 values
src_encoding
stringclasses
30 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
2
10.3M
extension
stringclasses
257 values
content
stringlengths
2
10.3M
authors
listlengths
1
1
author_id
stringlengths
0
212
440b99869395c2d799afc7da86ab92b42d80e041
42b2533118529ef6050676c7b2832390d0051dba
/kafka_example.py
cbdc29c9ce68776e5bf1de66127fed18b7f50ac2
[]
no_license
kartikeya-calsoft/mqtt_liota_kafka
1baf5e95a0684d8a64ab1a80eeb56d5d3d060afe
041b1e589376af50fa05d3e9506afbd7a8b8f3f9
refs/heads/master
2021-05-07T22:51:33.994261
2017-10-18T06:28:01
2017-10-18T06:28:01
107,366,636
0
0
null
null
null
null
UTF-8
Python
false
false
3,318
py
import Queue import logging import pint import string from liota.dccs.dcc import RegistrationFailure from liota.device_comms.mqtt_device_comms import MqttDeviceComms from liota.entities.edge_systems.dk300_edge_system import Dk300EdgeSystem from liota.entities.metrics.metric import Metric from liota.lib.utilities.utility import read_user_config # from liota.dccs.graphite import Graphite from liota.dcc_comms.socket_comms import SocketDccComms from kafka import KafkaConsumer from kafka_comms import KafkaDccComms from _kafka import Kafka log = logging.getLogger(__name__) # getting values from conf file config = read_user_config('samplePropMqtt.conf') # Create unit registry ureg = pint.UnitRegistry() # Store temperature values in Queue kafka_data = Queue.Queue() # Callback functions # To put corresponding values in queue def callback_kitchen_temp(client, userdata, message): kitchen_temperature_data.put(float(message.payload)) def callback_living_room_temp(client, userdata, message): living_room_temperature_data.put(float(message.payload)) def callback_presence(client, data, message): presence_data.put(float(message.payload)) def callback_kafka(client, data, message): try: kafka_data.put({str(string.replace(str(message.topic),"/",".")) : str(message.payload)}) #Excluding part before '/' in topic except: pass # Extract data from Queue def get_value(queue): data = kafka_data.get(block=True) print "Got data " print data return data if __name__ == "__main__": # Creating EdgeSystem edge_system = Dk300EdgeSystem(config['EdgeSystemName']) # Connect with MQTT broker using DeviceComms and subscribe to topics # Get kitchen and living room temperature values using MQTT channel kafka = Kafka(KafkaDccComms(ip = config['KafkaIP'], port = str(config['KafkaPort']))) # graphite = Graphite(SocketDccComms(ip=config['GraphiteIP'], # port=int(config['GraphitePort']))) kafka_reg_edge_system = kafka.register(edge_system) # graphite_reg_edge_system = graphite.register(edge_system) mqtt_conn = 
MqttDeviceComms(url = config['BrokerIP'], port = config['BrokerPort'], identity=None, tls_conf=None, qos_details=None, clean_session=True, keep_alive=config['keep_alive'], enable_authentication=False) mqtt_conn.subscribe(config['MqttChannel1'],0, callback_kafka) try: metric_name = config['MetricName'] content_metric = Metric( name=metric_name, unit=None, interval=1, aggregation_size=1, sampling_function=get_value #this is coming from the xmpp device/server #sampling_function = read_cpu_utilization #sampling_function = random_fun ) reg_content_metric = kafka.register(content_metric) kafka.create_relationship(kafka_reg_edge_system, reg_content_metric) reg_content_metric.start_collecting() except RegistrationFailure: print "Registration to IOTCC failed"
[ "Kartikeya.Bhatnagar@calsoftinc.com" ]
Kartikeya.Bhatnagar@calsoftinc.com
3fe99be4fc9dd1b4475b5099b5f1a26acdddbb8d
f04eed5e6c4499d22fb8e339667267aa59c8dfc7
/MonteCarloSimDraw8balls.py
a5de8664cb39a1bcd8e359a74ce2f9906b98f320
[ "Giftware" ]
permissive
shanjgit/previous-work
4ca7e29e231498891752307ba4b04c9726f0eb67
664cc40bd0b97e3adc10f551e18a4a7a62e5a760
refs/heads/master
2021-01-19T20:02:57.744302
2017-08-24T17:57:55
2017-08-24T17:57:55
101,217,213
2
0
null
null
null
null
UTF-8
Python
false
false
699
py
import random def drawing_without_replacement_sim(numTrials): ''' Runs numTrials trials of a Monte Carlo simulation of drawing 3 balls out of a bucket containing 4 red and 4 green balls. Balls are not replaced once drawn. Returns a float - the fraction of times 3 balls of the same color were drawn in the first 3 draws. ''' suc = 0.0 for i in xrange(numTrials): box = [0,0,0,0,1,1,1,1] draw = [] for j in xrange(3): x = random.choice(box) draw.append(x) box.remove(x) if (draw == [0,0,0] or draw == [1,1,1]): suc += 1 return suc/float(numTrials)
[ "noreply@github.com" ]
shanjgit.noreply@github.com
5f4e30912f841b906361f0e772695f3f6d5b2393
393a2545700bd2d217dc2fd85a10d1490cfd36dd
/LeetCode/find_min_in_rotated_sorted_array_2.py
b1069020d04be9657e02caf8ad439134cea8b1d7
[]
no_license
Jfeng3/careercup
3b12d0c2f5b1b7ef317c32cf38760dad90508995
3087e67b8d44ebdca68b6face7c7b6b991f45d70
refs/heads/master
2020-05-15T12:03:43.867295
2015-01-27T20:55:58
2015-01-27T20:55:58
29,100,566
0
1
null
null
null
null
UTF-8
Python
false
false
884
py
class Solution: # @param num, a list of integer # @return an integer def findMin(self, num): return self.findMin_re(num,0,len(num)-1) def findMin_re(self,num,start,end): if start == end: return num[start] if start+1 == end: if num[start]<=num[end]: return num[start] else: return num[end] mid = start + (end-start)/2 if num[end]>num[mid]: return self.findMin_re(num,start,mid) elif num[end]<num[mid]: return self.findMin_re(num,mid+1,end) elif num[start]>num[mid]: return self.findMin_re(num,start+1,mid) elif num[start]<num[mid]: return self.findMin_re(num,start,mid-1) else: return min(self.findMin_re(num,start,mid),self.findMin_re(num,mid+1,end))
[ "jfeng1115@gmail.com" ]
jfeng1115@gmail.com
ac68c34a9df77b38ee0be71b8c371854aa47da18
142fd48d2c09bc83ba31b96553fc6d27fad596a3
/v1/202.happy-number.132775164.ac.py
76ae51a59bdadd727573d185296fe6de77a038ba
[]
no_license
goalong/lc
baaa8ecc55ecdb136271687d21609832f32ccf6e
7b45d500e65c759cc2e278d33d9d21925a713017
refs/heads/master
2021-10-28T03:40:23.534592
2019-04-21T14:29:47
2019-04-21T14:29:47
111,088,996
6
0
null
null
null
null
UTF-8
Python
false
false
1,284
py
# # [202] Happy Number # # https://leetcode.com/problems/happy-number/description/ # # algorithms # Easy (41.39%) # Total Accepted: 153.8K # Total Submissions: 371.6K # Testcase Example: '1' # # Write an algorithm to determine if a number is "happy". # # A happy number is a number defined by the following process: Starting with # any positive integer, replace the number by the sum of the squares of its # digits, and repeat the process until the number equals 1 (where it will # stay), or it loops endlessly in a cycle which does not include 1. Those # numbers for which this process ends in 1 are happy numbers. # # Example: 19 is a happy number # # # 12 + 92 = 82 # 82 + 22 = 68 # 62 + 82 = 100 # 12 + 02 + 02 = 1 # # # Credits:Special thanks to @mithmatt and @ts for adding this problem and # creating all test cases. # class Solution(object): def isHappy(self, n): """ :type n: int :rtype: bool """ # 3 star. memo = set() while n not in memo: memo.add(n) n = self.get_next(n) if n == 1: return True return False def get_next(self, num): num_list = list(str(num)) rs = sum([int(i)*int(i) for i in num_list]) return rs
[ "along@myw-vicdeiMac.local" ]
along@myw-vicdeiMac.local
cdfb76942b472660a93bac1e3dc180069042129d
2dc7cee4cde492d6a7bbe253e95a7b9e9601cc59
/config.py
2fb86f6e715e6622bdd67b4132c7c8812a46e1a4
[]
no_license
qylshy/myproject
5a7db09553b6e4d4fa21cdb08afc4ee38b6fdefb
6c0f4f6b5d98df8e192f64e2390f934ced9ffacb
refs/heads/master
2020-05-28T11:08:54.043512
2019-05-28T07:46:08
2019-05-28T07:46:08
188,979,787
0
0
null
null
null
null
UTF-8
Python
false
false
672
py
import os basedir = os.path.abspath(os.path.dirname(__file__)) class Config(object): SECRET_KEY = os.environ.get('SECRET_KEY') or 'you-will-never-guess' SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL') or \ 'sqlite:///' + os.path.join(basedir, 'app.db') SQLALCHEMY_TRACK_MODIFICATIONS = False MAIL_SERVER = os.environ.get('MAIL_SERVER') MAIL_PORT = int(os.environ.get('MAIL_PORT') or 25) MAIL_USE_TLS = os.environ.get('MAIL_USE_TLS') is not None MAIL_USERNAME = os.environ.get('MAIL_USERNAME') MAIL_PASSWORD = os.environ.get('MAIL_PASSWORD') ADMINS = ['929130707@qq.com'] POSTS_PER_PAGE = 3
[ "qiuyunlong@bytedance.com" ]
qiuyunlong@bytedance.com
2c8f96dfd60e771a4512c4b9b459a21ff197f9ae
e04c3af194afacf7e454eb63a1f917c0df46698d
/MAST/test/workflow_test/workflow_setup.py
4fecd61563ecda63785f435c50709a593de50be3
[ "MIT" ]
permissive
kcantosh/MAST
050716de2580fe53cf241b0d281a84f13175b542
4138b87e5a1038eb65023232f80907333d3196f2
refs/heads/dev
2021-01-20T16:51:22.759949
2017-01-31T16:40:45
2017-01-31T16:40:45
82,833,665
0
1
null
2017-02-22T17:34:13
2017-02-22T17:34:13
null
UTF-8
Python
false
false
4,053
py
############################################################## # This code is part of the MAterials Simulation Toolkit (MAST) # # Maintainer: Tam Mayeshiba # Last updated: 2016-02-08 ############################################################## ############################################################## # Requirements: # 1. Home directory access from where the test will be run # 2. MAST installation ############################################################## import os import time import shutil import numpy as np from MAST.utility import MASTError from MAST.utility import dirutil from MAST.utility import MASTFile import MAST import subprocess testname ="workflow_test" testdir = dirutil.get_test_dir(testname) checkname = os.path.join(testdir, "WORKFLOW_CONFIG") def verify_checks(): checkfile=MASTFile(checkname) for myline in checkfile.data: if "Check" in myline: checkresult = myline.split(":")[1].strip()[0].lower() if checkresult == 'y': print "Checks okay" else: raise MASTError("verify checks","Checks for workflow setup not verified. 
Check %s" % checkname) return def get_variables(): verify_checks() myvars=dict() checkfile=MASTFile(checkname) for myline in checkfile.data: if myline[0:9] == "workflow_": mykey = myline.split("=")[0].strip() myval = myline.split("=")[1].strip() myvars[mykey] = myval return myvars def create_workflow_test_script(inputfile): myvars = get_variables() # set up testing directory tree wtdir=myvars['workflow_test_directory'] mast_test_dir=os.path.join(wtdir,"no_directory_yet") while not (os.path.isdir(mast_test_dir)): timestamp=time.strftime("%Y%m%dT%H%M%S") mast_test_dir = os.path.join(wtdir,"output_test_%s" % timestamp) if not (os.path.isdir(mast_test_dir)): shutil.copytree("%s/mini_mast_tree" % wtdir, mast_test_dir) # set up output file and submission script shortname = inputfile.split(".")[0] output="%s/output_%s" % (wtdir, shortname) submitscript="%s/submit_%s.sh" % (wtdir, shortname) generic_script="%s/generic_mast_workflow.sh" % wtdir bashcommand="bash %s %s %s %s %s %s >> %s" % (generic_script, mast_test_dir, myvars["workflow_examples_located"], inputfile, myvars["workflow_activate_command"], myvars["workflow_testing_environment"], output) submitfile=MASTFile() submitfile.data.append(bashcommand + "\n") submitfile.to_file(submitscript) return [mast_test_dir, submitscript, output] def generic_submit(inputfile): [mast_test_dir, submitscript, outputname] = create_workflow_test_script(inputfile) mygsub = "bash %s" % submitscript gproc = subprocess.Popen(mygsub, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) gproc.wait() if not (os.path.isfile(outputname)): print "Sleep 5" time.sleep(5) if not (os.path.isfile(outputname)): raise OSError("Test did not create output %s" % outputname) print "Output %s created" % outputname waitct=0 tailcmd = "tail -n 3 %s" % outputname maxwait=502 while waitct < maxwait: tail3proc=subprocess.Popen(tailcmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) tail3=tail3proc.communicate()[0] tail3proc.wait() for 
tailline in tail3.split("\n"): if "Workflow completed" in tailline: return ["Completed", mast_test_dir] time.sleep(30) waitct = waitct + 1 print "Output not complete. Attempt %i/%i" % (waitct, maxwait) return ["Unfinished", mast_test_dir] def get_finished_recipe_dir(mast_test_dir): trydirs=os.listdir(os.path.join(mast_test_dir,"ARCHIVE")) for trydir in trydirs: trypath=os.path.join(mast_test_dir,"ARCHIVE",trydir) if (os.path.isdir(trypath)): return trypath return ""
[ "mayeshiba@wisc.edu" ]
mayeshiba@wisc.edu
79477d7ab3de33f495c52b4c124955dd2490a742
5b6ff2aaad93717f68ec9babbee59234536cb6a4
/AddField.py
621a27f3ab062cceb94485888e8db269cb33ec7e
[]
no_license
byrash/py
46db32a29a2fffe1f2c854fd09b3451ee6b5b98d
8d532585b015d6304dcca3ccda6d82c18f2f57ac
refs/heads/master
2021-03-22T05:20:55.517051
2017-10-25T05:41:25
2017-10-25T05:41:25
107,758,596
0
0
null
null
null
null
UTF-8
Python
false
false
459
py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sat Oct 14 13:12:34 2017 @author: Shivaji """ import os import glob import pandas def addField(indir="/Users/Shivaji/tmp/extracted"): os.chdir(indir) fileList=glob.glob("*") for fileName in fileList: df = pandas.read_csv(fileName, sep='\s+',header=None) df["Station"]=[fileName.rsplit("-",1)[0]]*df.shape[0] df.to_csv(fileName+".csv",index=None,header=None)
[ "shivaji.byrapaneni@gmail.com" ]
shivaji.byrapaneni@gmail.com
aa5ce4f7642dc7712a7bdca6f0c2ed9b99d4fdac
d2a9ff16cdbcc97a65ae01cdcd79be4d560ef7c9
/homework5/db.py
7d644cfc342b43a270440041a3cc25410afcffe5
[]
no_license
humantom88/geekbrains-data-scraping
af3d7231f83ceb86992f01956b9d2184aa560b28
b02d68788be8a4d500b5433ec6a89f5583864061
refs/heads/master
2022-11-06T10:14:25.515797
2020-06-28T14:08:54
2020-06-28T14:08:54
268,997,676
0
1
null
null
null
null
UTF-8
Python
false
false
876
py
from pymongo import MongoClient, errors from pprint import pprint import zlib client = MongoClient('localhost', 27017) db = client['mails_db'] mails_db = db.mails db2 = client['goods_db'] goods_db = db2.goods def make_hash(item): return zlib.adler32(bytes(repr(item), 'utf-8')) def save_mails_to_db(mails_list): for mail in mails_list: mail_hash = make_hash(mail) mail["_id"] = mail_hash try: mails_db.insert_one(mail) except errors.DuplicateKeyError: print("Duplicate found for mail: ", mail) pass def save_goods_to_db(goods_list): for good in goods_list: good_hash = make_hash(good) good["_id"] = good_hash try: goods_db.insert_one(good) except errors.DuplicateKeyError: print("Duplicate found for good: ", good) pass
[ "humantom88@gmail.com" ]
humantom88@gmail.com
18757dc913ff5692065e3d0722d1a414217f341e
ad668acbbbf321db2dcbf2cc5a330387df814531
/MyEscapades/coordinateCombat.py
e1eaa6b57bfdcf3a8849cd3e205a35842bc8bf56
[]
no_license
LIHTU/mc_python_files
65969323866dd87bde3ddc97d47dc2dce7e6642e
d0408eea3adf59249ba0742e4c9101a42eb8e6c2
refs/heads/master
2020-12-24T13:16:54.389643
2016-03-20T22:00:15
2016-03-20T22:00:15
35,831,326
4
0
null
null
null
null
UTF-8
Python
false
false
521
py
# Coordinate Combat # In this minigame the user will be prompted to # defend themself against mobs by entering in coordinates # for defensive strucutes and weapons, such as fire or # trap pits, or TNT. ''' 1. figure out if we can turn mobs on, and make them hostile. 2. learn and implement chat commands. 3. Design minigame and sequencing. where which mobs 4. Can we detect whether a mob entity is dead or alive? 5. Maybe we could simulate a mob with moving block monsters, like the ufo in adventure 8. '''
[ "robinanelson@gmail.com" ]
robinanelson@gmail.com
696c4f4e21c3eb7fe7ea3890ef830d042a91d421
c2c86157ae2a4031d79b6f71b3f3cdcad913a87d
/matlabfiles/.svn/text-base/runonebyone.py.svn-base
cad7cd2195024b4366b6e4d261ec8176d6c025ed
[]
no_license
fishdda/Automatic-Radiation-Treatment-Planning-System-
e3875e7d17e96e488c7d678d70da4411213a98c6
42eba3a27e62e53907c782f01585abb0de15d7e4
refs/heads/master
2020-05-16T16:58:28.804199
2014-10-04T16:05:44
2014-10-04T16:05:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
144
import os; cases= [5,25,104,108,113,123,208,30,79]; for c in cases: cmd = './PrioritizedNlp cases' +str(c)+'.txt'; print cmd os.system(cmd);
[ "parastiwari@Paras-Tiwaris-MacBook-Pro.local" ]
parastiwari@Paras-Tiwaris-MacBook-Pro.local
389ce0bd3e07869ffa7d5d82fc97f0e6114b317e
1740075fca5d99eee47d8ab10e918be07f544d55
/catalog/migrations/0002_auto_20191107_1239.py
bd20f9edcd6d8711f45f088ad0c948df3acd2e3a
[]
no_license
Grayw0lf/local_library
0933bd5d35ef64ee4dc90dd0cdd83686a8eeed3a
652f0260bfd153138eaee24810685c52f4063b07
refs/heads/master
2023-04-30T10:23:38.048841
2019-11-13T21:10:09
2019-11-13T21:10:09
221,551,305
1
0
null
2023-04-21T20:40:05
2019-11-13T21:03:49
Python
UTF-8
Python
false
false
688
py
# Generated by Django 2.2.7 on 2019-11-07 09:39 from django.db import migrations, models import uuid class Migration(migrations.Migration): dependencies = [ ('catalog', '0001_initial'), ] operations = [ migrations.RenameField( model_name='author', old_name='date_of_died', new_name='date_of_death', ), migrations.AlterField( model_name='bookinstance', name='id', field=models.UUIDField(default=uuid.UUID('976d9b8b-7c2f-4e07-9879-78d7f1d2fe11'), help_text='Unique ID for this particular book across whole library', primary_key=True, serialize=False), ), ]
[ "akosheev@rambler.ru" ]
akosheev@rambler.ru
8eb5717f4d3e6a853f98ec128fe586d28b3c4c9f
0e29d70a54fa89cb7cb468529c8601d0ddf7b1b5
/date_migrate.py
5d78960a9634fe1cc69bd81af1c79ded42e0293e
[]
no_license
BishopJustice/MultiPage
f715d845137ed844d789b705a783b996ddb8f5a8
fbd3858790485abfb8120618cd936fd94e61d61d
refs/heads/master
2020-07-03T19:18:09.147296
2016-10-24T23:26:20
2016-10-24T23:26:20
66,682,896
0
0
null
null
null
null
UTF-8
Python
false
false
722
py
from app.models import User, Item from app import db import datetime from dateutil import parser # users = db.session.query(User).all() # for each in users: # if each.joined: # each.joined = datetime.datetime.strptime(each.joined, '%Y/%m/%d %H:%M:%S') items = db.session.query(Item).all() # for each in items: # print each.opened_at.date() for each in items: if each.opened_at: each.opened_at = parser.parse(each.opened_at) db.session.add(each.opened_at) print type(each.opened_at) if each.resolved_at: # each.resolved_at = parser.parse(each.resolved_at) # db.session.add(each) print type(each.resolved_at) db.session.commit() print "Done!"
[ "luke@lyft.com" ]
luke@lyft.com
1b6205e42264a320af0acc5001e8365d4e80aa70
bda9a317e22707e51e1f78f4ffca8205750f6d95
/mapbox/app/app/app.py
0e53e92e233cfc15117da47a1e4a8d72a0cba2c2
[]
no_license
elroypeter/SeniorProject
0395e8ffc977ea0f917a8525b5b85ca696fcca19
238b8e3c091b0294f620e6db68e897d8b8598ec3
refs/heads/master
2020-04-05T11:17:04.943758
2018-11-05T20:09:43
2018-11-05T20:09:43
null
0
0
null
null
null
null
UTF-8
Python
false
false
683
py
from flask import Flask, render_template from werkzeug import secure_filename from app import app, db, models import os, sys, requests @app.route('/',methods=['GET','POST']) def my_maps(): # TODO: need to change this to be dynamic response = requests.get('http://dblayer:80/jsonData/test.json') return render_template('index.html', data = response.json()) @app.route('/recommendations', methods=['GET','POST']) def recommendation(): return render_template('recommendations.html') @app.route('/urgent', methods=['GET','POST']) def urgent(): return render_template('urgent.html') @app.route('/admin', methods=['GET','POST']) def admin(): return render_template('admin.html')
[ "j.a.cochran.cs@gmail.com" ]
j.a.cochran.cs@gmail.com
522f7b5624afc3f1cd74452502167aa8d9f5b6d9
14be624679f0bd4521989f26263bf1803b2afba5
/Python3/URI1041.py
ad2c32cc607832a0ec4655cd4e04c6b9a0005934
[]
no_license
axelaviloff/uri-solutions
2521e792bbedba23a8320ced3e9c05bf5af8f7e0
c51b1122b608d65298cff26f3c1ad87ec059e2d0
refs/heads/master
2023-05-13T22:17:39.323316
2020-10-27T12:37:21
2020-10-27T12:37:21
null
0
0
null
null
null
null
UTF-8
Python
false
false
362
py
coordenadas = input() x, y = coordenadas.split(" ") x = float(x) y = float(y) if x > 0 and y > 0: print("Q1") elif x > 0 and y < 0: print("Q4") elif x < 0 and y < 0: print("Q3") elif x < 0 and y > 0: print("Q2") elif x == 0 and (y > 0 or y < 0): print("Eixo Y") elif (x > 0 or x < 0) and y == 0: print("Eixo X") else: print("Origem")
[ "axel.aviloff@estudante.uffs.edu.br" ]
axel.aviloff@estudante.uffs.edu.br
8b29ce161cc0486a4b357fc0b7e9a4eff0014b1b
636e304830d60907c778634df346a42399631e7d
/webots-project/controllers/pos-prediction/predictors/predictor_NN.py
3c6cc3f45a5ebf2afc72941669fe0e87e7a0dd94
[ "MIT" ]
permissive
varun-projects/webots-thesis
8784807b42a35dbe00040c3f903cdd4b86251338
c18c53b281af6c68431b9b3abde07d1934c37dd9
refs/heads/master
2023-01-14T12:16:37.984530
2020-10-06T14:00:26
2020-10-06T14:00:26
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,443
py
from keras import models from keras import layers import numpy as np import math import pickle import os.path from keras.models import load_model class PredictorNN: def __init__(self, data_collector): self.percentageTraining = .8 self.dc = data_collector data = self.dc.get_data_frame() # delete NA examples data = data.dropna() # if model exists load it otherwise create it if os.path.isfile('train_data_model_NN.h5'): self.inputs = data[['x', 'y', 'theta']] self.output = data[['sensor_1', 'sensor_2', 'sensor_3', 'sensor_4', 'sensor_5', 'sensor_6', 'sensor_7', 'sensor_8']] self.model = load_model('train_data_model_NN.h5') self.inputs_max = self.inputs.max() self.inputs_min = self.inputs.min() self.output_max = self.output.max() self.output_min = self.output.min() def create_model(self, train_data, train_targets, test_data, test_targets): k = 4 num_val_samples = len(train_data) // k num_epochs = 100 all_scores = [] all_mae_histories = [] model = self.build_model(train_data) history = model.fit(train_data, train_targets, epochs=num_epochs, batch_size=1) model.save('train_data_model_NN.h5') f = open('history.pckl', 'wb') pickle.dump(history, f) f.close() mae_history = history.history['mean_absolute_error'] val_mse, val_mae = model.evaluate(test_data, test_targets) all_scores.append(val_mae) all_mae_histories.append(mae_history) print('Scores of the k-fold', all_scores) print('Saving all scores and mae histories in local files') # save f = open('all_scores.pckl', 'wb') pickle.dump(all_scores, f) f.close() f = open('all_mae_histories.pckl', 'wb') pickle.dump(all_mae_histories, f) f.close() return model def normalize_data(self, train_data, test_data): mean = train_data.mean(axis=0) train_data -= mean std = train_data.std(axis=0) train_data /= std test_data -= mean test_data /= std def build_model(self, train_data): model = models.Sequential() model.add(layers.Dense(64, activation='relu', input_shape=(train_data.shape[1],))) model.add(layers.Dense(64, activation='relu')) 
model.add(layers.Dense(8)) model.compile(optimizer='rmsprop', loss='mse', metrics=['mae']) return model def normalize_inputs(self, inputs): return (inputs - self.inputs_min)/(self.inputs_max - self.inputs_min) def denormalize_output(self, outputs): return outputs*(self.output_max - self.output_min)+self.output_min def prediction_error(self, x, y, theta, sensors): features = self.normalize_inputs(np.array([x, y, theta])) pre_sensors = self.denormalize_output(self.model.predict(np.array([features]))[0]) err = 0 n_sensors = len(sensors) bad_data = True # print(true_dist) for ix, elem in enumerate(pre_sensors): if not math.isnan(sensors[ix]): bad_data = False # print('err', elem) # print('true', true_dist[ix]) err += (elem - sensors[ix]) ** 2 return 1/err, bad_data
[ "joan.sebastian.gerard@gmail.com" ]
joan.sebastian.gerard@gmail.com
00dd467e4af16d74877dc97ac0f1085d681b0a8c
0b79018d98ca5ae4e4eeae5d7be4bf43381739c4
/backend/src/scrapper/test.py
b46529b98e1976db4d7152217da18cfa6be877cc
[]
no_license
tsuasian/Dragon-Course-Review
1560c4da58417eef4e34ce5fa65f2f033800655d
127e1d3f2fa879b9542eaec54007ce0879aec37c
refs/heads/master
2023-08-24T15:53:55.246060
2021-10-28T08:11:57
2021-10-28T08:11:57
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,434
py
import requests import bs4 import lxml import os import json as json base_url = 'https://termmasterschedule.drexel.edu' home_res = requests.get('https://termmasterschedule.drexel.edu/webtms_du/app') home_soup = bs4.BeautifulSoup(home_res.text,'lxml') f = open("course_links.txt","w") """ What: Extract classes from the table returned when follow subject link How: Found a very specific attribute width=99% that only that table elements has. Select "even" and "odd" classnames and parse their content, ignoring Day/Time column Return: List of classes for a required subject """ def get_rows(soup): result = soup.find_all('table', attrs={"width": "99%"}) arr = [] for r in result: even = r.select(".even") odd = r.select(".odd") arr.append(even) arr.append(odd) even_rows = arr[0] odd_rows = arr[1] total_rows = even_rows + odd_rows tt = [] for i in total_rows: tds = i.find_all('td') if len(tds) > 5: tt.append(i) return tt """ What: Extract links based on selector How: Search soup object based on selector. Search result for <a> tags and extract "href" link and tag content. Assemble a dictionary with tag_content as key and "href" as value pairs Return: Dictionary with "tag_content": "href" structure """ def get_links(soup,selector): result = soup.select(selector) tmp = [] for block in result: tmp.append(block.find_all('a',href=True)) link_objects = tmp[0] links = {} for link in link_objects: link_value = link.contents[0] link_url = link.get('href') links[link_value] = link_url return links """ What: Extract one level down links tree for term -> college links How: Loops through dictionary and follow links in the values. Calls to get_links*() to get sublinks. 
Assemble nested dictionary with structure: { term: { college : "href" } } Return: One level down nested dictionary """ def term_level_tree(current_tree): tmp = {} for term in current_tree: res = requests.get(base_url + current_tree[term]) college_soup = bs4.BeautifulSoup(res.text,'lxml') college_links = get_links(college_soup,'#sideLeft') tmp[term] = college_links return tmp """ What: Extract two level down links tree for term -> college -> subject links How: Loops through dictionary and follow links in the values. Calls to get_links*() to get sublinks. Assemble nested dictionary with structure: { term: { college : { subject: "href" } } } Return: Two levels down nested dictionary """ def college_level_tree(current_tree): tmp1 = {} for term in current_tree: tmp2 = {} for college in current_tree[term]: res = requests.get(base_url + current_tree[term][college]) subject_soup = bs4.BeautifulSoup(res.text,'lxml') subject_links = get_links(subject_soup,'.collegePanel') tmp2[college] = subject_links tmp1[term] = tmp2 return tmp1 """ What: Extract string representative of a row of class information from three(final) levels down links tree for term -> college -> subject -> class For testing purposes print() instead of writing to file. How: Loops through dictionary and follow links in the values. Calls to get_links*() to get sublinks. When reached the bottom level, search table data for values and assemble a string. 
For testing output is redirected to out.txt file Return: """ def subject_level_tree_print(current_tree): f = open("a.out", "w") for term in current_tree: for college in current_tree[term]: for subject in current_tree[term][college]: res = requests.get(base_url + current_tree[term][college][subject]) class_soup = bs4.BeautifulSoup(res.text,'lxml') r = get_rows(class_soup) for i in r: tds = i.find_all('td') write_string = "" for c in i: try: if len(c.contents) == 1: write_string = write_string + "|" + c.contents[0] except: pass f.write(write_string + "\n") print(write_string) f.close() def main(): term_links = get_links(home_soup,'.termPanel') for l in term_links: write_string = l + ": " + term_links[l] + "\n" f.write(write_string) f.close() #print(term_links) winter_link_tree = {} winter_link_tree['Winter Quarter 20-21'] = term_links['Winter Quarter 20-21'] term_college_tree = term_level_tree(winter_link_tree) print(term_college_tree) college_subject_tree = college_level_tree(term_college_tree) subject_level_tree_print(college_subject_tree) main()
[ "tchang@gmail.com" ]
tchang@gmail.com
2a500a425eb1abbc023f928e0a265bbc37889d78
64fc5dfec9a6f7b31c224286321899f5103d3983
/duckworthd/mining.py
f2127b917aeb1fa70482dce7c25ce5b13176311f
[]
no_license
espoirMur/public-DKHQ_GlobalWitness
68aaaaef52a1b05773ded143060a0c5f45c14c6a
e0b0d2b669faa1cb6b3cc86791ff5ce306c1cfcb
refs/heads/master
2020-04-17T17:22:23.323979
2018-10-11T19:50:42
2018-10-11T19:50:42
null
0
0
null
null
null
null
UTF-8
Python
false
false
17,678
py
"""Utilities for working with satellite imagery for mining.""" import json import os import re import string import tempfile import urllib import zipfile from matplotlib import pyplot as plt import bcolz import ee as earth_engine earth_engine.Initialize() import gdal import h5py import numpy as np import pandas as pd import geopandas as gpd # Default directory containing images. DEFAULT_IMAGE_ROOT = '/workspace/lab/data_s3/mines_ipis' # Fusion Table ID containing polygons around mining sites. DEFAULT_IPIS_MINING_POLYGONS = 'ft:1HG3R3cebqMp2yK0cOimTL7wLnh41c1DH24GyWQg1' # Images with 4 axes. The first two are typical for images -- x, y. The # third is color band (traditionally RGB, but Landsat captures more). The # fourth is time, representing when the image was captured. X_AXIS = 0 Y_AXIS = 1 BAND_AXIS = 2 TIME_AXIS = 3 def load_ipis_mining_sites_dataset(): """Load all mining sites annotated by IPIS from FusionTable as GeoJSON.""" return earth_engine.FeatureCollection('ft:1P1f-A2Sl44YJEqtD1FvA1z7QtDFsRut1QziMD-nV').getInfo() def _get_metadata_file_path(image_root): """Get absolute path to metadata.json file in a given directory. If there are more than one metadata.json files, pick the last one after sorting. """ if not os.path.exists(image_root): raise ValueError( u'%s does not exist. No metadata files found.' % image_root) filenames = os.listdir(image_root) metadata_filenames = [name for name in filenames if 'metadata' in name] if not metadata_filenames: raise ValueError( u'No files with "metadata" in name found under %s' % image_root) metadata_filename = list(sorted(metadata_filenames))[-1] return os.path.join(image_root, metadata_filename) def load_metadata(image_root=None): """Load JSON file storing image metadata from disk. If no JSON file can be found, an empty DataFrame is returned. 
""" image_root = image_root or DEFAULT_IMAGE_ROOT try: fpath = _get_metadata_file_path(image_root) except ValueError: return pd.DataFrame( columns=["bands", "collection", "dates", "dim", "fpath", "id"]) with open(fpath) as f: return pd.DataFrame(json.load(f)) def save_metadata(image_root, metadata): """Store DataFrame containing image metadata to disk.""" if not os.path.exists(image_root): os.makedirs(image_root) with open(os.path.join(image_root, "metadata4.json"), "w") as f: return metadata.to_json(f) def merge_metadata(old_metadata, new_metadata): """Merge two metadata DataFrames.""" # Remove all rows from 'old_metadata' that have the same path as in 'new_metadata' old_metadata = old_metadata[~old_metadata['fpath'].isin( new_metadata['fpath'])] # Concatenate new and old together. return pd.concat([old_metadata, new_metadata], ignore_index=True) def load_image(img_metadata, image_root=None): """Load a single image from disk.""" image_root = image_root or DEFAULT_IMAGE_ROOT fname = os.path.join(image_root, img_metadata['fpath']) return bcolz.open(fname)[:] def geodataframe_to_earthengine(geodataframe): """Converts a GeoDataFrame to an ee.FeatureCollection.""" geojson_str = geodataframe.to_json() geojson = json.loads(geojson_str) return geojson_to_earthengine(geojson) def geojson_to_earthengine(geojson): """Converts a GeoJSON dict to an Earth Engine type. Args: geojson: GeoJSON-supported object as a nested dict/list/tuple. Returns: A matching type that Earth Engine understands (e.g. ee.FeatureCollection, ee.Geometry.Point). 
""" if isinstance(geojson, dict): if 'type' not in geojson: raise ValueError("Not 'type' attribute in geojson: %s" % (geojson,)) if geojson['type'] == 'FeatureCollection': return earth_engine.FeatureCollection( geojson_to_earthengine(geojson['features'])) elif geojson['type'] == 'Feature': return earth_engine.Feature( geojson_to_earthengine(geojson['geometry']), geojson['properties']) elif geojson['type'] == 'Point': return earth_engine.Geometry.Point(coords=geojson['coordinates']) elif geojson['type'] == 'Polygon': return earth_engine.Geometry.Polygon( coords=geojson['coordinates'], geodesic=geojson.get('geodesic', None)) raise ValueError("Unsupported GeoJSON dict type: %s" % geojson['type']) elif isinstance(geojson, list): return [geojson_to_earthengine(element) for element in geojson] elif isinstance(geojson, tuple): return tuple(geojson_to_earthengine(element) for element in geojson) elif type(geojson) in [int, float, str, unicode]: return geojson else: raise ValueError("Unable to parse type: %s" % type(geojson)) def to_earthengine_featurecollection(obj): """Converts an object to an ee.FeatureCollection. 'obj' can be one of: - str: a Fusion Table ID ("ft:xxx") - GeoDataFrame - GeoJSON dict of type 'FeatureCollection' """ # If string, load FeatureCollection using Earth Engine. if isinstance(obj, basestring): return earth_engine.FeatureCollection(obj) # If GeoDataFrame, convert to ee.FeatureCollection. if isinstance(obj, gpd.GeoDataFrame): return geodataframe_to_earthengine(obj) # If GeoJSON, convert to ee.FeatureCollection. if isinstance(obj, dict): assert 'type' in obj assert obj['type'] == 'FeatureCollection' return geojson_to_earthengine(obj) def load_image_mask(img_metadata, ipis_mining_sites=None, ipis_mining_polygons=None, image_root=None): """Load binary mask labeling pixels as "mining" or "not mining". Args: img_metadata: pd.Series from a metadata.json file. 
ipis_mining_sites: FeatureCollection GeoJSON dict containing all IPIS mining site locations as Points. ipis_mining_polygons: Object that can be converted to an ee.FeatureCollection. See to_earthengine_featurecollection() for available options. Default's to Sina's Fusion Table. image_root: string. unused? Returns: numpy array of shape [100, 100] with values {0, 1}, where 0.0 == no mine and 1.0 == mine, centered at the location described by img_metadata. """ # If None, use the Fusion Table containing mining sites that Sina created. if ipis_mining_sites is None: ipis_mining_polygons = DEFAULT_IPIS_MINING_POLYGONS ipis_mining_polygons = to_earthengine_featurecollection(ipis_mining_sites) ipis_mining_image = ipis_mining_polygons.reduceToImage( properties=['mine'], reducer=earth_engine.Reducer.first()) # earth_engine.Image() type # Get Point corresponding to this image from IPIS dataset. roi_id = img_metadata['id'] if ipis_mining_sites is None: ipis_mining_sites = load_ipis_mining_sites_dataset() roi = ipis_mining_sites['features'][roi_id]['geometry'] assert roi['type'] == 'Point' # Create a circle around the point with a given buffer size (in meters). buff = 1500 # radius of 1500 meters about the point. roi_point = earth_engine.Geometry.Point(roi['coordinates']) roi_buff = earth_engine.Geometry.buffer(roi_point, buff) # ee.Geometry() roi_buff = roi_buff.getInfo() # GeoJSON dict # Download image containing circle from Earth Engine. scale = 30 # 30 meters/pixel --> circle with 100 pixel diameter. mask = load_map_tile_containing_roi( ipis_mining_image, roi_buff['coordinates'], scale=scale) # Some images are 101 x 101, some are 100 x 100. Let's ensure they're all # 100 x 100. mask = mask[:100, :100] assert mask.shape[2] == 1, 'Mask has > 1 band.' return mask.reshape(mask.shape[0], mask.shape[1]) def load_map_tile_containing_roi(image, roi, scale=30): """Get rasterized image containing ROI from Earth Engine. Constructs a rasterized image tile subsetting 'image'. 
The image is large enough to fully contain the polygon described by 'roi', and will contain one pixel per 'scale' m^2 area. Args: image: ee.Image instance. To be used as mask. Must have exactly 1 band. roi: Triple-nested list of floats, where lowest level is [longitude, latitude] pairs from 'coordinates' of a GeoJSON polygon. scale: int. Number of squared meters per pixel. Returns: numpy array of shape [N x M x K], where N is width, M is height, and K is number of bands. """ # Generate a random filename. filename = ''.join(np.random.choice(list(string.ascii_letters), size=10)) # Download image containing ROI. url = earth_engine.data.makeDownloadUrl( earth_engine.data.getDownloadId({ 'image': image.serialize(), 'scale': '%d' % scale, 'filePerBand': 'false', 'name': filename, 'region': roi })) local_zip, headers = urllib.urlretrieve(url) with zipfile.ZipFile(local_zip) as local_zipfile: local_tif_filename = local_zipfile.extract( filename + '.tif', tempfile.mkdtemp()) # Read image into memory. Result has shape [x, y, color bands]. dataset = gdal.Open(local_tif_filename, gdal.GA_ReadOnly) bands = [dataset.GetRasterBand(i + 1).ReadAsArray() for i in range(dataset.RasterCount)] return np.stack(bands, axis=2) def save_images(image_root, images, metadata): """Store a list of images to disk.""" assert len(images) == len(metadata) if not os.path.exists(image_root): os.makedirs(image_root) for (img, (_, img_metadata)) in zip(images, metadata.iterrows()): save_image(image_root, img, img_metadata) def save_image(image_root, img, img_metadata): """Store a single image to disk.""" if not os.path.exists(image_root): os.makedirs(image_root) fname = os.path.join(image_root, img_metadata['fpath']) dname = os.path.dirname(fname) if not os.path.exists(dname): os.makedirs(dname) c = bcolz.carray(img, rootdir=fname, mode='w') c.flush() def save_images_with_hdf5(image_root, images, metadata): assert len(images) > 0, "Must have 1+ images to write." # Make directory if necessary. 
if not os.path.exists(image_root): os.makedirs(image_root) # Construct an empty HDF5 dataset on disk. image_shape = images[0].shape initial_images_shape = (len(images),) + image_shape max_images_shape = (None,) + image_shape with h5py.File(os.path.join(image_root, "images.h5"), "w") as h5f: dataset = h5f.create_dataset( "images", initial_images_shape, maxshape=max_images_shape) # Write images into space. for i, image in enumerate(images): dataset[i] = image def save_images_with_bcolz(image_root, imgs, metadata): assert len(imgs) == len(metadata) # Make directory if necessary. if not os.path.exists(image_root): os.makedirs(image_root) # Construct a bcolz array with the first image only. assert len(imgs) > 0, "Must have 1+ images to write." output_shape = (1, ) + imgs[0].shape with bcolz.carray(imgs[0].reshape(output_shape), rootdir=os.path.join(image_root, "images"), mode="w") as array: # Add all other images. for i, img in enumerate(imgs): if i == 0: continue array.append(img.reshape(output_shape)) def load_images_with_hdf5(image_root): """Load all images from HDF5 array.""" with h5py.File(os.path.join(image_root, "images.h5")) as h5f: return h5f['images'][:] def load_image_with_hdf5(image_root, img_metadata): """Load all images from HDF5 array.""" with h5py.File(os.path.join(image_root, "images.h5")) as h5f: return h5f['images'][int(img_metadata.name)] def load_images_with_bcolz(image_root): """Load all images from bcolz array.""" with bcolz.open(os.path.join(image_root, "images")) as array: return array[:] def load_image_with_bcolz(image_root, img_metadata): """Load a single image from bcolz array.""" with bcolz.open(os.path.join(image_root, "images")) as array: return array[int(img_metadata.name)] def plot_image(image, metadata=None, band=None, ax=None, cmap='gray'): ax = ax or plt.gca() # Aggregate over time. if len(image.shape) == 4: image = np.nanmedian(image, axis=TIME_AXIS) # Select only the bands requested. 
if len(image.shape) == 3: assert band is not None, "You must choose a band to plot." assert metadata is not None, "metadata required to select color band." band_index = metadata['bands'].index(band) image = image[:, :, band_index] ax.imshow(image, cmap=cmap) ax.xaxis.set_ticks([]) ax.yaxis.set_ticks([]) return ax def canonicalize_image(img, img_metadata): """Canonicalize image for machine learning models. - Aggregates across 2016/06 to 2017/06 - Drops all bands but B1...B11. """ img_metadata = img_metadata.copy() # Get all dates in a 12 month span. dates = [date for date in img_metadata['dates'] if date >= '20160601' and date < '20170601'] if len(dates) < 12: raise ValueError( "Found %d dates for the following image when 12 were expected. %s" % (len(dates), img_metadata)) img_metadata['dates'] = dates # Aggregate across 12 month span (hides cloud cover). Only keep the start # date in the metadata, as there's exactly one date dimension. img = np.nanmedian(img, axis=TIME_AXIS, keepdims=True) img_metadata['dates'] = [dates[0]] # Only keep raw bands. All others bands are simple functions of these. bands = [band for band in img_metadata['bands'] if re.search('^B\d+$', band) is not None] band_indices = [img_metadata['bands'].index(band) for band in bands] img = img[:, :, band_indices] img_metadata['bands'] = bands img_metadata["dim"] = img.shape return img, img_metadata def canonicalize_image_by_month(img, img_metadata, band=None): """Canonicalize an image by taking its median pixel value per month. Args: img: numpy array, shape [height, width, num color bands, num dates]. img_metadata: pandas Series. Contains 'bands' and 'dates' entries. band: None, string, or list of strings. If None, output all color bands. If string, output a single color band, if list of strings, output one color band per string. """ assert len(img.shape) == 4, "img must be [width, height color band, time]." # Select bands to process. 
if band is None: bands = img_metadata["bands"] if isinstance(band, basestring): bands = [band] elif isinstance(band, list): bands = band else: raise ValueError("Unrecognized type for argument 'band': %s" % band) band_idxs = [img_metadata["bands"].index(b) for b in bands] img_band = img[:, :, band_idxs, :] # Extract month out of each date (YYYYMMDD string) dates = pd.DataFrame({"dates": img_metadata['dates']}) dates["month"] = dates["dates"].str.slice(4, 6) # Construct result image. There will be 12 months. width, height, _, _ = img.shape result_img = np.full((width, height, len(bands), 12), np.nan) for month, group in dates.groupby("month"): # Select the appropriate time, color bands. time_idxs = list(group.index) img_month = img_band[:, :, :, time_idxs] # Take median pixel intensity over time. result_img[:, :, :, int(month) - 1] = np.nanmedian( img_month, axis=[TIME_AXIS]) # Construct new metadata. We'll use the first date for each month in the # grouping. result_metadata = img_metadata.copy() result_metadata["dim"] = result_img.shape result_metadata["bands"] = bands result_metadata["dates"] = list(dates.groupby("month").first()["dates"]) return result_img, result_metadata def merge_canonical_image_and_mask(canonical_img, mask, img_metadata): """Combine canonical_image and mask into a single array.""" # Ensure canonical_img and mask have the same shape. assert len(canonical_img.shape) == 4 mask = np.reshape(mask, [mask.shape[0], mask.shape[1], 1, 1]) # Copy time dim as many times as necessary to match 'canonical_img'. mask = np.tile(mask, [1, 1, 1, canonical_img.shape[3]]) # Concatenate mask as the final band. canonical_img = np.concatenate([canonical_img, mask], axis=BAND_AXIS) # Add 'mask' as the final band to the metadata. 
img_metadata = img_metadata.copy() img_metadata['bands'] = img_metadata['bands'] + ['mask'] return canonical_img, img_metadata def plot_monthly_image(img, img_metadata): assert len( img.shape) == 4, "img shape must be [height, width, color band, month]" assert img.shape[3] == 12, "img must have 1 entry per month for every color band." months = ["Jan", "Feb", "Mar", "Apr", "May", "June", "July", "Aug", "Sept", "Oct", "Nov", "Dec"] num_cols = len(img_metadata["bands"]) num_rows = len(months) plt.figure(figsize=(2 * num_cols, 2 * num_rows)) for i in range(num_rows): for j in range(num_cols): ax = plt.subplot(num_rows, num_cols, i * num_cols + j + 1) ax.set_title("%s/%s" % (months[i], img_metadata["bands"][j])) plot_image(img[:, :, j, i])
[ "duckworthd@gmail.com" ]
duckworthd@gmail.com
9d32317d1286c1736e8582adf02d5839dba92f00
9246f53f8048e2040f6c40b12fd6e81bf11bce1b
/chapter10/kmeans_sklearn.py
acdaa10443f1f5da396d3791aea808f2b6ff816b
[ "MIT" ]
permissive
damonclifford/Python-Machine-Learning-By-Example-Third-Edition
3541afefde8de164c3c82a47441f7fb20bbd7f71
35f364fd9f7f044771fb750bddf4b6fb101ea89e
refs/heads/master
2022-12-12T02:50:18.503257
2020-09-02T05:48:14
2020-09-02T05:48:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
791
py
''' Source codes for Python Machine Learning By Example 3rd Edition (Packt Publishing) Chapter 10 Discovering Underlying Topics in the Newsgroups Dataset with Clustering and Topic Modeling Author: Yuxi (Hayden) Liu (yuxi.liu.ece@gmail.com) ''' from sklearn import datasets iris = datasets.load_iris() X = iris.data[:, 2:4] y = iris.target import numpy as np from matplotlib import pyplot as plt k = 3 from sklearn.cluster import KMeans kmeans_sk = KMeans(n_clusters=3, random_state=42) kmeans_sk.fit(X) clusters_sk = kmeans_sk.labels_ centroids_sk = kmeans_sk.cluster_centers_ for i in range(k): cluster_i = np.where(clusters_sk == i) plt.scatter(X[cluster_i, 0], X[cluster_i, 1]) plt.scatter(centroids_sk[:, 0], centroids_sk[:, 1], marker='*', s=200, c='#050505') plt.show()
[ "yuxi.liu.ece@gmail.com" ]
yuxi.liu.ece@gmail.com
d487e6f13e0f9607074d24d0dcca3b4571ee9366
00fac941f4f9e39cda9e3286d5cc1a77bda1d888
/GetCode.py
39bd6b4ba3989967c5cbd3a4f5254a2935a566c6
[]
no_license
bluegray/Sublime-Text-3-config
feb340139f01dd35e63874cb26c5e6515b3368bc
73428ebd263c60472cb7de64028c559252ed8c5e
refs/heads/master
2020-04-10T14:00:33.960991
2015-05-16T12:06:12
2015-05-16T12:06:30
33,752,869
0
1
null
null
null
null
UTF-8
Python
false
false
407
py
import sublime import sublime_plugin class GetSelectionCodeCommand(sublime_plugin.TextCommand): def run(self, edit): sel = self.view.sel() if len(sel) > 0: charcode = ord(self.view.substr(sel[0].begin())) msg = "%d 0x%x" % (charcode, charcode) self.view.set_status("Char Code", "Char Code: " + msg + " :: ") sublime.set_clipboard(msg)
[ "bluegray@users.noreply.github.com" ]
bluegray@users.noreply.github.com
ea646c020a58d6eb93ac0b637c7b9fdd4b435641
addb7882a96d30c431b40ba33309cbf8f8328894
/taskgraph/tests/model.py
26500f3cfe82c33d90c54b6c0e55e7a9e544f7e4
[]
no_license
mmuddy/taskgraph
581de61669f6ce56a87c487390a4b2ee3bbfb9ac
0925dd2f8fd7c83b70a830cc5903366bf2c830ba
refs/heads/master
2020-04-06T20:02:26.896467
2017-01-23T22:22:38
2017-01-23T22:22:38
68,546,946
0
0
null
2016-12-23T05:27:31
2016-09-18T20:58:23
HTML
UTF-8
Python
false
false
6,519
py
from taskgraph.model.model import * from taskgraph.tasktracker.getinterface import get_interface from .settings import tracker_dummy, tracker_redmine from django.test import TestCase from django.db import IntegrityError class TestTracker(TestCase): def create_tracker(self): all_trackers = Tracker.objects.all() tracker = Tracker.objects.create(url='no-validation', type='no-validation') from_get_tracker = Tracker.objects.get(url='no-validation') self.assertTrue(tracker and from_get_tracker) self.assertEqual(tracker, from_get_tracker) from_get_tracker.delete() all_trackers_after_delete = Tracker.objects.all() self.assertEqual(len(all_trackers), len(all_trackers_after_delete)) def test_unique(self): Tracker.objects.get_or_create(url='no-validation', type='no-validation') try: Tracker.objects.create(url='no-validation', type='no-validation') except IntegrityError: return self.assertTrue(False) def assert_creation(test_case, models_before): for model_type, objects_before in models_before: test_case.assertTrue(model_type.objects.all().count() - objects_before > 0) def assert_cleanup(test_case, models_before): for model_type, objects_before in models_before: print model_type test_case.assertTrue(model_type.objects.all().count() - objects_before == 0) def test_projects_creation_and_cleanup(test_case, tracker): type_list = [Project, Assignee, TaskState, TaskRelationType, TaskCategory] models_before = [] for model_type in type_list: models_before.append((model_type, model_type.objects.all().count())) tracker.restore_project_list(get_interface(tracker.type)) assert_creation(test_case, models_before) tracker.delete() # assert_cleanup(test_case, models_before) def test_create_and_clean_up_tasks(test_case, tracker): i_tracker = get_interface(tracker.type).connect(tracker) i_tracker.refresh() tracker.restore_project_list(i_tracker) list_before = [] task_count = Task.objects.all().count() rel_count = TaskRelation.objects.all().count() list_before.append((Task, task_count)) 
list_before.append((TaskRelation, rel_count)) for project in tracker.projects: project.is_active = True project.save() tracker.restore_project_tasks(i_tracker, only_active=False) for model_type, before_count in list_before: test_case.assertTrue(model_type.objects.all().count() - before_count > 0) tracker.delete() #for model_type, before_count in list_before: # test_case.assertTrue(model_type.objects.all().count() - before_count == 0) class TestTrackerWithDummy(TestCase): def test_projects_creation_and_cleanup(self): test_projects_creation_and_cleanup(self, tracker_dummy()) class TestTrackerWithRedmine(TestCase): def test_projects_creation_and_cleanup(self): test_projects_creation_and_cleanup(self, tracker_redmine()) class TestProjectWithDummy(TestCase): def test_projects_creation_and_cleanup(self): test_create_and_clean_up_tasks(self, tracker_dummy()) class TestProjectWithRedmine(TestCase): def test_projects_creation_and_cleanup(self): test_create_and_clean_up_tasks(self, tracker_redmine()) class TestIntegrationWithRedmine(TestCase): def test_task_update(self): tracker = tracker_redmine() tracker.save() i_tracker = get_interface(tracker.type) i_tracker.connect(tracker) i_tracker.refresh() tracker.restore_project_list(get_interface(tracker.type)) pytiff = None for project in tracker.projects: if project.name == 'Pytift test': project.is_active = True project.save() pytiff = project break tracker.restore_project_tasks(get_interface(tracker.type)) for task in filter(lambda t: t.category.name == 'UnitTest', pytiff.tasks): subj_field = filter(lambda f: f.name == 'subject', task.additional_field)[0] subj_field.char += '$ test passed' subj_field.save() task.save(save_on_tracker=True, i_tracker=i_tracker) tracker.restore_project_tasks(get_interface(tracker.type)) pytiff = filter(lambda p: p.name == 'Pytift test', tracker.projects)[0] for task in filter(lambda t: t.category.name == 'UnitTest', pytiff.tasks): subj_field = filter(lambda f: f.name == 'subject', 
task.additional_field)[0] subj_field.char = subj_field.char.split('$')[0] subj_field.save() task.save(save_on_tracker=True, i_tracker=i_tracker) def test_relation_update(self): tracker = tracker_redmine() tracker.save() i_tracker = get_interface(tracker.type) i_tracker.connect(tracker) i_tracker.refresh() tracker.restore_project_list(get_interface(tracker.type)) pytiff = None for project in tracker.projects: if project.name == 'Pytift test': project.is_active = True project.save() pytiff = project break tracker.restore_project_tasks(get_interface(tracker.type)) t_from = None t_to = None t_type = None old_count = len(pytiff.tasks_relations) for relation in pytiff.tasks_relations: t_from = relation.from_task t_to = relation.to_task t_type = relation.type relation.delete(i_tracker=i_tracker) break self.assertTrue(t_from and t_to and t_type) self.assertEqual(len(pytiff.tasks_relations), old_count - 1) tracker.restore_project_tasks(get_interface(tracker.type)) pytiff = filter(lambda p: p.name == 'Pytift test', tracker.projects)[0] self.assertEqual(len(pytiff.tasks_relations), old_count - 1) t_type = filter(lambda p: p.name == t_type.name, pytiff.task_relation_types)[0] t_from = filter(lambda p: p.identifier == t_from.identifier, pytiff.tasks)[0] t_to = filter(lambda p: p.identifier == t_to.identifier, pytiff.tasks)[0] old_rel = TaskRelation.objects.create(project=pytiff, type=t_type, from_task=t_from, to_task=t_to) old_rel.save(i_tracker=i_tracker) self.assertEqual(len(pytiff.tasks_relations), old_count)
[ "yakovlevvladyakovlev@yandex.ru" ]
yakovlevvladyakovlev@yandex.ru
61e7ac4e48aa441ddac70c7a136199bc95ef0cb8
97be97cfc56fb2170b60b91063dbfe5f1449e3c0
/python/ABC189/D.py
7e2f46bfedc29c348c5d23cf98f1faf6718dbc94
[]
no_license
iWonder118/atcoder
73d965a0a9ade189733808e47634f2b7776aad4b
3ab7271e838a2903ff0e07f94015ef13c59577e1
refs/heads/master
2022-01-25T10:10:55.007340
2021-12-31T14:04:54
2021-12-31T14:04:54
245,155,997
0
0
null
null
null
null
UTF-8
Python
false
false
77
py
n = int(input()) logics = [input() for _ in range(n)] for i in range(n):
[ "52240372+iWonder118@users.noreply.github.com" ]
52240372+iWonder118@users.noreply.github.com
e72b7714ac7c6c70fcad4cd97133be4a97489a94
72a934f4940c4ae77682d45a2d1e8ec5b1e2ff01
/pro/models/sequential.py
18d7368dc2b1336ad3c0ea4e895f26cf5c057335
[]
no_license
dhaval-jain/g1
5347160fcf4efc21207fdf9f996a10dd4e0f61e9
a6d4deb672204b9eaf1efc5c6e0c12f38b5bb906
refs/heads/master
2023-03-18T22:49:03.559327
2020-11-17T16:59:50
2020-11-17T16:59:50
346,329,013
0
0
null
null
null
null
UTF-8
Python
false
false
3,117
py
TRAIN_PATH = "C:/Users/meow/PycharmProjects/project1/CovidDataset/Train" # gets the the paths in that folder VAL_PATH = "C:/Users/meow/PycharmProjects/project1/CovidDataset/Val" import numpy as np import matplotlib.pyplot as plt import keras from keras.layers import * from keras.models import * from keras.preprocessing import image import tensorflow as tf from tensorflow.keras.models import Sequential from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D, Conv3D model = Sequential() model.add(Conv2D(32,kernel_size=(3,3),activation='relu',input_shape=(224,224,3))) model.add(Conv2D(64,(3,3),activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(64,(3,3),activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Conv2D(128,(3,3),activation='relu')) model.add(MaxPooling2D(pool_size=(2,2))) model.add(Dropout(0.25)) model.add(Flatten()) model.add(Dense(64,activation='relu')) model.add(Dropout(0.5)) model.add(Dense(1,activation='sigmoid')) model.compile(loss=keras.losses.binary_crossentropy,optimizer='adam',metrics=['accuracy']) model.summary() # Use the Image Data Generator to import the images from the dataset from keras.preprocessing.image import ImageDataGenerator train_datagen = ImageDataGenerator(rescale = 1./255, shear_range = 0.2, zoom_range = 0.2, horizontal_flip = True) test_datagen = ImageDataGenerator(rescale = 1./255) # Make sure you provide the same target size as initialied for the image size training_set = train_datagen.flow_from_directory('C:/Users/meow/PycharmProjects/project1/CovidDataset/Train', target_size = (224, 224), batch_size = 16, class_mode = 'categorical') training_set.class_indices test_set = test_datagen.flow_from_directory('C:/Users/meow/PycharmProjects/project1/CovidDataset/Val', target_size = (224, 224), batch_size = 16, class_mode = 'categorical') r = model.fit_generator( training_set, validation_data=test_set, 
epochs=20, steps_per_epoch=len(training_set), validation_steps=len(test_set) ) # save it as a h5 file import tensorflow as tf from keras.models import load_model model.save('model_sequential_14.h5') model_json = model.to_json() with open('model_adam_sequential_2020.json', 'w') as json_file: json_file.write(model_json) print('Model saved to the disk.') plt.plot(r.history['loss'], label='train_loss') plt.plot(r.history['val_loss'], label='val_loss') plt.legend() plt.show() plt.savefig('LossVal_loss') # plot the accuracy plt.plot(r.history['accuracy'], label='train_accuracy') plt.plot(r.history['val_accuracy'], label='val_accuracy') plt.legend() plt.show() plt.savefig('AccVal_accuracy')
[ "inexorable619@gmail.com" ]
inexorable619@gmail.com
28983ad35bba438daa2553a1003ba96695c3d775
745b63bdfb798f88d4f1b7679f435e43e6f2aec1
/pomodoro.py
1a109425e5adc3ef44c4f3e24efc89c1d45024fb
[]
no_license
torjeikenes/pomodoro
009bad75d2f0decca722d892253bd80266cabc85
07ccade38090f34b028e1e562c41e7a1bd77c836
refs/heads/master
2023-01-13T05:25:52.126481
2020-11-25T22:30:49
2020-11-25T22:30:49
316,047,832
0
0
null
null
null
null
UTF-8
Python
false
false
4,070
py
#! /usr/bin/env python3 # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Distributed under terms of the MIT license. """ Program for handling pomodoro timers """ import json import argparse from datetime import datetime, timedelta import copy import os import subprocess from pathlib import Path home = str(Path.home()) file = home+'/bin/data.json' datetimeFormat = '%Y-%m-%dT%H:%M:%S.%f' pomodoroLen = 25 breakLen = 5 x = { 'start': 0, 'end' : 0, 'length' : pomodoroLen, 'type' : 'none' } def main(): if args.file: file = args.file if args.pomodoro: newPomodoro(int(args.pomodoro)) if args.sbreak: newBreak(int(args.sbreak)) if args.next: nextTimer() if args.check: checkTime() def nextTimer(): try: with open(file, 'r') as f: try: lines = f.read().splitlines() line = lines[-1] data = json.loads(line) except Exception as e: raise Exception("Not valid json format") except Exception as e: return if (data['type'] == 'pomodoro') and (data['end'] != 0): newBreak(breakLen) elif (data['type'] == 'break') and (data['end'] != 0): newPomodoro(pomodoroLen) def newBreak(length): data = copy.copy(x) now = datetime.now().strftime(datetimeFormat) data['start'] = now data['type'] = 'break' data['length'] = length writeToFile(data) def newPomodoro(length): data = copy.copy(x) now = datetime.now().strftime(datetimeFormat) data['start'] = now data['type'] = 'pomodoro' data['length'] = length data['end'] = 0 writeToFile(data) def writeToFile(data): mode = 'a' if os.path.exists(file) else 'w' with open(file, mode) as f: f.write(json.dumps(data)) f.write('\n') def checkTime(): try: with open(file, 'r') as f: try: lines = f.read().splitlines() line = lines[-1] data = json.loads(line) except Exception as e: raise Exception("Not valid json format") except Exception as e: print("N/A") return cntd = "00:00" sumToday = 0 today = datetime.today().date() for l in lines: lineData = json.loads(l) start = datetime.strptime(lineData['start'], datetimeFormat) if (lineData['end'] != 0) and (start.date() == today) 
and (lineData['type'] == 'pomodoro'): sumToday += 1 if data['end'] == 0: time = datetime.strptime(data['start'], datetimeFormat) endtime = time + timedelta(minutes=int(data['length'])) now = datetime.now() diff = endtime - now if endtime < now: notify(data) data['end'] = endtime.strftime(datetimeFormat) lines[-1] = json.dumps(data) cntd = "00:00" with open(file, 'w') as f: f.write('\n'.join(lines) + '\n') else: cntd = ':'.join(str(diff).split(':')[1:]) cntd = cntd.split('.')[0] tp = data['type'][0].upper() returnString = "{} {} {}".format(sumToday, tp, cntd) print(returnString) def notify(data): tp = data['type'] if (tp == 'pomodoro'): message = "Time is up!\nTake a break." elif (tp == 'break'): message = "Break is over!\nGet back to work." else: message = "Time is up" subprocess.Popen(['notify-send', message]) return if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--file', '-f', help="Set json file") parser.add_argument('--pomodoro', '-p', const=pomodoroLen, nargs='?', help="Start a pomodoro timer") parser.add_argument('--sbreak', '-b', const=breakLen, nargs='?',help="Start a break timer") parser.add_argument('--check', '-c', action='store_true',help="Check time") parser.add_argument('--next', '-n', action='store_true',help="Start next timer") args = parser.parse_args() main()
[ "torje.n.eikenes@gmail.com" ]
torje.n.eikenes@gmail.com
9be50d39d015e172e51c97d330d5fe5035965ef5
b8e3363a40bc9928ae85c16232c5bf6240597a18
/out/production/home-assistant/components/switch/tellduslive.py
7edab40054f51d8807a01fa0c066ed3cb09c138f
[ "MIT" ]
permissive
LaurentTrk/home-assistant
4cbffd5a71f914e003918542319bc6caa96dbb72
5a808d4e7df4d8d0f12cc5b7e6cff0ddf42b1d40
refs/heads/dev
2021-01-15T23:02:38.147063
2016-05-15T12:21:52
2016-05-15T12:21:52
51,471,180
2
0
null
2016-02-10T20:49:47
2016-02-10T20:49:47
null
UTF-8
Python
false
false
1,832
py
""" homeassistant.components.switch.tellduslive ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Support for Tellstick switches using Tellstick Net and the Telldus Live online service. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/switch.tellduslive/ """ import logging from homeassistant.components import tellduslive from homeassistant.helpers.entity import ToggleEntity _LOGGER = logging.getLogger(__name__) def setup_platform(hass, config, add_devices, discovery_info=None): """ Find and return Tellstick switches. """ if discovery_info is None: return add_devices(TelldusLiveSwitch(switch) for switch in discovery_info) class TelldusLiveSwitch(ToggleEntity): """ Represents a Tellstick switch. """ def __init__(self, switch_id): self._id = switch_id self.update() _LOGGER.debug("created switch %s", self) def update(self): tellduslive.NETWORK.update_switches() self._switch = tellduslive.NETWORK.get_switch(self._id) @property def should_poll(self): """ Tells Home Assistant to poll this entity. """ return True @property def name(self): """ Returns the name of the switch if any. """ return self._switch["name"] @property def available(self): return not self._switch.get("offline", False) @property def is_on(self): """ True if switch is on. """ from tellive.live import const return self._switch["state"] == const.TELLSTICK_TURNON def turn_on(self, **kwargs): """ Turns the switch on. """ tellduslive.NETWORK.turn_switch_on(self._id) def turn_off(self, **kwargs): """ Turns the switch off. """ tellduslive.NETWORK.turn_switch_off(self._id)
[ "laurent.turek_github@gadz.org" ]
laurent.turek_github@gadz.org
4e24c93448376bf4ec8685ec08596212224928a2
45c13e4f2204c711c8ddb619a423fa2802df93fc
/blogspace/articles/migrations/0003_article_author.py
007e5bb909ab9152f422c1cd19343ec87b121f7c
[]
no_license
darklongnightt/blogspace
f7d287cf91e1900893676e4815078e41e7fc4f7f
ba67b4f9a621c5ac4a8cf5c016bdbf0617b40f00
refs/heads/master
2020-11-30T10:21:40.397046
2019-12-28T14:18:37
2019-12-28T14:18:37
230,376,202
0
0
null
null
null
null
UTF-8
Python
false
false
592
py
# Generated by Django 3.0.1 on 2019-12-28 11:43 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('articles', '0002_article_thumbnail'), ] operations = [ migrations.AddField( model_name='article', name='author', field=models.ForeignKey(default=None, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), ]
[ "toki.1243@gmail.com" ]
toki.1243@gmail.com
965ccfbb787575189bbd405fdd1a466953457af5
ee7596f2efcf9abf86ff312bb1f0a0f963e7787d
/chunking/main.py
ce4637ab17240a729280d041ed31f638b89aeb96
[]
no_license
the-league-of-legends/chunk
8edfb906fbfba0e291bd6adebcf95831d0491d71
b8d6327c2ebd4a4a41c94d4fb4322c15ff039f4d
refs/heads/master
2020-06-20T02:59:59.629299
2019-07-15T09:34:25
2019-07-15T09:34:25
null
0
0
null
null
null
null
UTF-8
Python
false
false
14,936
py
# -*-encoding=utf8-*- import json import codecs import itertools from collections import OrderedDict import os import sys from itertools import chain import tensorflow as tf import numpy as np from tensorflow.contrib.crf import crf_log_likelihood from tensorflow.contrib.crf import viterbi_decode from tensorflow.contrib.layers.python.layers import initializers currentPath = os.getcwd() sys.path.append(currentPath) import jieba import jieba.posseg as pseg root_path = os.getcwd() global pyversion if sys.version > '3': pyversion = 'three' else: pyversion = 'two' if pyversion == 'three': import pickle else: import cPickle, pickle root_path = os.getcwd() + os.sep CONFIG = { } class Model(object): # 初始化模型参数 def __init__(self, config): self.config = config self.lr = config["lr"] self.char_dim = config["char_dim"] self.lstm_dim = config["lstm_dim"] self.seg_dim = config["seg_dim"] self.num_tags = config["num_tags"] self.num_chars = config["num_chars"] # 样本中总字数 self.num_segs = 4 self.global_step = tf.Variable(0, trainable=False) self.best_dev_f1 = tf.Variable(0.0, trainable=False) self.best_test_f1 = tf.Variable(0.0, trainable=False) self.initializer = initializers.xavier_initializer() self.char_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name="ChatInputs") self.seg_inputs = tf.placeholder(dtype=tf.int32, shape=[None, None], name="SegInputs") self.targets = tf.placeholder(dtype=tf.int32, shape=[None, None], name="Targets") # dropout keep prob self.dropout = tf.placeholder(dtype=tf.float32, name="Dropout") used = tf.sign(tf.abs(self.char_inputs)) length = tf.reduce_sum(used, reduction_indices=1) self.lengths = tf.cast(length, tf.int32) self.batch_size = tf.shape(self.char_inputs)[0] self.num_steps = tf.shape(self.char_inputs)[-1] self.model_type = config['model_type'] self.layers = [ { 'dilation': 1 }, { 'dilation': 1 }, { 'dilation': 2 }, ] self.filter_width = 3 self.num_filter = self.lstm_dim self.embedding_dim = self.char_dim + self.seg_dim 
self.repeat_times = 4 self.cnn_output_width = 0 embedding = self.embedding_layer(self.char_inputs, self.seg_inputs, config) if self.model_type == 'idcnn': model_inputs = tf.nn.dropout(embedding, self.dropout) model_outputs = self.IDCNN_layer(model_inputs) self.logits = self.project_layer_idcnn(model_outputs) else: raise KeyError self.loss = self.loss_layer(self.logits, self.lengths) with tf.variable_scope("optimizer"): optimizer = self.config["optimizer"] if optimizer == "sgd": self.opt = tf.train.GradientDescentOptimizer(self.lr) elif optimizer == "adam": self.opt = tf.train.AdamOptimizer(self.lr) elif optimizer == "adgrad": self.opt = tf.train.AdagradOptimizer(self.lr) else: raise KeyError grads_vars = self.opt.compute_gradients(self.loss) capped_grads_vars = [[tf.clip_by_value(g, -self.config["clip"], self.config["clip"]), v] for g, v in grads_vars] self.train_op = self.opt.apply_gradients(capped_grads_vars, self.global_step) self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=5) def embedding_layer(self, char_inputs, seg_inputs, config, name=None): embedding = [] self.char_inputs_test = char_inputs self.seg_inputs_test = seg_inputs with tf.variable_scope("char_embedding" if not name else name): self.char_lookup = tf.get_variable( name="char_embedding", shape=[self.num_chars, self.char_dim], initializer=self.initializer) embedding.append(tf.nn.embedding_lookup(self.char_lookup, char_inputs)) if config["seg_dim"]: with tf.variable_scope("seg_embedding"): self.seg_lookup = tf.get_variable( name="seg_embedding", # shape=[4*20] shape=[self.num_segs, self.seg_dim], initializer=self.initializer) embedding.append(tf.nn.embedding_lookup(self.seg_lookup, seg_inputs)) embed = tf.concat(embedding, axis=-1) self.embed_test = embed self.embedding_test = embedding return embed def IDCNN_layer(self, model_inputs, name=None): model_inputs = tf.expand_dims(model_inputs, 1) self.model_inputs_test = model_inputs reuse = False if self.dropout == 1.0: reuse = True with 
tf.variable_scope("idcnn" if not name else name): # shape=[1*3*120*100] shape = [1, self.filter_width, self.embedding_dim, self.num_filter] print(shape) filter_weights = tf.get_variable( "idcnn_filter", shape=[1, self.filter_width, self.embedding_dim, self.num_filter], initializer=self.initializer) layerInput = tf.nn.conv2d(model_inputs, filter_weights, strides=[1, 1, 1, 1], padding="SAME", name="init_layer", use_cudnn_on_gpu=True) self.layerInput_test = layerInput finalOutFromLayers = [] totalWidthForLastDim = 0 for j in range(self.repeat_times): for i in range(len(self.layers)): # 1,1,2 dilation = self.layers[i]['dilation'] isLast = True if i == (len(self.layers) - 1) else False with tf.variable_scope("atrous-conv-layer-%d" % i, reuse=True if (reuse or j > 0) else False): w = tf.get_variable( "filterW", shape=[1, self.filter_width, self.num_filter, self.num_filter], initializer=tf.contrib.layers.xavier_initializer()) if j == 1 and i == 1: self.w_test_1 = w if j == 2 and i == 1: self.w_test_2 = w b = tf.get_variable("filterB", shape=[self.num_filter]) conv = tf.nn.atrous_conv2d(layerInput, w, rate=dilation, padding="SAME") self.conv_test = conv conv = tf.nn.bias_add(conv, b) conv = tf.nn.relu(conv) if isLast: finalOutFromLayers.append(conv) totalWidthForLastDim += self.num_filter layerInput = conv finalOut = tf.concat(axis=3, values=finalOutFromLayers) keepProb = 1.0 if reuse else 0.5 finalOut = tf.nn.dropout(finalOut, keepProb) finalOut = tf.squeeze(finalOut, [1]) finalOut = tf.reshape(finalOut, [-1, totalWidthForLastDim]) self.cnn_output_width = totalWidthForLastDim return finalOut def project_layer_idcnn(self, idcnn_outputs, name=None): with tf.variable_scope("project" if not name else name): with tf.variable_scope("logits"): W = tf.get_variable("W", shape=[self.cnn_output_width, self.num_tags], dtype=tf.float32, initializer=self.initializer) b = tf.get_variable("b", initializer=tf.constant(0.001, shape=[self.num_tags])) pred = tf.nn.xw_plus_b(idcnn_outputs, W, 
b) return tf.reshape(pred, [-1, self.num_steps, self.num_tags]) def loss_layer(self, project_logits, lengths, name='crf_loss'): with tf.variable_scope(name): small = -1000.0 start_logits = tf.concat( [small * tf.ones(shape=[self.batch_size, 1, self.num_tags]), tf.zeros(shape=[self.batch_size, 1, 1])], axis=-1) pad_logits = tf.cast(small * tf.ones([self.batch_size, self.num_steps, 1]), tf.float32) logits = tf.concat([project_logits, pad_logits], axis=-1) logits = tf.concat([start_logits, logits], axis=1) targets = tf.concat( [tf.cast(self.num_tags * tf.ones([self.batch_size, 1]), tf.int32), self.targets], axis=-1) self.trans = tf.get_variable( "transitions", shape=[self.num_tags + 1, self.num_tags + 1], initializer=self.initializer) log_likelihood, self.trans = crf_log_likelihood( inputs=logits, tag_indices=targets, transition_params=self.trans, sequence_lengths=lengths + 1) return tf.reduce_mean(-log_likelihood) def create_feed_dict(self, batch): _, chars, segs, tags = batch feed_dict = { self.char_inputs: np.asarray(chars), self.seg_inputs: np.asarray(segs), self.dropout: 1.0, } return feed_dict def run_step(self, sess, batch): feed_dict = self.create_feed_dict(batch) lengths, logits = sess.run([self.lengths, self.logits], feed_dict) return lengths, logits def decode(self, logits, lengths, matrix): paths = [] small = -1000.0 start = np.asarray([[small] * self.num_tags + [0]]) for score, length in zip(logits, lengths): score = score[:length] pad = small * np.ones([length, 1]) logits = np.concatenate([score, pad], axis=1) logits = np.concatenate([start, logits], axis=0) path, _ = viterbi_decode(logits, matrix) paths.append(path[1:]) return paths def result_to_json(self, string, tags): item = {"string": string, "entities": []} entity_name = "" entity_start = 0 idx = 0 for char, tag in zip(string, tags): if tag[0] == "S": item["entities"].append({"word": char, "start": idx, "end": idx + 1, "type": tag[2:]}) elif tag[0] == "B": entity_name += char entity_start = idx 
elif tag[0] == "I": entity_name += char elif tag[0] == "E": entity_name += char item["entities"].append({"word": entity_name, "start": entity_start, "end": idx + 1, "type": tag[2:]}) entity_name = "" else: entity_name = "" entity_start = idx idx += 1 return item def evaluate_line(self, sess, inputs, id_to_tag): trans = self.trans.eval(session=sess) lengths, scores = self.run_step(sess, inputs) batch_paths = self.decode(scores, lengths, trans) tags = [id_to_tag[idx] for idx in batch_paths[0]] return self.result_to_json(inputs[0][0], tags) class Chunk(object): def __init__(self): self.config_file = json.load(open("config_file", encoding="utf8")) self.tf_config = tf.ConfigProto() self.sess = tf.Session(config=self.tf_config) self.sess.run(tf.global_variables_initializer()) self.maps = "maps.pkl" if pyversion == 'three': self.char_to_id, self.id_to_char, self.tag_to_id, self.id_to_tag = pickle.load(open(self.maps, "rb")) else: self.char_to_id, self.id_to_char, self.tag_to_id, self.id_to_tag = pickle.load(open(self.maps, "rb"), protocol=2) self.model = Model(self.config_file) self.ckpt = tf.train.get_checkpoint_state("ckpt") if self.ckpt and tf.train.checkpoint_exists(self.ckpt.model_checkpoint_path): print("Reading model parameters from %s" % self.ckpt.model_checkpoint_path) self.model.saver.restore(self.sess, self.ckpt.model_checkpoint_path) else: print("No model file") def features(self, string): def _w2f(word): lenth = len(word) if lenth == 1: r = [0] if lenth > 1: r = [2] * lenth r[0] = 1 r[-1] = 3 return r return list(chain.from_iterable([_w2f(word) for word in jieba.cut(string) if len(word.strip()) > 0])) def get_text_input(self, text): inputs = list() inputs.append([text]) D = self.char_to_id["<UNK>"] inputs.append([[self.char_to_id.setdefault(char, D) for char in text if len(char.strip()) > 0]]) inputs.append([self.features(text)]) inputs.append([[]]) if len(text.strip()) > 1: return self.model.evaluate_line(self.sess, inputs, self.id_to_tag) if __name__ == 
"__main__": c = Chunk() for line in open('text.txt', 'r', encoding='utf8'): print(c.get_text_input(line.strip())) # s="典型胸痛 因体力活动、情绪激动等诱发,突感心前区疼痛,多为发作性绞痛或压榨痛,也可为憋闷感。疼痛从胸骨后或心前区开始,向上放射至左肩、臂,甚至小指和无名指,休息或含服硝酸甘油可缓解。胸痛放散的部位也可涉及颈部、下颌、牙齿、腹部等。胸痛也可出现在安静状态下或夜间,由冠脉痉挛所致,也称变异型心绞痛。如胸痛性质发生变化,如新近出现的进行性胸痛,痛阈逐步下降,以至稍事体力活动或情绪激动甚至休息或熟睡时亦可发作。疼痛逐渐加剧、变频,持续时间延长,祛除诱因或含服硝酸甘油不能缓解,此时往往怀疑不稳定心绞痛。" # print(c.get_text_input(s))
[ "lsvt@lsvtdeiMac.local" ]
lsvt@lsvtdeiMac.local
04a6fcd8590335f274756db0927a0e81091f1511
e4039781ee08c03d32cc5bdfa1d424a4a4f33ac0
/BO_parser_prot.py
d54edca1049f6dca1df659dea22a86288773161e
[]
no_license
CoderMatthias/ortholog_pipeline
c003a03af94392eb8ce3cab4fc551630d595d456
7828d88700004f6dc61fde0c565d48f7c88e2d34
refs/heads/master
2021-01-19T12:39:23.897794
2015-07-15T19:49:29
2015-07-15T19:49:29
39,141,896
0
0
null
null
null
null
UTF-8
Python
false
false
3,666
py
#!/usr/bin/python3 import sys import time ''' This python script takes the blastp tab-deliminated data and parses it to include unique hits and an additive bit score Requires: output from blastp, a dictionary to convert FBpp#s to FBgn#s for both Dmen and other D species Usage: python BO_parser_prot.py blastp_output.tsv Dmel_FBpp_to_FBgn.tsv species_FBpp_to_FBgn.tsv ''' start_time = time.time() def line_list_from_input (sys_argv): '''Open a file and make a line list with it's contents''' with open(sys_argv, 'r') as source_file: line_list = source_file.read().split('\n') source_file_name = source_file.name LoL = make_list_into_LoL(line_list) return LoL , source_file_name def make_list_into_LoL (line_list): '''Take a line list and make a list of list (LoL)''' record , LoL = () , [] for line in line_list: if not line.startswith('#') and line.strip() != '': record = line.split('\t') LoL.append(record) return LoL def FBpp_to_FBgn_dict (dict_sys_argv): '''Converts the FBpp <-> FBgn file and to a dictionary''' LoL , nullname = line_list_from_input (dict_sys_argv) out_dict = {} for line in LoL: out_dict[line[0]] = line[1] return out_dict def replace_FBpp_w_FBgn (pp_to_gn_dict , list_to_switch , column_to_switch): '''Replaces protein number (FBpp) with the gene number (FBgn)''' for line in list_to_switch: line[column_to_switch] = pp_to_gn_dict[line[column_to_switch]] return list_to_switch def column_value_unique_list (line_list , column_number): '''Make list of all unique items in column of list''' unique_list = [] for line in line_list: if line[column_number] not in unique_list: unique_list.append(line[column_number]) return unique_list def make_blast_dict (blast_subset): '''make a dictionary of blast results where key = gene and value contains bitscore''' blast_dict = {} for line in blast_subset: if line[1] not in blast_dict: blast_dict[line[1]] = [float(line[-1])] else: blast_dict[line[1]].append(float(line[-1])) return blast_dict def write_output(name , list_to_write): 
'''Write an output file from a list''' output_file_name = '3_{}_parsed.tsv'.format(name[:-4]) print 'Output saved as: {}'.format(output_file_name) with open(output_file_name, 'w') as output_file: for line in list_to_write: output_line = '\t'.join(map(str, line)) output_file.write(output_line + '\n') def main(): blast_list , source_file_name = line_list_from_input (sys.argv[1]) m_dict = FBpp_to_FBgn_dict (sys.argv[2]) s_dict = FBpp_to_FBgn_dict (sys.argv[3]) blast_list = replace_FBpp_w_FBgn (m_dict , blast_list , 0) blast_list = replace_FBpp_w_FBgn (s_dict , blast_list , 1) unique_mel_genes = column_value_unique_list (blast_list , 0) output_list = [] for mel_gene in unique_mel_genes: blast_subset , new_blast_list = [] , [] for line in blast_list: if line[0] == mel_gene: blast_subset.append(line) else: new_blast_list.append(line) blast_list = new_blast_list blast_dict = make_blast_dict (blast_subset) for blast in blast_subset: blast.append(sum(blast_dict[blast[1]])) if blast[0:2] + [blast[-1]] not in output_list: output_list.append(blast[0:2] + [blast[-1]]) write_output(source_file_name , output_list) # print (time.time()-start_time) if __name__ == '__main__': main()
[ "Matt.Kanke@gmail.com" ]
Matt.Kanke@gmail.com
63cd09ad5e4f6c73fabb07766215cf1ea10619ac
760a806cf48d62f96c32906f2cb2be861ab4eda2
/venv/bin/python-config
90275ad6c7c13e606e7fdb5fe45939227bd636f2
[ "MIT" ]
permissive
francamacdowell/AnalyzeYou
e6339104181012ef196e0ce5d7c537efa21dd1c2
3fa6556b621af99543693fc26fa0d784996bd19c
refs/heads/master
2021-02-23T14:35:41.925987
2020-03-06T11:39:30
2020-03-06T11:39:30
245,402,115
0
0
MIT
2020-03-06T11:33:13
2020-03-06T11:26:52
Python
UTF-8
Python
false
false
2,359
#!/home/macdowell/Workspace/AnalyzeYou/venv/bin/python import sys import getopt import sysconfig valid_opts = ['prefix', 'exec-prefix', 'includes', 'libs', 'cflags', 'ldflags', 'help'] if sys.version_info >= (3, 2): valid_opts.insert(-1, 'extension-suffix') valid_opts.append('abiflags') if sys.version_info >= (3, 3): valid_opts.append('configdir') def exit_with_usage(code=1): sys.stderr.write("Usage: {0} [{1}]\n".format( sys.argv[0], '|'.join('--'+opt for opt in valid_opts))) sys.exit(code) try: opts, args = getopt.getopt(sys.argv[1:], '', valid_opts) except getopt.error: exit_with_usage() if not opts: exit_with_usage() pyver = sysconfig.get_config_var('VERSION') getvar = sysconfig.get_config_var opt_flags = [flag for (flag, val) in opts] if '--help' in opt_flags: exit_with_usage(code=0) for opt in opt_flags: if opt == '--prefix': print(sysconfig.get_config_var('prefix')) elif opt == '--exec-prefix': print(sysconfig.get_config_var('exec_prefix')) elif opt in ('--includes', '--cflags'): flags = ['-I' + sysconfig.get_path('include'), '-I' + sysconfig.get_path('platinclude')] if opt == '--cflags': flags.extend(getvar('CFLAGS').split()) print(' '.join(flags)) elif opt in ('--libs', '--ldflags'): abiflags = getattr(sys, 'abiflags', '') libs = ['-lpython' + pyver + abiflags] libs += getvar('LIBS').split() libs += getvar('SYSLIBS').split() # add the prefix/lib/pythonX.Y/config dir, but only if there is no # shared library in prefix/lib/. if opt == '--ldflags': if not getvar('Py_ENABLE_SHARED'): libs.insert(0, '-L' + getvar('LIBPL')) if not getvar('PYTHONFRAMEWORK'): libs.extend(getvar('LINKFORSHARED').split()) print(' '.join(libs)) elif opt == '--extension-suffix': ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') if ext_suffix is None: ext_suffix = sysconfig.get_config_var('SO') print(ext_suffix) elif opt == '--abiflags': if not getattr(sys, 'abiflags', None): exit_with_usage() print(sys.abiflags) elif opt == '--configdir': print(sysconfig.get_config_var('LIBPL'))
[ "fmdss@ic.ufal.br" ]
fmdss@ic.ufal.br
013916367cfd1dfcd2bbaf32bb98f24b7cbf6c17
273c436a67c50e0128e9f7c181f6a18891b9bac9
/ModuleWeatherBundle/Resource/WeatherResourceImpl/Cached.py
da5ff27d7bd66cfeb5b8271e13095684a4cf19c5
[]
no_license
jaepyoung/weather-microservice
302c52cad82dcb7248a2b1025449bca308e5ef6f
6818e9ae96817f3e8708b654a7922554441db393
refs/heads/master
2021-01-19T13:36:05.398880
2015-01-19T08:33:09
2015-01-19T08:33:09
null
0
0
null
null
null
null
UTF-8
Python
false
false
975
py
from ModuleWeatherBundle.Resource.WeatherResource import WeatherResource import json class Cached(WeatherResource): def __init__(self, service, cache_redis, cache_ttl = 3600): super(Cached, self).__init__() self.service = service self.cache_redis = cache_redis self.cache_ttl = cache_ttl def getWeatherConditions(self, region, city): key_params = { 'method': 'conditions', 'region': region, 'city': city, } key = 'wunderground_' + json.dumps(key_params, separators=(',', ':')) data = self.cache_redis.get(key) if data is not None: return json.loads(data) data = self.service.getWeatherConditions(region, city) self.cache_redis.set( key, json.dumps(data), self.cache_ttl ) return data
[ "athlan@vgroup.pl" ]
athlan@vgroup.pl
46e425071b72856e84300bad5e705cc2c7dff76d
800b5cd8c3d58b60d80aca551e54af28ec3c9f18
/code/chapter_05_example_14.py
81334fc4e4cc158d144cc5ba91bcb59c006f0045
[]
no_license
CyberLight/two-scoops-of-django-1.8
6591347cb20f3c16e252943c04f0f524f8e8b235
423971ad609ec9a552617fc4f7424e701295c09b
refs/heads/master
2021-01-21T03:02:52.704822
2015-05-11T16:32:31
2015-05-11T16:32:31
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,626
py
""" Using This Code Example ========================= The code examples provided are provided by Daniel Greenfeld and Audrey Roy of Two Scoops Press to help you reference Two Scoops of Django: Best Practices for Django 1.8. Code samples follow PEP-0008, with exceptions made for the purposes of improving book formatting. Example code is provided "as is", and is not intended to be, and should not be considered or labeled as "tutorial code". Permissions ============ In general, you may use the code we've provided with this book in your programs and documentation. You do not need to contact us for permission unless you're reproducing a significant portion of the code or using it in commercial distributions. Examples: * Writing a program that uses several chunks of code from this course does not require permission. * Selling or distributing a digital package from material taken from this book does require permission. * Answering a question by citing this book and quoting example code does not require permission. * Incorporating a significant amount of example code from this book into your product's documentation does require permission. Attributions usually include the title, author, publisher and an ISBN. For example, "Two Scoops of Django: Best Practices for Django 1.8, by Daniel Roy Greenfeld and Audrey Roy Greenfeld. Copyright 2015 Two Scoops Press (ISBN-GOES-HERE)." If you feel your use of code examples falls outside fair use of the permission given here, please contact us at info@twoscoopspress.org.""" # Top of settings/production.py import os SOME_SECRET_KEY = os.environ["SOME_SECRET_KEY"]
[ "danny@eventbrite.com" ]
danny@eventbrite.com
6f2edb09e5c1f151145ab5c1adacec423009c475
e452f89c51180487f2ed68c33ca2fed54e14a967
/1-Python-Programming-Basics (Sep 2020)/Course-Exercises-and-Exams/03_Conditional-Statements-Advanced/01.Lab-04-Personal-Titles.py
72a970d05c0e96713bf60476264312a5d9ccd0bc
[ "MIT" ]
permissive
karolinanikolova/SoftUni-Software-Engineering
c996f18eea9fb93164ab674614e90b357ef4858a
7891924956598b11a1e30e2c220457c85c40f064
refs/heads/main
2023-06-21T23:24:55.224528
2021-07-22T16:15:59
2021-07-22T16:15:59
367,432,464
0
0
null
null
null
null
UTF-8
Python
false
false
856
py
# 4. Обръщение според възраст и пол # Да се напише конзолна програма, която прочита възраст (реално число) и пол ('m' или 'f'), въведени от потребителя, и отпечатва обръщение измежду следните: # • "Mr." – мъж (пол 'm') на 16 или повече години # • "Master" – момче (пол 'm') под 16 години # • "Ms." – жена (пол 'f') на 16 или повече години # • "Miss" – момиче (пол 'f') под 16 години age = float(input()) sex = input() if sex == 'f': if age >= 16: print('Ms.') elif age < 16: print('Miss') elif sex == 'm': if age >= 16: print('Mr.') elif age < 16: print('Master')
[ "Nikolova@eum.root.eumetsat.int" ]
Nikolova@eum.root.eumetsat.int
92390ff097f0dc700869fdfc84c2e3606ee46f1d
2260c05c1fae664b7a6395b6b8e2c5ad5a61eb4b
/driver.py
66222accfc43a640d831c08077a3bc31dd4579d5
[]
no_license
roulaoregan/neural_networks
05bb3e9572303c3b68cdaa2d710645cd5061bf70
9a158ab264fd12bb6b5175786d333ea9b574f332
refs/heads/master
2021-01-01T19:11:58.407523
2014-02-06T00:34:18
2014-02-06T00:34:18
null
0
0
null
null
null
null
UTF-8
Python
false
false
174
py
''' Main driver for Boltzmann machine ''' import logger import os import re import sys def main(argv=None): pass if '__name__' == '__main__': sys.exit(main())
[ "roula.oregan@gmail.com" ]
roula.oregan@gmail.com
2e9630b46c62bf6ed75120e758ba48e9ba4e9aa3
2f86ac5ea6b2781c30a031f8e9bb02ccbe4bac57
/ch05_external_testing_tools_calculator/test.py
ea55796af7459989924e7bc802d143f5f01b570d
[]
no_license
KatharinaWiedmann/module3_Business_Python
183945bd0017c15f38b5d800fb89a8361bae6860
fbe4b8ab0a903ea3a713a5f6b79b9dba7cce94c4
refs/heads/master
2020-04-18T03:21:54.050910
2019-02-07T12:16:51
2019-02-07T12:16:51
167,195,519
0
0
null
null
null
null
UTF-8
Python
false
false
942
py
# -*- coding: utf-8 -*- """ Created on Wed Jan 30 09:55:46 2019 @author: Katharina """ import unittest from calculator import Calculator class TddInPythonExample(unittest.TestCase): def setUp(self): self.calc = Calculator() def test_calculator_add_method_returns_correct_result(self): # result = self.calc.add(2,2) # self.assertEqual(4, result) # shorter version: self.assertEqual(self.calc.add(2,2), 4) def test_calculator_returns_error_message_if_both_args_no_numbers(self): self.assertRaises(ValueError, self.calc.add, 'two', 'three') def test_calculator_returns_error_message_if_x_not_number(self): self.assertRaises(ValueError, self.calc.add, 'two', 3) def test_calculator_returns_error_message_if_y_not_number(self): self.assertRaises(ValueError, self.calc.add, 2, 'three') if __name__ == '__main__': unittest.main()
[ "katie.wiedmann@gmx.de" ]
katie.wiedmann@gmx.de
3884eb54e7e03a3ef48250ac38e73501f51b3ad0
55b3948a9f3b9ecc55800ee20f703693057d4577
/code47.py
d168af513bc50186ca75f6fc67ad6ff413de98ee
[]
no_license
bommankondapraveenkumar/PYWORK
31b1c4edfb3e34a7f4103435f77a25814623b891
099bc260b80b1d724d46b714df8c931e037ee420
refs/heads/main
2023-01-07T16:52:25.915269
2020-11-11T06:40:16
2020-11-11T06:40:16
311,883,307
0
0
null
null
null
null
UTF-8
Python
false
false
1,282
py
def horoscope(): M=input("enter the month and date :\n") S=M.split() month=S[0] day=int(S[1]) print("YOUR ZODIAC SIGN IS:") if(month=="december"): if(day>21): print("Capricorn") else: print("sagittarius") elif(month=="january"): if(day>19): print("aquarius") else: print("capricorn") elif(month=="february"): if(day>19): print("Pisces") else: print("aquarius") elif(month=="march"): if(day>20): print("Aries") else: print("Pisces") elif(month=="april"): if(day>19): print("Taurus") else: print("Aries") elif(month=="may"): if(day>20): print("Gemini") else: print("Taurus") elif(month=="june"): if(day>20): print("cancer") else: print("Gemini") elif(month=="july"): if(day>22): print("Leo") else: print("cancer") elif(month=="august"): if(day>22): print("Virgo") else: print("Leo") elif(month=="september"): if(day>22): print("Libra") else: print("Virgo") elif(month=="october"): if(day>22): print("Scorpio") else: print("Lobra") elif(month=="november"): if(day>21): print("Sagittarius") else: print("Scorpio") else: print("please enter a valid month and date") horoscope()
[ "noreply@github.com" ]
bommankondapraveenkumar.noreply@github.com
4fe810f0f0f672f8136173ab9c58da8afa0a8929
a986754144d9f1db1ce5ac6d86c164ae1667ed3e
/cuenta/migrations/0002_auto_20210207_0918.py
83d183b5e01b44efee8e7e6c538f4a1bf01f5032
[]
no_license
mateo9516/ChatPublicoDjango
4b2bcc7eb75ed7fb5a73ab6927bdd2c11bbdc376
746c13a3ff48cf69bd3ff1d1f9ea9b24a4e909b0
refs/heads/master
2023-03-05T01:28:03.678399
2021-02-07T17:43:27
2021-02-07T17:43:27
336,840,304
1
0
null
null
null
null
UTF-8
Python
false
false
694
py
# Generated by Django 2.2.15 on 2021-02-07 14:18 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('cuenta', '0001_initial'), ] operations = [ migrations.AddField( model_name='cuenta', name='is_admin', field=models.BooleanField(default=False), ), migrations.AddField( model_name='cuenta', name='is_staff', field=models.BooleanField(default=False), ), migrations.AddField( model_name='cuenta', name='is_superuser', field=models.BooleanField(default=False), ), ]
[ "mateo.echeverry96@gmail.com" ]
mateo.echeverry96@gmail.com
69a73772a221b1d1fc46f63870acf9ab7b9d268f
76a269c93a79b156240d9a2568bd2eee7258622f
/naive_bayes.py
28a6c01619e26c4273617271c4be1ed825789eb7
[]
no_license
omarn33/Spam-Email-Classifier
f6bfeb3e1c66363b49af086004c42bb0d6c4ef2c
3b52c4fa7dbf45bd1aeabb9fb51183c92af2628b
refs/heads/master
2023-06-28T10:20:06.620304
2021-08-03T04:47:33
2021-08-03T04:47:33
392,191,798
1
0
null
null
null
null
UTF-8
Python
false
false
7,132
py
# naive_bayes.py # --------------- # Licensing Information: You are free to use or extend this projects for # educational purposes provided that (1) you do not distribute or publish # solutions, (2) you retain this notice, and (3) you provide clear # attribution to the University of Illinois at Urbana-Champaign # # Created by Justin Lizama (jlizama2@illinois.edu) on 09/28/2018 import numpy as np from collections import Counter """ This is the main entry point for Part 1 of this MP. You should only modify code within this file for Part 1 -- the unrevised staff files will be used for all other files and classes when code is run, so be careful to not modify anything else. """ def naiveBayes(train_set, train_labels, dev_set, smoothing_parameter, pos_prior): """ train_set - List of list of words corresponding with each email example: suppose I had two emails 'like this movie' and 'i fall asleep' in my training set Then train_set := [['like','this','movie'], ['i','fall','asleep']] train_labels - List of labels corresponding with train_set example: Suppose I had two emails, first one was ham and second one was spam. 
Then train_labels := [1, 0] dev_set - List of list of words corresponding with each email that we are testing on It follows the same format as train_set smoothing_parameter - The smoothing parameter --laplace (1.0 by default) pos_prior - positive prior probability (between 0 and 1) """ # *----Train Model----* # Initialize Counters to store the frequency of every word in Ham/Spam emails ham_word_counter = Counter() spam_word_counter = Counter() # Initialize dictionaries to store the probability of each word Ham/Spam emails ham_word_probability = {} spam_word_probability = {} # Initialize a list to store the predicted development set labels dev_labels = [] # Populate the frequency of every word in Ham/Spam emails index = 0 for label in train_labels: if label == 1: ham_word_counter.update(train_set[index]) else: spam_word_counter.update(train_set[index]) index += 1 # Display frequency print("Ham Word Counter:") print(ham_word_counter.most_common(10)) print() print("Spam Word Counter:") print(spam_word_counter.most_common(10)) print() # Determine the total number of words in the Ham/Spam training email set ham_total_words = 0 for word_frequency in ham_word_counter.values(): ham_total_words += word_frequency spam_total_words = 0 for word_frequency in spam_word_counter.values(): spam_total_words += word_frequency # Display totals BEFORE Laplace smoothing print("Total Number of Words in Ham Emails BEFORE Laplace:") print(ham_total_words) print() print("Total Number of Words in Spam Emails BEFORE Laplace:") print(spam_total_words) print() # Add the words present in the developer set but absent in the ham set to the counter with a frequency of zero for email in range(len(dev_set)): for word in dev_set[email]: if word not in ham_word_counter: ham_word_counter.update([word]) ham_word_counter.subtract([word]) # Add the words present in the developer set but absent in the spam set to the counter with a frequency of zero for email in range(len(dev_set)): for word in 
dev_set[email]: if word not in spam_word_counter: spam_word_counter.update([word]) spam_word_counter.subtract([word]) # Display the ham counter after the addition of words with zero frequency ham_word_counter_length = len(ham_word_counter) print("Smallest Ham Word Frequency:") print(ham_word_counter[ham_word_counter_length - 1]) print() # Display the spam counter after the addition of words with zero frequency spam_word_counter_length = len(spam_word_counter) print("Smallest Spam Word Frequency:") print(spam_word_counter[spam_word_counter_length - 1]) print() # Copy ham word counter content into ham word probability dictionary ham_word_probability = ham_word_counter.copy() # Copy spam word counter content into spam word probability dictionary spam_word_probability = spam_word_counter.copy() # Display dictionaries before the addition of the Laplace smoothing constant print("Ham Word Probability BEFORE Laplace:") # print(ham_word_probability) print() print("Spam Word Probability BEFORE Laplace:") # print(spam_word_probability) print() # Apply Laplace smoothing for word in ham_word_probability: ham_word_probability[word] += smoothing_parameter for word in spam_word_probability: spam_word_probability[word] += smoothing_parameter # Display the dictionaries after the addition of the Laplace smoothing constant print("Laplace Constant:") print(smoothing_parameter) print() print("Ham Word Probability AFTER Laplace:") # print(ham_word_probability) print() print("Spam Word Probability AFTER Laplace:") # print(spam_word_probability) print() # Determine the total number of words after Laplace smoothing ham_word_total = sum(ham_word_probability.values()) spam_word_total = sum(spam_word_probability.values()) # Display totals AFTER Laplace smoothing print("Total Number of Words in Ham Emails AFTER Laplace:") print(ham_word_total) print() print("Total Number of Words in Spam Emails AFTER Laplace:") print(spam_word_total) print() # Determine each word's likelihood in ham/spam emails 
(logging the probabilities to avoid underflow) for word in ham_word_probability: ham_word_probability[word] = np.log((ham_word_probability[word]) / ham_word_total) for word in spam_word_probability: spam_word_probability[word] = np.log((spam_word_probability[word]) / spam_word_total) # Determine likelihood of ham/spam prior [i.e: log(P(Ham)) and log(P(Spam))] likelihood_of_ham = np.log(pos_prior) likelihood_of_spam = np.log(1.0 - pos_prior) # *----Test Model----* likelihood_email_is_ham = likelihood_of_ham likelihood_email_is_spam = likelihood_of_spam for email in range(len(dev_set)): # Based on the words in a given email, determine the likelihood the email is ham and spam for word in dev_set[email]: likelihood_email_is_ham += ham_word_probability[word] likelihood_email_is_spam += spam_word_probability[word] # Classify email as ham or spam based on likelihood value if likelihood_email_is_ham > likelihood_email_is_spam: dev_labels.append(1) else: dev_labels.append(0) # Reset likelihoods to initial values likelihood_email_is_ham = likelihood_of_ham likelihood_email_is_spam = likelihood_of_spam print("Development Labels:") print(dev_labels) print() # return predicted labels of development set return dev_labels
[ "omarnaeem333@gmail.com" ]
omarnaeem333@gmail.com
0704b14dc207bdeb9c69726cce59cb935ea707cc
b4752cce5d753784c4eb9c742079da6b9df50ab3
/news_aggregator_api/save_data.py
94fe61a818410e9c72dd9bddb4623d699044a6ac
[]
no_license
verain1/Conzu
105458b33719f8e8304d25a74a06c8fd546b5693
d61441db2af3d05c3b8cbbd01336b3dfc49f9f9f
refs/heads/main
2023-07-25T08:20:13.186590
2021-08-13T19:14:59
2021-08-13T19:14:59
395,057,912
0
0
null
null
null
null
UTF-8
Python
false
false
567
py
from ndtv_scraper import get_ndtv_data # ndtv scraper from toi_scraper import get_toi_data # toi scraper import pandas as pd import numpy as np import os import random data1 = get_ndtv_data() data2 = get_toi_data() full_data = data1 + data2 full_data = pd.DataFrame(full_data) full_data = full_data.dropna() filtered_data = full_data.iloc[0::4] linux_path = '/home/ansh/' windows_path = 'C:/news_1/' #os.system('cd ..') full_data.to_csv(linux_path+'news_aggregator/articles.csv') filtered_data.to_csv(linux_path+'news_aggregator/filtered.csv') print(full_data)
[ "anshchadha9211@gmail.com" ]
anshchadha9211@gmail.com
b609de5a340b8ffa5463bb61550b269014cc97d8
673d55fe4fee15b4047bf0248f5ab5b8a72e3907
/face_recognition/face_recognizer_opencv.py
98e5ed9efda71000ebb7c22af5f5f0330e202bd7
[]
no_license
manuel-lang/BlackForestHackathon
bfb08e66f59144792c66bd116976037eb0000c51
d521d553974b1533f567f1e63f50f3f633022e1b
refs/heads/master
2021-07-12T03:16:46.823216
2017-10-09T10:07:58
2017-10-09T10:07:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,624
py
import cv2 import os import numpy as np lbph_rec = cv2.face.LBPHFaceRecognizer_create() subjects = ["", "Manuel Lang", "Marius Bauer", "Tobias Oehler", "Jerome Klausmann"] def detect_faces(img): gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) face_cascade = cv2.CascadeClassifier('lbpcascade_frontalface.xml') faces = face_cascade.detectMultiScale(gray, scaleFactor=1.2, minNeighbors=5); if (len(faces) == 0): return None, None val = [] for face in faces: (x, y, w, h) = face val.append(tuple((gray[y:y+w, x:x+h], face))) cv2.imshow("test", gray[y:y+w, x:x+h]) return np.asarray(val) def prepare_training_data(data_folder_path): dirs = os.listdir(data_folder_path) faces = [] labels = [] for dir_name in dirs: if not dir_name.startswith("s"): continue; label = int(dir_name.replace("s", "")) subject_dir_path = os.path.join(data_folder_path, dir_name) subject_images_names = os.listdir(subject_dir_path) for image_name in subject_images_names: if image_name.startswith("."): continue; image_path = os.path.join(subject_dir_path, image_name) image = cv2.imread(image_path) cv2.imshow("Training on image...", cv2.resize(image, (400, 500))) cv2.waitKey(100) for val in detect_faces(image): if val is None: continue face, rect = val if face is not None: faces.append(face) labels.append(label) return faces, labels def draw_rectangle(img, rect): (x, y, w, h) = rect cv2.rectangle(img, (x, y), (x+w, y+h), (0, 255, 0), 2) def draw_text(img, text, x, y): cv2.putText(img, text, (x, y), cv2.FONT_HERSHEY_PLAIN, 1.5, (0, 255, 0), 2) def predict(test_img): img = test_img.copy() for val in detect_faces(img): if val is None: continue face, rect = val label = lbph_rec.predict(face) label_text = subjects[label[0]] draw_rectangle(img, rect) draw_text(img, label_text, rect[0], rect[1]-5) return img def train(): faces, labels = prepare_training_data("training") print("Training classifier ...") lbph_rec.train(faces, np.array(labels)) print("Finished training ...") def test(): img = cv2.imread('test/2.jpg') 
img1 = cv2.imread('test/jerome.jpg') img2 = cv2.imread('test/tobias.jpg') img3 = cv2.imread('test/marius.jpg') img4 = cv2.imread('test/manu.jpg') cv2.imshow('detection', predict(img)) cv2.waitKey(0) cv2.destroyAllWindows() cv2.imshow('detection-jerome', predict(img1)) cv2.waitKey(0) cv2.destroyAllWindows() cv2.imshow('detection-tobias', predict(img2)) cv2.waitKey(0) cv2.destroyAllWindows() cv2.imshow('detection-marius', predict(img3)) cv2.waitKey(0) cv2.destroyAllWindows() cv2.imshow('detection-manu', predict(img4)) cv2.waitKey(0) cv2.destroyAllWindows() def show_webcam(mirror=False): cam = cv2.VideoCapture(0) while True: ret_val, img = cam.read() if not img is None: if not ret_val: continue if mirror: img = cv2.flip(img, 1) try: cv2.imshow('detection', predict(img)) except: cv2.imshow('detection', img) if cv2.waitKey(1) == 27: break cv2.destroyAllWindows() def main(): train() test() #show_webcam(mirror=False) if __name__ == '__main__': main()
[ "manuellang183@gmail.com" ]
manuellang183@gmail.com
58df714142bc8b34b29e30a57f33a9a9cdc9faf6
6ca4a9f5483c754d12cecca3263bdf798a1d3447
/src/cleaning.py
ab72ec613cfb0b06fb3856da74de64357bba8b6c
[]
no_license
Esaslow/ChurnCaseStudy
712c37ab13a0c2a9cc2ba1071a5d48d2db665376
f510f64525ad1b20584e630773376bd233ce96f6
refs/heads/master
2021-04-18T21:30:01.159227
2018-04-04T05:50:38
2018-04-04T05:50:38
126,740,944
1
0
null
null
null
null
UTF-8
Python
false
false
6,312
py
import pandas as pd import numpy as np from datetime import timedelta import matplotlib.pyplot as plt from importlib import reload from src import cleaning as C from sklearn.preprocessing import StandardScaler def clean_city(df): """ Input: DataFrame New columns, the 'city' column to dummy values New column, the 'city' column converted to integers Output: DataFrame """ working_df = df.copy() # Duplcate, sacrifical 'city' column working_df['raw_city'] = working_df['city'] # Make Dummy Columns new_df = pd.get_dummies(working_df, columns=['raw_city'], drop_first=True) # Rename dummy columns new_df_names = new_df.rename(index=str, \ columns={"raw_city_King's Landing": "kings_landing", "raw_city_Winterfell": "winterfell"}) # Create column of city names mapped to numerical categories new_df_names['city_categories'] = \ new_df_names['city'].map({'Astapor':1, 'Winterfell':2, "King's Landing":3}) return new_df_names def add_target(df): ''' INPUTS: df = data frame with col for last trip data that has last date as a pandas date time object ------------------- OUTPUTS: df = data frame with col added called within_last 30 days Returns 1 if last ride was greater than 30 days away Returns 0 if last ride was less than 30 days 1 => CHURN 0 => NO CHURN ''' working_df = df.copy() latest = max(working_df['last_trip_date']) Last_trip = (latest - working_df['last_trip_date']) within_last_30 = (Last_trip > timedelta(days = 30)) * 1 working_df['within_last_30'] = within_last_30 working_df['within_last_60'] = working_df['within_last_30'] working_df.loc[Last_trip > timedelta(days = 60),'within_last_60'] = 2 return working_df def read_data(file_path): ''' INPUTS: filepath: tells where the data is located in reference to the current directory OUTPUTS: Data frame that has the last trip date parsed for the last ride date and the signup date ''' df = pd.read_csv(file_path,parse_dates= ['last_trip_date','signup_date']) return df def clean_rtg_of_driver(df): ''' Cleaning the 'rtg_of_driver' column 
and creating 3 new columns: 1. Column where we replace all np.nan to the median. 2. Column where we replace all np.nan to the mode. 3. Column where we replace all np.nan to the mean. 4. Column where we create a scaled version of original while replacing all np.nan to median. ''' df_copy = df.copy() # Create column replacing np.nan to median. median = df_copy.avg_rating_of_driver.median() df_copy['avg_rating_of_driver_median'] = df_copy.avg_rating_of_driver.fillna(median) # Create column replacing np.nan to mode. mode = df_copy.avg_rating_of_driver.mode()[0] df_copy['avg_rating_of_driver_mode'] = df_copy.avg_rating_of_driver.fillna(mode) # Create column replacing np.nan to mean. mean = df_copy.avg_rating_of_driver.mean() df_copy['avg_rating_of_driver_mean'] = df_copy.avg_rating_of_driver.fillna(mean) # Normalized column based off median size = df_copy['avg_rating_of_driver_median'].shape[0] scaler = StandardScaler() df_copy['avg_rating_of_driver_normalized'] = (scaler.fit_transform(df_copy['avg_rating_of_driver_median'] .values.reshape(size,1))) return df_copy def cleaning_avg_rating_by_driver(df): #make a copy of the dataframe df_copy = df.copy() # filling in Nans with column median rating_by_driver = df_copy['avg_rating_by_driver'] median = df_copy['avg_rating_by_driver'].median() rating_by_driver_median = rating_by_driver.fillna(median) #create cleaned column df_copy['rating_by_driver_median'] = rating_by_driver_median # Normalized column based off median size = df_copy['rating_by_driver_median'].shape[0] scaler = StandardScaler() #scaler.fit df_copy['rating_by_driver_median_normalized'] = scaler.fit_transform(df_copy['rating_by_driver_median'].values.reshape(size,1)) return df_copy def clean_luxury_user(df): working_df = df.copy() ludf = working_df['luxury_car_user'] num_ludf = ludf*1 working_df['num_Luxury_User'] = num_ludf return working_df def remove_july(df): working_df = df.copy() working_df = working_df.loc[working_df['last_trip_date'].dt.month != 7,:] 
return working_df def plot_(df,target,ax): ax[0].hist(df.trips_in_first_30_days[target == 0],bins = list(np.linspace(0,20,50)),alpha = .6,label = 'no churn',normed = 1); ax[0].hist(df.trips_in_first_30_days[target == 1],bins = list(np.linspace(0,20,50)),alpha = .6,label = '30 day churn',normed = 1); ax[0].set_xlim([-1,20]) ax[0].legend() ax[0].set_xlabel('Number of rides in the First 30 days') ax[0].set_ylabel('Normalized Count'); ax[0].grid(alpha = .2,color = 'r',linestyle = '--') ax[0].set_title('Number of rides in First 30 days hist') ax[1].hist(df.weekday_pct[target == 0],alpha = .6,label = 'no churn',normed = 1); ax[1].hist(df.weekday_pct[target == 1],alpha = .6,label = '30 day churn',normed = 1); ax[1].set_xlim([-1,110]) ax[1].legend() ax[1].set_xlabel('Week day Percent') ax[1].set_title('Weekday Percent hist') ax[1].grid(alpha = .2,color = 'r',linestyle = '--') ax[2].hist(df.surge_pct[target == 0],alpha = .6,label = 'no churn',normed = 1); ax[2].hist(df.surge_pct[target == 1],alpha = .6,label = '30 day churn',normed = 1); ax[2].set_xlim([-1,110]) ax[2].legend() ax[2].set_xlabel('Surge Percent') ax[2].set_title('Surge Percent hist') ax[2].grid(alpha = .2,color = 'r',linestyle = '--') ax[3].hist(df.avg_dist[target == 0],bins = list(np.linspace(0,60,40)),alpha = .6,label = 'no churn',normed = 1); ax[3].hist(df.avg_dist[target == 1],bins = list(np.linspace(0,60,40)),alpha = .6,label = '30 day churn',normed = 1); ax[3].set_xlim([-1,40]) ax[3].legend() ax[3].set_xlabel('Avg Distance') ax[3].set_title('Average Distance hist') ax[3].grid(alpha = .2,color = 'r',linestyle = '--') return ax
[ "Elsa7762@colorado.edu" ]
Elsa7762@colorado.edu
ebfb29af4611b4bc8dec9a2d065d6577a3201c0f
db70c979a9d1002cb2dfe3ea7028957402782fd8
/tests/test_success_range_below_equal.py
55f9e2d0ee784cc8aee547cc5960c654ad221f1f
[ "MIT" ]
permissive
Bernardo-MG/wargame_analysis_jupyter_notebook
739d94b697bf103d0c563d4dcedc9e0fb1890606
db13838ce0f8c6dcbc160259c1ee0ae258b51ba7
refs/heads/master
2022-12-15T12:15:32.798807
2020-08-26T06:38:27
2020-08-26T06:38:27
289,078,376
0
0
MIT
2020-08-26T06:38:28
2020-08-20T18:20:02
Python
UTF-8
Python
false
false
8,937
py
# -*- coding: utf-8 -*- import unittest from decimal import Decimal from scripts.probability import roll_success_range """ Max shots script tests. """ __author__ = 'Bernardo Martínez Garrido' __license__ = 'MIT' class TestZeroToTenBelowEqual(unittest.TestCase): """ Tests the chance to go above with the range [0,10]. """ def test_goal_0(self): chance = roll_success_range(0, 10, 0, above=False, equal=True) self.assertEqual({"min": 0, "max": 0}, chance) def test_goal_1(self): chance = roll_success_range(0, 10, 1, above=False, equal=True) self.assertEqual({"min": 0, "max": 1}, chance) def test_goal_2(self): chance = roll_success_range(0, 10, 2, above=False, equal=True) self.assertEqual({"min": 0, "max": 2}, chance) def test_goal_3(self): chance = roll_success_range(0, 10, 3, above=False, equal=True) self.assertEqual({"min": 0, "max": 3}, chance) def test_goal_4(self): chance = roll_success_range(0, 10, 4, above=False, equal=True) self.assertEqual({"min": 0, "max": 4}, chance) def test_goal_5(self): chance = roll_success_range(0, 10, 5, above=False, equal=True) self.assertEqual({"min": 0, "max": 5}, chance) def test_goal_6(self): chance = roll_success_range(0, 10, 6, above=False, equal=True) self.assertEqual({"min": 0, "max": 6}, chance) def test_goal_7(self): chance = roll_success_range(0, 10, 7, above=False, equal=True) self.assertEqual({"min": 0, "max": 7}, chance) def test_goal_8(self): chance = roll_success_range(0, 10, 8, above=False, equal=True) self.assertEqual({"min": 0, "max": 8}, chance) def test_goal_9(self): chance = roll_success_range(0, 10, 9, above=False, equal=True) self.assertEqual({"min": 0, "max": 9}, chance) def test_goal_10(self): chance = roll_success_range(0, 10, 10, above=False, equal=True) self.assertEqual({"min": 0, "max": 10}, chance) def test_goal_above_max(self): chance = roll_success_range(0, 10, 20, above=False, equal=True) self.assertEqual({"min": 0, "max": 10}, chance) def test_goal_below_min(self): chance = roll_success_range(0, 10, 
-1, above=False, equal=True) self.assertEqual(None, chance) class TestOneToTenBelowEqual(unittest.TestCase): """ Tests the chance to go above with the range [1,10]. """ def test_goal_0(self): chance = roll_success_range(1, 10, 0, above=False, equal=True) self.assertEqual(None, chance) def test_goal_1(self): chance = roll_success_range(1, 10, 1, above=False, equal=True) self.assertEqual({"min": 1, "max": 1}, chance) def test_goal_2(self): chance = roll_success_range(1, 10, 2, above=False, equal=True) self.assertEqual({"min": 1, "max": 2}, chance) def test_goal_3(self): chance = roll_success_range(1, 10, 3, above=False, equal=True) self.assertEqual({"min": 1, "max": 3}, chance) def test_goal_4(self): chance = roll_success_range(1, 10, 4, above=False, equal=True) self.assertEqual({"min": 1, "max": 4}, chance) def test_goal_5(self): chance = roll_success_range(1, 10, 5, above=False, equal=True) self.assertEqual({"min": 1, "max": 5}, chance) def test_goal_6(self): chance = roll_success_range(1, 10, 6, above=False, equal=True) self.assertEqual({"min": 1, "max": 6}, chance) def test_goal_7(self): chance = roll_success_range(1, 10, 7, above=False, equal=True) self.assertEqual({"min": 1, "max": 7}, chance) def test_goal_8(self): chance = roll_success_range(1, 10, 8, above=False, equal=True) self.assertEqual({"min": 1, "max": 8}, chance) def test_goal_9(self): chance = roll_success_range(1, 10, 9, above=False, equal=True) self.assertEqual({"min": 1, "max": 9}, chance) def test_goal_10(self): chance = roll_success_range(1, 10, 10, above=False, equal=True) self.assertEqual({"min": 1, "max": 10}, chance) def test_goal_above_max(self): chance = roll_success_range(1, 10, 20, above=False, equal=True) self.assertEqual({"min": 1, "max": 10}, chance) def test_goal_below_min(self): chance = roll_success_range(1, 10, -1, above=False, equal=True) self.assertEqual(None, chance) class TestTenToOneHundredBelowEqual(unittest.TestCase): """ Tests the chance to go above with the range 
[10,100]. """ def test_no_goal(self): chance = roll_success_range(10, 100, 0, above=False, equal=True) self.assertEqual(None, chance) def test_goal_at_max(self): chance = roll_success_range(10, 100, 100, above=False, equal=True) self.assertEqual({"min": 10, "max": 100}, chance) def test_goal_at_min(self): chance = roll_success_range(10, 100, 10, above=False, equal=True) self.assertEqual({"min": 10, "max": 10}, chance) def test_goal_at_middle(self): chance = roll_success_range(10, 100, 50, above=False, equal=True) self.assertEqual({"min": 10, "max": 50}, chance) def test_goal_close_to_max(self): chance = roll_success_range(10, 100, 80, above=False, equal=True) self.assertEqual({"min": 10, "max": 80}, chance) def test_goal_above_max(self): chance = roll_success_range(10, 100, 200, above=False, equal=True) self.assertEqual({"min": 10, "max": 100}, chance) def test_goal_just_below_middle(self): chance = roll_success_range(10, 100, 40, above=False, equal=True) self.assertEqual({"min": 10, "max": 40}, chance) def test_goal_just_below_max(self): chance = roll_success_range(10, 100, 90, above=False, equal=True) self.assertEqual({"min": 10, "max": 90}, chance) def test_goal_below_min(self): chance = roll_success_range(10, 100, 5, above=False, equal=True) self.assertEqual(None, chance) def test_goal_just_below_min(self): chance = roll_success_range(10, 100, 9, above=False, equal=True) self.assertEqual(None, chance) class Test1d6BelowEqual(unittest.TestCase): """ Tests the chance to go above with the range [1,6], which is the range of a six sides die. 
""" def test_no_goal(self): chance = roll_success_range(1, 6, 0, above=False, equal=True) self.assertEqual(None, chance) def test_goal_above_max(self): chance = roll_success_range(1, 6, 10, above=False, equal=True) self.assertEqual({"min": 1, "max": 6}, chance) def test_goal_1(self): chance = roll_success_range(1, 6, 1, above=False, equal=True) self.assertEqual({"min": 1, "max": 1}, chance) def test_goal_2(self): chance = roll_success_range(1, 6, 2, above=False, equal=True) self.assertEqual({"min": 1, "max": 2}, chance) def test_goal_3(self): chance = roll_success_range(1, 6, 3, above=False, equal=True) self.assertEqual({"min": 1, "max": 3}, chance) def test_goal_4(self): chance = roll_success_range(1, 6, 4, above=False, equal=True) self.assertEqual({"min": 1, "max": 4}, chance) def test_goal_5(self): chance = roll_success_range(1, 6, 5, above=False, equal=True) self.assertEqual({"min": 1, "max": 5}, chance) def test_goal_6(self): chance = roll_success_range(1, 6, 6, above=False, equal=True) self.assertEqual({"min": 1, "max": 6}, chance) class Test1d6Norm0BelowEqual(unittest.TestCase): """ Tests the chance to go above with the range [1,6], which is the range of a six sides die. 
""" def test_goal_above_max(self): chance = roll_success_range(0, 5, 6, above=False, equal=True) self.assertEqual({"min": 0, "max": 5}, chance) def test_goal_0(self): chance = roll_success_range(0, 5, 0, above=False, equal=True) self.assertEqual({"min": 0, "max": 0}, chance) def test_goal_1(self): chance = roll_success_range(0, 5, 1, above=False, equal=True) self.assertEqual({"min": 0, "max": 1}, chance) def test_goal_2(self): chance = roll_success_range(0, 5, 2, above=False, equal=True) self.assertEqual({"min": 0, "max": 2}, chance) def test_goal_3(self): chance = roll_success_range(0, 5, 3, above=False, equal=True) self.assertEqual({"min": 0, "max": 3}, chance) def test_goal_4(self): chance = roll_success_range(0, 5, 4, above=False, equal=True) self.assertEqual({"min": 0, "max": 4}, chance) def test_goal_5(self): chance = roll_success_range(0, 5, 5, above=False, equal=True) self.assertEqual({"min": 0, "max": 5}, chance)
[ "programming@bernardomg.com" ]
programming@bernardomg.com
2e9784d9f5133d131dcf95aad42a8e25daf9771b
9f97c42310f47505eda2b5d6be28294dee7f0f15
/test/functional/wallet_import_with_label.py
4e5aebfaae9ccc25061c1b6fdfc3a6030877d25e
[ "MIT" ]
permissive
Madurajaya/cicoin
b7bc3cd65ef665e8c23d6787bb732d211b46e4f3
b48b11574ae38ae063670a755b9d50ef6960e1e8
refs/heads/master
2022-04-13T21:04:57.846103
2020-04-01T05:30:32
2020-04-01T05:30:32
296,742,986
1
0
MIT
2020-09-18T22:37:12
2020-09-18T22:37:12
null
UTF-8
Python
false
false
4,903
py
#!/usr/bin/env python3 # Copyright (c) 2018 The Cicoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the behavior of RPC importprivkey on set and unset labels of addresses. It tests different cases in which an address is imported with importaddress with or without a label and then its private key is imported with importprivkey with and without a label. """ from test_framework.test_framework import CicoinTestFramework from test_framework.wallet_util import test_address class ImportWithLabel(CicoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True def skip_test_if_missing_module(self): self.skip_if_no_wallet() def run_test(self): """Main test logic""" self.log.info( "Test importaddress with label and importprivkey without label." ) self.log.info("Import a watch-only address with a label.") address = self.nodes[0].getnewaddress() label = "Test Label" self.nodes[1].importaddress(address, label) test_address(self.nodes[1], address, iswatchonly=True, ismine=False, label=label) self.log.info( "Import the watch-only address's private key without a " "label and the address should keep its label." ) priv_key = self.nodes[0].dumpprivkey(address) self.nodes[1].importprivkey(priv_key) test_address(self.nodes[1], address, label=label) self.log.info( "Test importaddress without label and importprivkey with label." ) self.log.info("Import a watch-only address without a label.") address2 = self.nodes[0].getnewaddress() self.nodes[1].importaddress(address2) test_address(self.nodes[1], address2, iswatchonly=True, ismine=False, label="") self.log.info( "Import the watch-only address's private key with a " "label and the address should have its label updated." 
) priv_key2 = self.nodes[0].dumpprivkey(address2) label2 = "Test Label 2" self.nodes[1].importprivkey(priv_key2, label2) test_address(self.nodes[1], address2, label=label2) self.log.info("Test importaddress with label and importprivkey with label.") self.log.info("Import a watch-only address with a label.") address3 = self.nodes[0].getnewaddress() label3_addr = "Test Label 3 for importaddress" self.nodes[1].importaddress(address3, label3_addr) test_address(self.nodes[1], address3, iswatchonly=True, ismine=False, label=label3_addr) self.log.info( "Import the watch-only address's private key with a " "label and the address should have its label updated." ) priv_key3 = self.nodes[0].dumpprivkey(address3) label3_priv = "Test Label 3 for importprivkey" self.nodes[1].importprivkey(priv_key3, label3_priv) test_address(self.nodes[1], address3, label=label3_priv) self.log.info( "Test importprivkey won't label new dests with the same " "label as others labeled dests for the same key." ) self.log.info("Import a watch-only p2sh-segwit address with a label.") address4 = self.nodes[0].getnewaddress("", "p2sh-segwit") label4_addr = "Test Label 4 for importaddress" self.nodes[1].importaddress(address4, label4_addr) test_address(self.nodes[1], address4, iswatchonly=True, ismine=False, label=label4_addr, embedded=None) self.log.info( "Import the watch-only address's private key without a " "label and new destinations for the key should have an " "empty label while the 'old' destination should keep " "its label." ) priv_key4 = self.nodes[0].dumpprivkey(address4) self.nodes[1].importprivkey(priv_key4) embedded_addr = self.nodes[1].getaddressinfo(address4)['embedded']['address'] test_address(self.nodes[1], embedded_addr, label="") test_address(self.nodes[1], address4, label=label4_addr) self.stop_nodes() if __name__ == "__main__": ImportWithLabel().main()
[ "cicxcoin2@gmail.com" ]
cicxcoin2@gmail.com
c913f8fbfe5cfedb9004bb0dd5b99c11a599285b
485ffbd9a08f72a4ecae63d1695fb82dccc6f195
/tm/tw3.py
0b12ecf535fc659e7a489babd72ce638c6387c22
[]
no_license
ka9epedia/test
77850d64ae2dc6c1032deebaf43f11b87276da2e
02b9c43335fc058b9fda936c2b119614c99eb7df
refs/heads/master
2020-04-17T07:36:12.112061
2019-01-18T09:18:07
2019-01-18T09:18:07
154,784,187
0
0
null
null
null
null
UTF-8
Python
false
false
14,522
py
# coding: utf-8 from requests_oauthlib import OAuth1Session from requests.exceptions import ConnectionError, ReadTimeout, SSLError import json, datetime, time, pytz, re, sys, traceback, pymongo from pymongo import MongoClient from collections import defaultdict from pprint import pprint import numpy as np import unicodedata import MeCab as mc import collections import json KEYS = { 'consumer_key': 'U6OCU525mGe27DntCYQnIlp70', 'consumer_secret': 'mZeQ8HdILVbnZB3lRQJht1T8gB7yKmQMnJkkUMLGoLtDHvr6Qn', 'access_token': '875272026281332737-nrx6TzruwZs7Pge90SXaAD89bxAbRoF', 'access_secret': 'wxSlu6NaXEhYpst7SeHL2fJLAh0a5McWzfL0zq6LLTbWg' } twitter = None connect = None db = None tweetdata = None meta = None freqwords = {} freqpair = {} max = 0 noun_score = 0 verb_score = 0 adjective_score = 0 noun_score_c, adjective_score_c, verb_score_c, adverb_score_c = 0, 0, 0, 0 # TwitterAPI, MongoDBへの接続設定 def initialize(): global twitter, connect, db, tweetdata, meta twitter = OAuth1Session(KEYS['consumer_key'], KEYS['consumer_secret'], KEYS['access_token'], KEYS['access_secret']) connect = MongoClient('localhost', 27017) db = connect.okymrestaurant #db = connect.anal1 tweetdata = db.tweetdata meta = db.metadata initialize() # 感情辞書によるポジネガ分析の前段処理 noun_words, adjective_words, verb_words, adverb_words = [], [], [], [] noun_point, adjective_point, verb_point, adverb_point = [], [], [], [] pn = open('/home/odalab/Desktop/kankou/tm/pn_corpus/pn_ja.dic.txt', 'r') positive_weight = 44861.0 / 49963.0 #1.0 negative_weight = 5122.0 / 49983.0 for line in pn: line = line.rstrip() x = line.split(':') if abs(float(x[3])) > 0: #ポイントの調整 if x[2] == '名詞': noun_words.append(x[0]) noun_point.append(x[3]) if x[2] == '形容詞': adjective_words.append(x[0]) adjective_point.append(x[3]) if x[2] == '動詞': verb_words.append(x[0]) verb_point.append(x[3]) if x[2] == '副詞': adverb_words.append(x[0]) adverb_point.append(x[3]) pn.close() # tweet検索 def getTweetData(search_word): global twitter url = 
'https://api.twitter.com/1.1/search/tweets.json' params = { 'q': search_word, 'count': '100' } req = twitter.get(url, params = params) if req.status_code == 200: # 成功 timeline = json.loads(req.text) metadata = timeline['search_metadata'] statuses = timeline['statuses'] limit = req.headers['x-rate-limit-remaining'] if 'x-rate-limit-remaining' in req.headers else 0 reset = req.headers['x-rate-limit-reset'] if 'x-rate-limit-reset' in req.headers else 0 return { "result": True, "metadata": metadata, "statuses": statuses, "limit": limit, "reset_time": datetime.datetime.fromtimestamp(float(reset)), "reset_time_unix": reset } else: # 失敗 return { "result": False, "status_code": req.status_code } # 文字列を日本時間にタイムゾーンを合わせた日付型で返す def str_to_date_jp(str_date): dts = datetime.datetime.strptime(str_date, '%a %b %d %H:%M:%S +0000 %Y') return pytz.utc.localize(dts).astimezone(pytz.timezone('Asia/Tokyo')) # 現在時刻をUNIX時間で返す def now_unix_time(): return time.mktime(datetime.datetime.now().timetuple()) #お店情報取得 res = getTweetData(u'岡山市') if res['result'] == False: # 取得に失敗 print("Error! 
status code: {0:d}".format(res['status_code'])) if int(res['limit']) == 0: # API制限に達した。データはとれてきてる。 print("API制限に達したっぽい") else: print("API LIMIT:", res['limit']) if len(res['statuses']) == 0: # 例外投げる 検索結果0件 pass else: # mongoDBに入れる meta.insert({"metadata": res['metadata'], "insert_date": now_unix_time()}) for st in res['statuses']: tweetdata.insert(st) def mecab_analysis(sentence): t = mc.Tagger('-Ochasen -d /usr/local/lib/mecab/dic/mecab-ipadic-neologd/') sentence = sentence.replace('\n', ' ') text = sentence.encode('utf-8') node = t.parseToNode(text) result_dict = defaultdict(list) for i in range(140): # ツイートなのでMAX140文字 if node.surface != "": # ヘッダとフッタを除外 word_type = node.feature.split(",")[0] if word_type in ["名詞", "形容詞", "動詞"]: plain_word = node.feature.split(",")[6] if plain_word != "*": result_dict[word_type.decode('utf-8')].append(plain_word.decode('utf-8')) node = node.next if node is None: break return result_dict all_words_list = [] #全てのTweetデータに対して形態素に分けていく処理 for d in tweetdata.find({},{'_id':1, 'id':1, 'text':1, 'noun':1, 'verb':1, 'adjective':1}, no_cursor_timeout=True, timeout=False): freqwords = {} freqpair = {} max = 0 res = mecab_analysis(unicodedata.normalize('NFKC', d['text'])) # 半角カナを全角カナに words_list = [] hozon_list = {} freqp_word = [] # 品詞毎にフィールド分けして入れ込んでいく # 単語出現回数をカウント for k in res.keys(): if k == u'形容詞': # adjective adjective_list = [] for w in res[k]: words_list.append(w) all_words_list.append(w) adjective_list.append(w) words_cnt = collections.Counter(words_list) adjective_cnt = collections.Counter(adjective_list) # ポジネガ分析 s_cnt = 0 for i in adjective_words: if w == i: if adjective_point[s_cnt] >= 0: adjective_score += float(adjective_point[cnt]) * float(positive_weight) adjective_score_c = float(adjective_point[cnt]) * float(positive_weight) else: adjective_score += float(adjective_point[cnt]) * float(negative_weight) adjective_score_c = float(adjective_point[cnt]) * float(negative_weight) s_cnt += 1 #print res[k] #print w, noun_score 
#print w,i #print "test" #print all_words_list print str(w.encode('utf-8')), str(adjective_score) hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': adjective_score, u'ポジネガ分析結果(単体)': adjective_score_c, u'共起頻度': 0} tweetdata.update({'_id' : d['_id']},{'$push': {'adjective':{'$each':adjective_list}}}) elif k == u'動詞': # verb verb_list = [] for w in res[k]: words_list.append(w) all_words_list.append(w) verb_list.append(w) words_cnt = collections.Counter(words_list) verb_cnt = collections.Counter(verb_list) # ポジネガ分析 s_cnt = 0 for i in verb_words: if w == i: if verb_point[s_cnt] >= 0: verb_score += float(verb_point[cnt]) * float(positive_weight) verb_score_c = float(verb_point[cnt]) * float(positive_weight) else: verb_score += float(verb_point[cnt]) * float(negative_weight) verb_score_c = float(verb_point[cnt]) * float(negative_weight) s_cnt += 1 #print res[k] #print w, noun_score #print w,i print str(w.encode('utf-8')), verb_score hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': verb_score, u'ポジネガ分析結果(単体)': verb_score_c, u'共起頻度': 0} tweetdata.update({'_id' : d['_id']},{'$push': {'verb':{'$each':verb_list}}}) elif k == u'名詞': # noun noun_list = [] for w in res[k]: words_list.append(w) all_words_list.append(w) noun_list.append(w) words_cnt = collections.Counter(words_list) noun_cnt = collections.Counter(noun_list) # ポジネガ分析 s_cnt = 0 for i in noun_words: if w == i: if noun_point[s_cnt] >= 0: noun_score += float(noun_point[cnt]) * float(positive_weight) noun_score_c = float(noun_point[cnt]) * float(positive_weight) else: noun_score += float(noun_point[cnt]) * float(negative_weight) noun_score_c = float(noun_point[cnt]) * float(negative_weight) s_cnt += 1 #print res[k] #print w, noun_score #print w,i #no_noun += 1 print str(w.encode('utf-8')), str(noun_score) hozon_list[w] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果(総和)': noun_score, u'ポジネガ分析結果(単体)': noun_score_c, u'共起頻度': 0} 
tweetdata.update({'_id' : d['_id']},{'$push': {'noun':{'$each':noun_list}}}) #elif k == u'副詞': # adverb # adverb_list = [] # for w in res[k]: # words_list.append(w) # adverb_list.append(w) # words_cnt = collections.Counter(words_list) # adverb_cnt = collections.Counter(adverb_list) # ポジネガ分析 # s_cnt = 0 # for i in noun_words: # if w == i: # if adverb_point[s_cnt] >= 0: # adverb_score += float(adverb_point[cnt]) * float(positive_weight) # else: # adverb_score += float(adverb_point[cnt]) * float(negative_weight) # s_cnt += 1 #print res[k] #print w, noun_score # print w,i #no_noun += 1 # tweetdata.update({'_id' : d['_id']},{'$push': {'adverb':{'$each':adverb_list}}}) # 共起単語出現回数をカウント print ("--- 共起頻度 ---") for i in range(len(words_list)): for j in range(len(freqwords)): if words_list[i] == freqwords: freqwords[words_list[i]] += 1 else: freqwords[words_list[i]] = 1 if max < freqwords[words_list[i]]: max = freqwords[words_list[i]] for j in range(i + 1, len(words_list)): if words_list[i] + "\t" + words_list[j] == freqpair: freqpair[words_list[i] + "\t" + words_list[j]] += 1 freqp_word.append(freqpair[words_list[i] + "\t" + words_list[j]]) else: freqpair[words_list[i] + "\t" + words_list[j]] = 1 hozon_list[words_list[i]] = {u'単語': words_list, u'品詞': k, u'出現頻度': words_cnt, u'ポジネガ分析結果': adjective_score, u'共起頻度': freqp_word} print max print("--- 指定した全品詞の出現頻度 ---") for word, cnt in sorted(words_cnt.iteritems(), key=lambda x: x[1], reverse=True): print str(word.encode('utf-8')), cnt # JSON化 print(json.dumps(word, indent=4, ensure_ascii=False, sort_keys=True)), print ", ", print(json.dumps(cnt, indent=4, ensure_ascii=False, sort_keys=True)) #f = open('output-okayama.json', 'w') #json.dump(word, f, indent=4) print("--- 名詞の出現頻度 ---") for word, cnt in sorted(noun_cnt.iteritems(), key=lambda x: x[1], reverse=True): print str(word.encode('utf-8')), cnt #print(json.dumps(word, # indent=4, # ensure_ascii=False, # sort_keys=True)), #print ", ", #print(json.dumps(cnt, # indent=4, # 
ensure_ascii=False, # sort_keys=True)) print("--- 動詞の出現頻度 ---") for word, cnt in sorted(verb_cnt.iteritems(), key=lambda x: x[1], reverse=True): print str(word.encode('utf-8')), cnt #print(json.dumps(word, # indent=4, # ensure_ascii=False, # sort_keys=True)), #print ", ", #print(json.dumps(cnt, # indent=4, # ensure_ascii=False, # sort_keys=True)) print("--- 形容詞の出現頻度 ---") for word, cnt in sorted(adjective_cnt.iteritems(), key=lambda x: x[1], reverse=True): print str(word.encode('utf-8')), cnt #print(json.dumps(word, # indent=4, # ensure_ascii=False, # sort_keys=True)), #print ", ", #print(json.dumps(cnt, # indent=4, # ensure_ascii=False, # sort_keys=True)) #単語出現回数、共起単語出現回数からシンプソン係数を計算 simp = {} for key, value in freqpair.iteritems(): if freqpair[key] == 1: continue p = re.compile('^([^\t]+)\t([^\t]+)$') m = p.search(key) if m == None: continue if freqwords[m.group(1)] < freqwords[m.group(2)]: simpson = float(value) / float(freqwords[m.group(1)]) else: simpson = float(value) / float(freqwords[m.group(2)]) if simpson < 0.1: continue simp[key] = simpson print "%s" % max for key, value in freqwords.iteritems(): print "%s\t%s" % (key, value) for key, value in simp.iteritems(): print "%s\t%s" % (key, value) f = open('output-okayama.json', 'w') json.dump(all_words_list, f, indent=4) f = open('output-okayama-simpson.json', 'w') json.dump(simp, f, indent=4)
[ "kagepedia@gmail.com" ]
kagepedia@gmail.com
620a896d4a884a98be6bc854d50f98a8b7d210d7
d85fa999d626ccab2523c8c551cc2f7eb100571c
/Task2E.py
91ec4b688e91583a21c1d8c811c9a09eb5c5d1c4
[ "MIT" ]
permissive
swan11jf/CUED-Flood-Warning-Project
dcb4f412525b576fe1e8cd89aadf09920d14fe1b
93636615ee85eb4ed5ba0ef7414bdbedccc0bcb4
refs/heads/main
2023-02-01T20:26:21.449331
2020-12-21T11:15:18
2020-12-21T11:15:18
323,312,508
0
0
null
null
null
null
UTF-8
Python
false
false
732
py
"""Task 2E: plot the recent water levels of the stations most at risk."""
from floodsystem.stationdata import build_station_list
from floodsystem.stationdata import update_water_levels
from floodsystem.datafetcher import fetch_measure_levels
from floodsystem.flood import stations_highest_rel_level
from floodsystem.plot import plot_water_levels
import datetime


def run():
    """Plot ten days of level data for the five stations with the
    highest relative water level."""
    monitored = build_station_list()
    update_water_levels(monitored)

    # Five stations with the greatest relative level, most at risk first.
    at_risk = stations_highest_rel_level(monitored, 5)

    lookback = datetime.timedelta(days=10)
    for risky_station in at_risk:
        dates, levels = fetch_measure_levels(risky_station.measure_id, lookback)
        plot_water_levels(risky_station, dates, levels)


if __name__ == '__main__':
    print("*** Task 2E: CUED Part IA Flood Warning System ***")
    run()
[ "noreply@github.com" ]
swan11jf.noreply@github.com
ffb75083105752e6e34ddf33fd1f369d3dcae145
0a8ef50b8dd8e5a843e6fe3e6692eeefbad9fd84
/Student9Week/Singleton.py
ff48188b22072087709a6281b2f2310b3621f9a3
[]
no_license
VitaliyKrytsun/Student
7e84e66e5ea14dbaced6c46a7e9af17d67c981ff
451cc4dbb4d2fb8f78f63e6a9d712b807d9c74dc
refs/heads/master
2020-08-17T07:39:01.441525
2019-12-10T11:58:17
2019-12-10T11:58:17
215,633,757
0
0
null
null
null
null
UTF-8
Python
false
false
963
py
# Three classic ways to implement the Singleton pattern in Python:
# a metaclass, __new__, and (the active version below) a class decorator.

# metaclass
# class Singleton(type):
#     __cls_instances = {}
#     def __call__(cls, *args, **kwargs):
#         if cls not in Singleton.__cls_instances:
#             Singleton.__cls_instances[cls] = super().__call__(*args, **kwargs)
#         return Singleton.__cls_instances[cls]

# class A(metaclass=Singleton):
#     pass

# a1 = A()
# a2 = A()
# print(id(a1) == id(a2))

# class
# class A:
#     __instances = None
#     def __new__(cls, *args, **kwargs):
#         if A.__instances is None:
#             A.__instances = super().__new__(cls, *args, **kwargs)
#         return A.__instances

# a1 = A()
# a2 = A()
# print(id(a1) == id(a2))

# decorator
def Singleton(cls):
    """Class decorator that caches one instance of *cls*.

    The first call constructs the instance with the given arguments;
    every subsequent call returns that same object.
    """
    objs_dict = {}

    def wrapper(*args, **kwargs):
        if cls not in objs_dict:
            objs_dict[cls] = cls(*args, **kwargs)
        # Bug fix: return the cached *instance*, not the cache dict itself.
        # The original `return objs_dict` made A() yield the dict, so the
        # id() comparison below passed only by accident.
        return objs_dict[cls]
    return wrapper


@Singleton
class A():
    pass


a1 = A()
a2 = A()
print(id(a1) == id(a2))
print(type(A))
[ "56654633+VitaliyKrytsun@users.noreply.github.com" ]
56654633+VitaliyKrytsun@users.noreply.github.com
18a62f5f58f3eacf0f4b6e83ac4fda4770a77484
a2e638cd0c124254e67963bda62c21351881ee75
/Extensions/StructuredProductsDealPackage/FPythonCode/SP_ModuleReload.py
ed019b05682e9d07250ac27a96aa65a7a6824bdd
[]
no_license
webclinic017/fa-absa-py3
1ffa98f2bd72d541166fdaac421d3c84147a4e01
5e7cc7de3495145501ca53deb9efee2233ab7e1c
refs/heads/main
2023-04-19T10:41:21.273030
2021-05-10T08:50:05
2021-05-10T08:50:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,569
py
# Development helper: force-reloads every Structured Products extension
# module so in-session code changes take effect without restarting the
# host application.  NOTE(review): modules appear to be listed so that
# lower-level components are reloaded before the modules that import
# them — confirm before reordering.

# Need to add
# Additional Info
# - Participation (DealPackage)
# - CapitalProtection (DealPackage)
# - StrikePricePct (Instrument)
# - BarrierLevelPct (Instrument)
# - ProductQuantity (Deal Package)
# - AccumulatorLeverage
# Exotic Events
# - Initial Fixing
# ChoiceLists
# - AccDec (Val Group)
# - accDecModelDesc (Valuation Extension)

import SP_DealPackageHelper
import importlib
importlib.reload(SP_DealPackageHelper)
import SP_BusinessCalculations
importlib.reload(SP_BusinessCalculations)
import CompositeComponentBase
importlib.reload(CompositeComponentBase)
import CompositeExoticEventComponents
importlib.reload(CompositeExoticEventComponents)
import CompositeExoticComponents
importlib.reload(CompositeExoticComponents)
import CompositeOptionAdditionComponents
importlib.reload(CompositeOptionAdditionComponents)
import CompositeCashFlowComponents
importlib.reload(CompositeCashFlowComponents)
import CompositeOptionComponents
importlib.reload(CompositeOptionComponents)
import CompositeBasketComponents
importlib.reload(CompositeBasketComponents)
import CompositeBasketOptionComponents
importlib.reload (CompositeBasketOptionComponents)
import CompositeTradeComponents
importlib.reload(CompositeTradeComponents)
import StructuredProductBase
importlib.reload(StructuredProductBase)
import Validation_BarrierReverseConvertible
importlib.reload(Validation_BarrierReverseConvertible)
import SP_BarrierReverseConvertible
importlib.reload(SP_BarrierReverseConvertible)
import SP_CapitalProtectedNote
importlib.reload(SP_CapitalProtectedNote)
import SP_EqStraddle
importlib.reload(SP_EqStraddle)
import SP_CallPutSpread
importlib.reload(SP_CallPutSpread)
import SP_DualCurrencyDeposit
importlib.reload(SP_DualCurrencyDeposit)
import SP_WeddingCakeDeposit
importlib.reload(SP_WeddingCakeDeposit)
import SP_AccumulatorSetup
importlib.reload(SP_AccumulatorSetup)
import SP_AccumulatorCustomInsDef
importlib.reload(SP_AccumulatorCustomInsDef)
import SP_AccumulatorValuation
importlib.reload(SP_AccumulatorValuation)
import SP_AccumulatorModel
importlib.reload(SP_AccumulatorModel)
import SP_AccumulatorDealPackage
importlib.reload(SP_AccumulatorDealPackage)
import SP_Autocall
importlib.reload(SP_Autocall)
import SP_CapitalProtectedCertificate
importlib.reload(SP_CapitalProtectedCertificate)
import SP_CustomTradeActions
importlib.reload(SP_CustomTradeActions)
import SP_InvokeTradeActions
importlib.reload(SP_InvokeTradeActions)
import CustomLifeCycleEvents
importlib.reload(CustomLifeCycleEvents)
[ "nencho.georogiev@absa.africa" ]
nencho.georogiev@absa.africa
ede721e55e9c3c008214b8d056bfb0e827d08a68
ea55badf6640e807a4ed50190290dfe97db06e6c
/scikit-learn/Iris.py
8637a2173705c4c36614a414f69cc2576ff10938
[]
no_license
Natsu-Yuki/PythonCode
31b7f161c8dfc05ac36a5dec9b9bab9b5f4b5b86
0cf856d33b008b811a3747a98a6224e5b3e3af30
refs/heads/master
2020-03-29T12:03:20.869303
2018-09-22T14:32:41
2018-09-22T14:32:41
149,882,790
0
0
null
null
null
null
UTF-8
Python
false
false
1,891
py
"""k-NN classification demo on the Iris data set (scikit-learn).

Trains a 1-nearest-neighbour classifier on a train/test split, predicts
the class of one new flower and reports test-set accuracy.
"""
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
import pandas as pd
from sklearn.neighbors import KNeighborsClassifier
import numpy as np

# Script-style module: data is loaded and the model fitted at import time.
iris_dataset = load_iris()
X_train, X_test, y_train, y_test = train_test_split(
    iris_dataset['data'], iris_dataset['target'], random_state=0)

knn = KNeighborsClassifier(n_neighbors=1)
knn.fit(X_train, y_train)

# One unseen sample: sepal length/width, petal length/width (cm).
X_new = np.array([[5, 2.9, 1, 0.2]])
prediction = knn.predict(X_new)
y_pre = knn.predict(X_test)


def acquaintance_data():
    """Print the data set's keys and a small sample of its contents."""
    print("Keys of isir_dataset:\n{}".format(iris_dataset.keys()) + "\n\n.........")
    print("Target names of isir_dataset:\n{}".format(iris_dataset['target_names']) + "\n\n.........")
    print("Feature names of isir_dataset:\n{}".format(iris_dataset['feature_names']) + "\n\n.........")
    print("Data of isir_dataset:\n{}".format(iris_dataset['data'][:5]) + "\n\n.........")
    print("Target of isir_dataset:\n{}".format(iris_dataset['target'][:5]) + "\n\n.........")


def train_test_data():
    """Print the shapes of the train/test splits."""
    print("X_train shape:{}".format(X_train.shape))
    print("X_test shape:{}".format(X_test.shape))
    print("y_train shape:{}".format(y_train.shape))
    print("y_test shape:{}".format(y_test.shape))


def scatter_plot():
    """Draw a pair-wise scatter matrix of the training features,
    coloured by class."""
    iris_dataframe = pd.DataFrame(X_train, columns=iris_dataset['feature_names'])
    # Bug fix: `pd.scatter_matrix` was removed from the pandas top level
    # (pandas 0.23); the function lives in `pandas.plotting`.
    pd.plotting.scatter_matrix(iris_dataframe, c=y_train, figsize=(15, 15),
                               marker='o', hist_kwds={'bins': 20}, s=60,
                               alpha=0.8)


def main():
    """Report the prediction for the new sample and the test accuracy."""
    print('\n')
    print("Prediction :{}".format(prediction))
    print("Prediction target name:{}".format(iris_dataset['target_names'][prediction]))
    print("Test set preditions:{}\n".format(y_pre))
    print("Test set score:{:.2f}".format(np.mean(y_pre == y_test)))
    print("Test set score:{:.2f}".format(knn.score(X_test, y_test)))


main()
[ "ynatsu233@gmail.com" ]
ynatsu233@gmail.com
c0c53574fe0228d93d1f83423937147a06d97cef
e177125a896527f0917852db687d8837f41e0ceb
/topoy/either.py
3b40fc10436242d3c0626a9eaeebf54d11c63df1
[]
no_license
beezee/topoy
f73fa10eb850ad781c1c507516ced6da19be739d
c56c6627a4430456f1034f4d1b0830c5a654ee52
refs/heads/master
2022-07-31T14:29:38.100374
2019-11-17T16:59:48
2019-11-17T16:59:48
219,234,274
0
0
null
null
null
null
UTF-8
Python
false
false
3,799
py
from topoy.applicative import Applicative from topoy.apply import Apply, tuple from topoy.monad import Monad from topoy.hkt import HKT from topoy.functor import Functor from topoy.traverse import Traverse from topoy.typevars import * from topoy.semigroup import KeepLeft, Semigroup from topoy.sum import append2sg, bind2, F1, F2, fold2, map2, Sum2 from typing import Callable, cast, Generic, Tuple class EitherF(Generic[B]): pass class Either(HKT[EitherF[B], A]): @staticmethod def inj(e: 'Either[B, A]') -> 'HKT[EitherF[B], A]': return cast(HKT[EitherF[B], A], e) @staticmethod def proj(hkt: 'HKT[EitherF[B], A]') -> 'Either[B, A]': return cast('Either[B, A]', hkt) def __init__(self, run: Sum2[B, A]) -> None: self.run = run def left_map(self, f: Callable[[B], C]) -> 'Either[C, A]': return fold2[B, A, 'Either[C, A]'](( lambda l: Either(F1(f(l))), lambda r: Either(F2(r))))(self.run) def map(self, f: Callable[[A], C]) -> 'Either[B, C]': return fold2[B, A, 'Either[B, C]'](( lambda l: Either(F1(l)), lambda r: Either(F2(f(r)))))(self.run) def bimap(self, fl: Callable[[B], C], fr: Callable[[A], D]) -> 'Either[C, D]': return self.map(fr).left_map(fl) def fold(self, fl: Callable[[B], C], fr: Callable[[A], C]) -> C: return fold2((fl, fr))(self.run) def bind(self, afb: Callable[[A], 'Either[B, C]']) -> 'Either[B, C]': return fold2[B, A, 'Either[B, C]'](( lambda l: Either(F1(l)), lambda r: afb(r)))(self.run) def ap(self, fab: 'Either[B, Callable[[A], C]]', sg: Semigroup[B] = KeepLeft[B]()) -> 'Either[B, C]': return Either(append2sg(self.run, fab.run, sg)).map(lambda x: x[1](x[0])) def tuple(self, fb: 'Either[B, C]') -> 'Either[B, Tuple[A, C]]': return Either.proj( tuple(EitherApplicative(), self, fb)) def traverse(self, ap: Applicative[G], f: Callable[[A], HKT[G, C]]) -> HKT[G, 'Either[B, C]']: return fold2[B, A, HKT[G, 'Either[B, C]']](( lambda l: ap.pure(LeftOf[C].put(l)), lambda r: ap.map(f(r), lambda x: RightOf[B].put(x))))(self.run) def __str__(self) -> str: return fold2[B, A, 
str](( lambda l: 'Left(' + str(l) + ')', lambda r: 'Right(' + str(r) + ')'))(self.run) class LeftOf(Generic[A]): @classmethod def put(cls, b: B) -> Either[B, A]: return Either[B, A](F1(b)) class RightOf(Generic[A]): @classmethod def put(cls, b: B) -> Either[A, B]: return Either[A, B](F2(b)) class EitherFunctor(Generic[C], Functor[EitherF[C]]): def map(self, fa: HKT[EitherF[C], A], f: Callable[[A], B]) -> HKT[EitherF[C], B]: return Either.proj(fa).map(f) class EitherMonad(Generic[C], EitherFunctor[C], Monad[EitherF[C]]): def point(self, a: A) -> HKT[EitherF[C], A]: return RightOf[C].put(a) def bind(self, fa: HKT[EitherF[C], A], f: Callable[[A], HKT[EitherF[C], B]]) -> HKT[EitherF[C], B]: return Either.proj(fa).bind(lambda x: Either.proj(f(x))) class EitherApply(Generic[C], Apply[EitherF[C]], EitherFunctor[C]): def __init__(self, sg: Semigroup[C] = KeepLeft[C]()) -> None: self._sg = sg def ap(self, fa: HKT[EitherF[C], A], fab: HKT[EitherF[C], Callable[[A], B]]) -> HKT[EitherF[C], B]: return Either.proj(fa).ap(Either.proj(fab), self._sg) class EitherApplicative(Generic[C], Applicative[EitherF[C]], EitherApply[C]): def pure(self, a: A) -> HKT[EitherF[C], A]: return RightOf[C].put(a) class EitherTraverse(Generic[C], Traverse[EitherF[C]], EitherFunctor[C]): def traverse(self, ap: Applicative[G], fa: HKT[EitherF[C], A], f: Callable[[A], HKT[G, B]]) -> HKT[G, HKT[EitherF[C], B]]: return ap.map(Either.proj(fa).traverse(ap, f), Either.inj)
[ "brian.zeligson@gmail.com" ]
brian.zeligson@gmail.com
f0d46ede1b3ecc911d9874cce0d0c7cca9e0d770
15f94f7b66d33ca1e80ad2cb2c7821fb3c4ca453
/DataDash/DataModel/apps.py
41c9d627581b199ca0003cbc6487fb6cb78ab27f
[ "MIT" ]
permissive
DS921020/AnalysisManager
570fe2b08ba413e0616a057897c34fd2d4415c22
e16d6fff807738b644174da73d15ddb2bb9f9ac4
refs/heads/main
2023-03-03T14:46:34.718085
2020-12-10T05:01:44
2020-12-10T05:01:44
320,133,997
0
0
null
null
null
null
UTF-8
Python
false
false
98
py
from django.apps import AppConfig


class TestmodelConfig(AppConfig):
    """Django AppConfig for the ``DataModel`` application."""

    # Dotted path Django uses to locate the application package.
    name = 'DataModel'
[ "ds110293@163.com" ]
ds110293@163.com
d60cd1bfe7525f7f1d1505b330008095c64c52b2
5e59252778f8b6465f6e9c4a1890297624cab8f8
/shell.py
15b5a123b00f2886e529971c6a178f4639a69ac8
[]
no_license
tazjel/rpathcmd
fa62dfed77d56ea100c8f76a035486b2761058ee
0ebffe639f329665824fdd94d8b5c89ce695f153
refs/heads/master
2021-01-16T20:03:25.225459
2012-11-05T16:09:17
2012-11-05T16:09:17
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,104
py
# # Licensed under the GNU General Public License Version 3 # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # # Copyright 2012 James Tanner <tanner.jc@gmail.com> # # NOTE: the 'self' variable is an instance of RpathShell import atexit, logging, os, readline, re, sys from cmd import Cmd from pwd import getpwuid from rpathcmd.utils import * import pdb class RpathShell(Cmd): __module_list = [ 'api', 'projects', 'groups', 'systems', 'images', 'platforms', 'targets', 'packages'] # a SyntaxError is thrown if we don't wrap this in an 'exec' for module in __module_list: exec 'from %s import *' % module # maximum length of history file HISTORY_LENGTH = 1024 cmdqueue = [] completekey = 'tab' stdout = sys.stdout #prompt_template = 'rpathcmd {SSM:##}> ' prompt_template = 'rpathcmd> ' current_line = '' # do nothing on an empty line emptyline = lambda self: None def __init__(self, options): self.session = '' self.username = '' self.server = '' self.ssm = {} self.postcmd(False, '') # make the options available everywhere self.options = options #pdb.set_trace() userinfo = getpwuid(os.getuid()) self.conf_dir = os.path.join(userinfo[5], '.spacecmd') try: if not os.path.isdir(self.conf_dir): os.mkdir(self.conf_dir, 0700) except OSError: logging.error('Could not create directory %s' % self.conf_dir) self.history_file = 
os.path.join(self.conf_dir, 'history') try: # don't split on hyphens or colons during tab completion newdelims = readline.get_completer_delims() newdelims = re.sub(':|-|/', '', newdelims) readline.set_completer_delims(newdelims) if not options.nohistory: try: if os.path.isfile(self.history_file): readline.read_history_file(self.history_file) readline.set_history_length(self.HISTORY_LENGTH) # always write the history file on exit atexit.register(readline.write_history_file, self.history_file) except IOError: logging.error('Could not read history file') except: pass # handle commands that exit the shell def precmd(self, line): # remove leading/trailing whitespace line = re.sub('^\s+|\s+$', '', line) # don't do anything on empty lines if line == '': return '' # terminate the shell if re.match('quit|exit|eof', line, re.I): print sys.exit(0) # don't attempt to login for some commands if re.match('help|login|logout|whoami|history|clear', line, re.I): return line # login before attempting to run a command #if not self.session: #pdb.set_trace() #self.do_login('') #if self.session == '': return '' parts = line.split() if len(parts): command = parts[0] else: return '' if len(parts[1:]): args = ' '.join(parts[1:]) else: args = '' # print the help message if the user passes '--help' if re.search('--help', line): return 'help %s' % command # should we look for an item in the history? if command[0] != '!' 
or len(command) < 2: return line # remove the '!*' line from the history self.remove_last_history_item() history_match = False if command[1] == '!': # repeat the last command line = readline.get_history_item( readline.get_current_history_length()) if line: history_match = True else: logging.warning('%s: event not found' % command) return '' # attempt to find a numbered history item if not history_match: try: number = int(command[1:]) line = readline.get_history_item(number) if line: history_match = True else: raise Exception except IndexError: pass except ValueError: pass # attempt to match the beginning of the string with a history item if not history_match: history_range = range(1, readline.get_current_history_length()) history_range.reverse() for i in history_range: item = readline.get_history_item(i) if re.match(command[1:], item): line = item history_match = True break # append the arguments to the substituted command if history_match: line += ' %s' % args readline.add_history(line) print line return line else: logging.warning('%s: event not found' % command) return '' # update the prompt with the SSM size def postcmd(self, stop, line): self.prompt = re.sub('##', str(len(self.ssm)), self.prompt_template) # vim:ts=4:expandtab:
[ "tanner.jc@gmail.com" ]
tanner.jc@gmail.com
390d44eedc5bd62912d37c37ae5ccbcd9582d8af
3d6787af8b9bb74b7a80e6b51ea9d64d01455d73
/opinion.mining.from.online.hotel.reviews.a.text.summerization.approach/models/authorcredibility.py
e18717fcb24c2c86d897c5d80646285928bd5ef2
[]
no_license
beiranvand-karim/data.mining
bbac24d3ffa93c382cb4b5c250e2d22552d55c8d
85437e59792c2369581efbe76e0dd0d815f9f4e7
refs/heads/master
2020-04-04T23:54:42.834596
2018-11-29T12:23:16
2018-11-29T12:23:16
156,376,046
0
0
null
null
null
null
UTF-8
Python
false
false
134
py
from mongoengine import *


class AuthorCredibility(Document):
    """MongoDB document storing a credibility score for a review author."""

    # ObjectId of the author this score belongs to.
    authorId = ObjectIdField(required=True)
    # Credibility value; NOTE(review): its range/meaning is not established
    # here — confirm against the code that computes and writes it.
    measure = FloatField()
[ "beiranvand.karim@gmail.com" ]
beiranvand.karim@gmail.com
676f594537bc9c7e4d4a487af70a88783494133b
843cda9d64985676524db33395d8f4439f0cdf50
/reviews/migrations/0002_auto_20210109_2143.py
83750986d64ef284cc3a178ca0bfd0128e6f103c
[]
no_license
orangeberry/airbnb-clone
7bc37f1e0b4af142edf88c38ca84db71a98a9fca
6d9ecee9a3190f8cee3ae3fcd416261f633ab581
refs/heads/master
2023-03-04T11:31:39.685227
2021-02-16T16:39:16
2021-02-16T16:39:16
324,152,827
0
0
null
null
null
null
UTF-8
Python
false
false
859
py
# Generated by Django 3.1.4 on 2021-01-09 12:43 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('rooms', '0008_auto_20210109_2143'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('reviews', '0001_initial'), ] operations = [ migrations.AlterField( model_name='review', name='room', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to='rooms.room'), ), migrations.AlterField( model_name='review', name='user', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='reviews', to=settings.AUTH_USER_MODEL), ), ]
[ "orangeberry@kakao.com" ]
orangeberry@kakao.com
bb3553154a2dbaa5f002445d6690063caaacc7ac
274b4c50375c2cf62cec65805efade97931ccf18
/bikeshare.py
56ceefcd06dabd949286ac737cc8b92733dbd86c
[]
no_license
JonJacobs770/pdsnd_github
65c65a48bf8bf32266823085d2454005422938c7
13f728205001c715de8ef33d98266c4e379588f0
refs/heads/master
2022-08-21T10:02:14.425315
2020-05-31T11:19:00
2020-05-31T11:19:00
268,144,699
0
0
null
2020-05-30T19:15:45
2020-05-30T19:15:44
null
UTF-8
Python
false
false
11,355
py
import time import pandas as pd import numpy as np import math CITY_DATA = { 'chicago': 'chicago.csv', 'new york': 'new_york_city.csv', 'washington': 'washington.csv' } def get_filters(): """ Asks user to specify a city, month, and day to analyze. Returns: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter """ print('Hello! Let\'s explore some US bikeshare data!\nWhich of the following cities would you like to see more information about Chicago, New York, or Washington?\n') # TO DO: get user input for city (chicago, new york city, washington). HINT: Use a while loop to handle invalid inputs while True: city_chosen = input().lower() if city_chosen.lower() not in CITY_DATA.keys(): print("That is not one of the available cities. Please select Chicago, New York, or Washington.") continue else: print('Nice choice! We\'ll use %s.' % city_chosen.lower().title()) break # TO DO: get user input for month (all, january, february, ... , june) while True: month_chosen = input("\n In which month would you like to see data for? January, February, March, April, May, June. Type 'all' if you do not have any preference?\n").lower() if month_chosen not in ('january', 'february', 'march', 'april', 'may', 'june', 'all'): print("It seems you have either not entered the month's full name or you have entered a month on a different planet. Please, try again.") continue else: print('Ok then! We\'ll use %s.' % month_chosen.lower().title()) break # TO DO: get user input for day of week (all, monday, tuesday, ... sunday) while True: day_chosen = input("\nAre you looking for a particular day? 
If so, kindly enter the day as follows: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or type 'all' if you do not have any preference.\n").lower() if day_chosen not in ('sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'all'): print("It seems you have not entered a valid day of the week. kindly enter the day as follows: Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday or type 'all' if you do not have any preference.\n") continue else: print('Sounds good! We\'ll use %s.' % day_chosen.lower().title()) break print('-'*40) return city_chosen, month_chosen, day_chosen def load_data(city, month, day): """ Loads data for the specified city and filters by month and day if applicable. Args: (str) city - name of the city to analyze (str) month - name of the month to filter by, or "all" to apply no month filter (str) day - name of the day of week to filter by, or "all" to apply no day filter Returns: df - Pandas DataFrame containing city data filtered by month and day """ # load the city data file into a dataframe df = pd.read_csv(CITY_DATA[city]) # convert the Start Time column to datetime df['Start Time'] = pd.to_datetime(df['Start Time']) # extract month, day of week and start hour from Start Time to create new columns df['month'] = df['Start Time'].dt.month df['day_of_week'] = df['Start Time'].dt.day_name() df['hour'] = df['Start Time'].dt.hour # filter by month if applicable if month != 'all': # use the index of the months list to get the corresponding int months = ['january', 'february', 'march', 'april', 'may', 'june'] month = months.index(month) + 1 # filter by month to create the new dataframe df = df[df['month'] == month] # filter by day of week if applicable if day != 'all': # filter by day of week to create the new dataframe df = df[df['day_of_week'] == day.title()] return df def time_stats(df): """Displays statistics on the most frequent times of travel.""" print('\nCalculating The Most Frequent Times of 
Travel...\n') start_time = time.time() # TO DO: display the most popular month month_list = ['january','february','march','april','may','june','all'] common_month_num = df['month'].mode()[0] popular_month = month_list[common_month_num-1].title() print('Most popular month:', popular_month) # TO DO: display the most popular day of week popular_day = df['day_of_week'].mode()[0] print('Most popular day:', popular_day) # TO DO: display the most popular start hour popular_start_hour = df['hour'].mode()[0] print('Most common hour:', popular_start_hour) print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def station_stats(df): """Displays statistics on the most popular stations and trip.""" print('\nDetermining the most Popular Stations and Trip...\n') start_time = time.time() # TO DO: display most commonly used start station # idxmx() get the row label of the maximum value Start_Station = df['Start Station'].value_counts().idxmax() print('Most Commonly used start station:', Start_Station) # TO DO: display most commonly used end station End_Station = df['End Station'].value_counts().idxmax() print('\nMost Commonly used end station:', End_Station) # TO DO: display most frequent combination of start station and end station trip frequent_journey =df.groupby(['Start Station', 'End Station']).size().nlargest(1) print("\nThe most frequent trip from start to end is:\n{}".format(frequent_journey)) print("\nThis took %s seconds." 
% (time.time() - start_time)) print('-'*40) def round_up(n, decimals=0): multiplier = 10 ** decimals return math.ceil(n * multiplier) / multiplier def trip_duration_stats(df): """Displays statistics on the total and average trip duration.""" print('\nCalculating Trip Duration...\n') start_time = time.time() # TO DO: display total travel time Total_Travel_Time = sum(df['Trip Duration']) # Converting seconds to days print('Total travel time:', round_up((Total_Travel_Time/86400),2), " Days") # TO DO: display mean travel time Mean_Travel_Time = df['Trip Duration'].mean() # Converting seconds to minutes print('Mean travel time:', round_up((Mean_Travel_Time/60),2), " Minutes") print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def user_stats(df): """Displays statistics on bikeshare users.""" print('\nCalculating User Stats...\n') start_time = time.time() # TO DO: Display counts of user types # Washington does not have data for earliest, most recent, and most common year of birth therefore try and except were used. 
user_types = df['User Type'].value_counts() print('User Types:\n', user_types) # TO DO: Display counts of gender try: gender_types = df['Gender'].value_counts() print('\nGender Types:\n', gender_types) except KeyError: print("\nGender Types:\nThere does not seem to be data available to display information about genders for this city.") # TO DO: Display earliest, most recent, and most common year of birth try: Earliest_Year = df['Birth Year'].min() print('\nThe oldest person using the system was born in:', int(Earliest_Year)) except KeyError: print("\nThe oldest person using the system was born in:\nThere does not seem to be data available to determine the oldest person for this city.") try: Most_Recent_Year = df['Birth Year'].max() print('\nThe youngest person using the systen was born in:', int(Most_Recent_Year)) except KeyError: print("\nThe youngest person using the systen was born in:\nThere does not seem to be data available to determine the youngest person for this city.") # idxmax() get the row label of the maximum value try: Most_Common_Year = df['Birth Year'].value_counts().idxmax() print('\nMost common year that people using the system were born in:', int(Most_Common_Year)) except KeyError: print("\nMost common year that people using the system were born in:\nThere does not seem to be data available to determine the oldest person for this city.") print("\nThis took %s seconds." % (time.time() - start_time)) print('-'*40) def display_data(df): """ Display individual trip data Args: bikeshare dataframe. Returns: None. """ start = 0 end = 5 choice = '' while choice.lower() not in ['yes', 'no']: choice = input('Do you want to view indiviual trip data? Enter \'Yes\' or \'No\'.\n') if choice.lower() not in ['yes', 'no']: print('There are only two options \'Yes\' or \'No\'. Please try again.\n') elif choice.lower() == "yes": print(df.iloc[start:end]) while True: second_choice = input('\nDo you want to view more trip data? 
Enter \'Yes\' or \'No\'.\n') if second_choice.lower() not in ['yes', 'no']: print('Maybe you made a typo. Please try again.\n') elif second_choice.lower() == "yes": start += 5 end += 5 print(df.iloc[start:end]) elif second_choice == "no": return elif choice.lower() == "no": return return def restart(): restart = input('\nWould you like to restart your search? Enter \'Yes\' or \'No\'.\n') if restart.lower() != 'yes': return else: main() def main(): while True: city, month, day = get_filters() print('Fetching some insightful data from {} for you...'.format(city).title()) df = load_data(city, month, day) confirm_choice = '' while confirm_choice.lower() not in ['yes', 'no']: if month != 'all' and day != 'all': confirm_choice = input('Just to confirm you would like to see data for {} in {} on a {}. Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title())) elif month == 'all' and day == 'all': confirm_choice = input('Just to confirm you would like to see \'all\' data for {}. Type \'Yes\' or \'No\'.\n'.format(city.title())) elif month == 'all': confirm_choice = input('Just to confirm you would like to see data for {} for {} months on a {} . Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title())) else: confirm_choice = input('Just to confirm you would like to see data for {} in {} for {} days. Type \'Yes\' or \'No\'.\n'.format(city.title(),month.title(),day.title())) if confirm_choice.lower() not in ['yes', 'no']: print('Maybe you made a typo. Please try again\n') elif confirm_choice.lower() == "yes": break elif confirm_choice.lower() == "no": restart() time_stats(df) station_stats(df) trip_duration_stats(df) user_stats(df) display_data(df) restart() break if __name__ == "__main__": main()
[ "jonty.jacobs@gmail.com" ]
jonty.jacobs@gmail.com
d5d6859ced095e77ba26501b5cb023a48cdd6535
8d6ba22866a97b551de7ecccea75769c055afded
/HW/hw8_q4.py
496c50712582022223828e7fd6ce1a850528c3bc
[]
no_license
adibsxion19/CS1114
c52b8815ea3841e0e47a2442cceb5e9c5a81806a
0425d3d96664ee7a5ef88e146d51759e4a0bf50f
refs/heads/main
2023-04-10T23:30:31.427021
2021-04-06T05:03:50
2021-04-06T05:03:50
348,604,834
0
0
null
null
null
null
UTF-8
Python
false
false
6,354
py
# Author: Aadiba Haque # Assignment / Part: HW8 - Q4 # Date due: 2020-04-24 # I pledge that I have completed this assignment without # collaborating with anyone else, in conformance with the # NYU School of Engineering Policies and Procedures on # Academic Misconduct. def clean_data(complete_data_filename, cleaned_data_filename): #sig: string, string complete_data = open(complete_data_filename,"r") cleaned_data = open(cleaned_data_filename,"w") output = '' lst_of_indices = (2,3,4,7,8,9) for lines in complete_data: lines = lines.strip() lst_values = lines.split(',') for index in lst_of_indices: output += lst_values[index] + ',' output = output.strip(',') output += '\n' print(output, file=cleaned_data) complete_data.close() cleaned_data.close() def convert_date_time_to_edt(cleaned_data_filename, edt_file_name): #sig: string, string #UTC is 4 hours ahead of EDT cleaned_data = open(cleaned_data_filename,"r") edt_file = open(edt_file_name,"w") counter = 0 output = '' lst_of_indices = (1,0,2,3,4,5) for lines in cleaned_data: lines = lines.strip() if lines == '': continue lst_values = lines.split(',') if counter == 0: temp = lst_values[1] lst_values[1] = lst_values[0] lst_values[0] = temp lst_values[2] = "Last Update Date" lst_values.insert(3,"Last Update Time (EDT)") output += ','.join(lst_values) else: for index in lst_of_indices: if index == 2: last_update = lst_values[2].split() date = last_update[0] time = last_update[1].split(':') if int(time[0]) < 4: date_lst = date.split('/') date_lst[1] = str(int(date_lst[1])- 1) output += '/'.join(date_lst) + ',' else: output += date + ',' last_update_time = int(time[0]) - 4 if last_update_time < 0: last_update_time += 24 time[0] = str(last_update_time) output += ':'.join(time) + ',' else: output += lst_values[index] + ',' counter += 1 output = output.strip(',') output += '\n' print(output, file=edt_file) cleaned_data.close() edt_file.close() def print_percentages_per_location(location, data_filename, type): data = 
open(data_filename,"r") header = True for lines in data: if header: header = False continue else: lines = lines.strip() if lines == '': continue lst_values = lines.split(',') if lst_values[1] == location: confirmed_cases = int(lst_values[4]) num_types = 0 if type == "death": num_types = int(lst_values[5]) elif type == "recovered": num_types = int(lst_values[6]) if num_types >= confirmed_cases: print("There were {} {} of {} confirmed cases, or approximately 100.000%".format(num_types,type,confirmed_cases)) else: percent = num_types/confirmed_cases print("There were {} {} of {} confirmed cases, or approximately {:.3%}".format(num_types,type,confirmed_cases,percent)) data.close() def difference_in_cases(location1, location2,edt_file_name): #location1 and location2 are the names of two states/provinces #edt_file_name is the name of the input file #This function prints the number of confirmed cases for each location1 and location2 and prints #to the screen the difference (absolute value) between the number of confirmed cases in location1 and location2 #number of confirmed cases selected for each location is the first non-zero occurence found in the input file #if there are no non-zero occurences or the location is not found, the value of 0 is used as the number of cases for each location import math data = open(edt_file_name,"r") location1_cases = 0 location2_cases = 0 header = True for lines in data: if header: header = False continue #so header is not counted in this function else: lines = lines.strip() if lines == '': #accounts for the last line, if it is blank continue lst_values = lines.split(',') if lst_values[1] == location1 and location1_cases == 0: location1_cases = int(lst_values[4]) elif lst_values[1] == location2 and location2_cases == 0: location2_cases = int(lst_values[4]) if location1_cases != 0 and location2_cases != 0: break difference = int(math.fabs(location1_cases - location2_cases)) print("{}'s confirmed cases: {} \n{}'s confirmed cases: {} 
\nDifference: {}".format(location1,location1_cases,location2,location2_cases,difference)) data.close() def main(): complete_data_filename = "03-25-2020.csv" cleaned_data_filename = "CleanedCovidData.csv" clean_data(complete_data_filename, cleaned_data_filename) edt_file_name = "NewCovidData.csv" convert_date_time_to_edt(cleaned_data_filename, edt_file_name) location = "New York" type = 'recovered' print_percentages_per_location(location, edt_file_name, type) print("What two states'/provinces' number of confirmed cases would you like to compare? ") #asking user names of states to compare their number of confirmed cases location1 = input() #name of first state assigned to variable location1 location2 = input() #name of first state assigned to variable location2 difference_in_cases(location1, location2,edt_file_name) #Calls the function and gives the difference between the number of confirmed cases for location1 and location2 main()
[ "noreply@github.com" ]
adibsxion19.noreply@github.com
a98c0f87c5e54efc98415dca9576d0bcecc3346f
aae551baa369fda031f363c2afbdf1984467f16d
/Machine_Learning/Programming_Assignments/CS15B001_PA3/Code/q2/bernoulli.py
59000649f234d836785dc85871bffe40b30ef448
[]
no_license
ameet-1997/Course_Assignments
37f7d4115baec383ccf029772efcf9c33beb2a23
629e9d5cfc6fa6cf37a96c5fcc33bc669cbdc59d
refs/heads/master
2021-05-16T16:23:32.731296
2018-02-03T05:57:01
2018-02-03T05:57:01
119,939,202
0
0
null
null
null
null
UTF-8
Python
false
false
3,114
py
import pandas as pd import numpy as np from scipy import sparse import os import functions import time from sklearn.model_selection import KFold from sklearn.metrics import precision_recall_fscore_support from tabulate import tabulate from sklearn.naive_bayes import BernoulliNB, MultinomialNB from sklearn.metrics import precision_recall_curve from sklearn.model_selection import train_test_split import matplotlib.pyplot as plt # Load the data data_matrix = functions.load_sparse_csr('data_sparse').todense() labels_matrix = np.loadtxt('labels.csv', delimiter=',') # Cross Validation kf = KFold(n_splits=5) counter = 0 [avr_prec, avr_rec, avr_fsc] = [.0,.0,.0] for train_index, test_index in kf.split(data_matrix): counter += 1 data_train, data_test = data_matrix[train_index], data_matrix[test_index] labels_train, labels_test = labels_matrix[train_index], labels_matrix[test_index] b = BernoulliNB() b.fit(data_train, labels_train) predicted_labels = b.predict(data_test) # # Estimate the class priors # spam_prior = float(np.count_nonzero(labels_train == 0))/labels_train.shape[0] # ham_prior = float(np.count_nonzero(labels_train == 1))/labels_train.shape[0] # # Estimate the conditional probabilities # # Get all spam articles and get the column sum # # Do the same for all ham articles # # Add-1 smoothing is performed here # cond_ham = ((np.count_nonzero(data_train[labels_train==1], axis=0)+1).astype(dtype=float))/(data_train[labels_train==1].shape[0]+2) # cond_spam = ((np.count_nonzero(data_train[labels_train==0], axis=0)+1).astype(dtype=float))/(data_train[labels_train==0].shape[0]+2) # # Using log so that there are no underflow problems # predicted_labels = np.ones(shape=labels_test.shape, dtype=float) # for i in range(predicted_labels.shape[0]): # score_ham = np.sum(np.multiply(np.log(cond_ham), data_test[i,:]))+np.log(ham_prior) # score_spam = np.sum(np.multiply(np.log(cond_spam), data_test[i,:]))+np.log(spam_prior) # if score_spam > score_ham: # predicted_labels[i] = 0 # 
else: # predicted_labels[i] = 1 # print("Fold Number "+str(counter)) [prec,rec,fsc,sup] = precision_recall_fscore_support(labels_test, predicted_labels) avr_prec += prec[1] avr_rec += rec[1] avr_fsc += fsc[1] # print tabulate([prec, rec, fsc], headers=['Spam', 'Ham']) # print("") print("") print("Average Scores for Spam Class") print("Precision: "+str(avr_prec/5)) print("Recall: "+str(avr_rec/5)) print("FScore: "+str(avr_fsc/5)) # Plot the PR Curves train_data, test_data, train_labels, test_labels = train_test_split(data_matrix, labels_matrix, test_size=0.33, random_state=42) m = BernoulliNB() m.fit(train_data, train_labels) probab = m.predict_proba(test_data) precision_, recall_, threshold_ = precision_recall_curve(test_labels, probab[:,1]) fig = plt.figure() fig.suptitle('Precision Recall Curve') ax = fig.add_subplot(111) ax.set_xlabel('Precision') ax.set_ylabel('Recall') # ax.fill(precision_,np.zeros(shape=precision_.shape),'b') p = [0] r = [1] p.extend(list(precision_)) r.extend(list(recall_)) ax.fill(p, r,'b', zorder=5) plt.plot(p, r) plt.show()
[ "ameetsd97@gmail.com" ]
ameetsd97@gmail.com
42f3d2a0282e52d317b7e91d150c762f62c4d55a
5bd79964e4770770e972551b017d990036c1c600
/code/generateMeetings.py
d003722bafaaa4618abe9ee18b10542604c4c14b
[ "Apache-2.0" ]
permissive
IIIF/trc
8d63fd200927d6ad0170a03d8afc2d97cf193b59
d7226551a3bd081e9ff7518b8037527f9f041c6d
refs/heads/master
2021-08-04T11:14:03.504771
2021-07-21T22:29:23
2021-07-21T22:29:23
157,777,140
1
1
Apache-2.0
2020-12-03T12:36:13
2018-11-15T21:50:00
Python
UTF-8
Python
false
false
1,795
py
#!/usr/bin/python import sys from ics import Calendar, Event from datetime import datetime,timedelta from dateutil import tz def timezone(timeInstance, timezone): return timeInstance.astimezone(tz.gettz(timezone)).time() if __name__ == "__main__": if len(sys.argv) != 3 and len(sys.argv) != 4: print ('Usage:\n\t./code/calendar.py [start_date YYYY-MM-DD] [Occurrence count] [Japan time frequency]') sys.exit(0) if len(sys.argv) == 3: frequency = 4 else: frequency = int(sys.argv[3]) cal = Calendar() first_meeting = datetime(int(sys.argv[1][0:4]), int(sys.argv[1][5:7]), int(sys.argv[1][8:10]), 12, 0, 0, 0, tz.gettz('America/New_York')) occurences = sys.argv[2] next_meeting = first_meeting for i in range(int(occurences)): issues_shared = next_meeting - timedelta(days=7) voting_closes = next_meeting + timedelta(days=7*2) if i % frequency == 0 and i != 0: meeting_time = next_meeting.replace(hour=19) else: meeting_time = next_meeting timestr = '{} Europe / {} UK / {} US Eastern / {} US Pacific / {} Japan'.format(timezone(meeting_time, 'Europe/Paris'), timezone(meeting_time, 'Europe/London'), timezone(meeting_time, 'America/New_York'), timezone(meeting_time, 'America/Los_Angeles'), timezone(meeting_time, 'Asia/Tokyo')) print ('TRC meeting: {} ({}), \nSend out issues: {}, \nVoting closes: {}\n'.format(meeting_time.date(), timestr, issues_shared.date(), voting_closes.date())) e = Event() e.name = 'IIIF Technical Review Committee' e.begin = next_meeting cal.events.add(e) next_meeting += timedelta(days=4*7) with open('/tmp/trc.ics', 'w') as ics_file: ics_file.writelines(cal)
[ "glen.robson@gmail.com" ]
glen.robson@gmail.com
3c1b9ff25bff83a5b2ab154d29fca1246527a50a
5a18af4dd1eb7244ed6b75b8a59c29f7360cf468
/pose_regression/models/top_models.py
077581c48b4e505abe1ebd662626cb4a4f5d9cca
[]
no_license
asiron/deep-camera-relocalization
b585ef1b3ce63b307fcc57979eaf01462268a82c
9277826c605be9a28deff81261dbc68b721c7ae4
refs/heads/master
2020-03-18T22:19:36.813331
2018-10-20T20:36:01
2018-10-20T20:36:01
135,338,870
3
1
null
null
null
null
UTF-8
Python
false
false
5,466
py
from keras.layers import ( Dropout, Dense, LSTM, ELU, GRU, CuDNNGRU, CuDNNLSTM, Lambda, TimeDistributed, Activation, Bidirectional, Reshape, Concatenate, PReLU, BatchNormalization) from keras.regularizers import l2 from .layers import QuaternionNormalization import keras.backend as K import tensorflow as tf class TopModel(object): def __init__(self, **kwargs): self.kwargs = kwargs def build(self, input_tensor): return Lambda(lambda x: x, name='prediction')(input_tensor) class Regressor(TopModel): def build(self, input_tensor): assert len(input_tensor._keras_shape[1:]) in [1,2] # dense_1 = Dense(input_tensor._keras_shape[1], # activation='relu', # ))(input_tensor) dense_1 = Dense(self.kwargs['units'], activation='relu', kernel_regularizer=l2(self.kwargs['l2']))(input_tensor) dropout_1 = Dropout(self.kwargs['dropout'])(dense_1) dense_2 = Dense(7)(dropout_1) quat_norm = QuaternionNormalization(name='quat_norm')(dense_2) return super(Regressor, self).build(quat_norm) class SpatialLSTM(TopModel): def build(self, input_tensor): assert len(input_tensor._keras_shape[1:]) is 1 dense_1 = Dense(2048, activation='relu', kernel_regularizer=l2(self.kwargs['l2']))(input_tensor) rect_shape = (64, 32) dropout_1 = Dropout(self.kwargs['dropout'])(dense_1) reshaped = Reshape(rect_shape)(dropout_1) reshaped_reversed = Lambda(lambda x: K.reverse(x, axes=1))(reshaped) transposed = Lambda(lambda x: K.permute_dimensions(x, (0,2,1)))(reshaped) transposed_reversed = Lambda(lambda x: K.reverse(x, axes=1))(transposed) lstm_top_down = CuDNNLSTM(rect_shape[0], return_sequences=False)(reshaped) lstm_bottom_up = CuDNNLSTM(rect_shape[0], return_sequences=False)(reshaped_reversed) lstm_left_right = CuDNNLSTM(rect_shape[1], return_sequences=False)(transposed) lstm_right_left = CuDNNLSTM(rect_shape[1], return_sequences=False)(transposed_reversed) merged = Concatenate()([ lstm_left_right, lstm_right_left, lstm_top_down, lstm_bottom_up ]) dense_2 = Dense(7)(merged) quat_norm = 
QuaternionNormalization(name='quat_norm')(dense_2) return super(SpatialLSTM, self).build(quat_norm) class StatefulLSTM(TopModel): def build(self, input_tensor): assert len(input_tensor.shape[2:]) == 1 lstm_units = self.kwargs['units'] #dense_1 = TimeDistributed(Dense(1024, activation='relu'))(input_tensor) lstm_1 = CuDNNGRU(512, return_sequences=True, stateful=True)(input_tensor) dense_2 = TimeDistributed(Dense(7))(lstm_1) quat_norm = TimeDistributed(QuaternionNormalization(name='quat_norm'))(dense_2) return TimeDistributed(Lambda(lambda x: x, name='inner_prediction'), name='prediction')(quat_norm) class StandardLSTM(TopModel): def build(self, input_tensor): assert len(input_tensor.shape[2:]) == 1 lstm_units = self.kwargs['units'] dense_1 = TimeDistributed(Dense(1024))(input_tensor) merged_rev = TimeDistributed(Lambda(lambda x: K.reverse(x, axes=1)))(dense_1) lstm_1 = CuDNNLSTM(512, return_sequences=True)(dense_1) lstm_1_rev = CuDNNLSTM(512, return_sequences=True)(merged_rev) merged_bidirectional = Concatenate(axis=-1)([lstm_1, lstm_1_rev]) dropout_1 = Dropout(self.kwargs['dropout'])(merged_bidirectional) dense_2 = TimeDistributed(Dense(7))(dropout_1) quat_norm = TimeDistributed(QuaternionNormalization(name='quat_norm'))(dense_2) return TimeDistributed(Lambda(lambda x: x, name='inner_prediction'), name='prediction')(quat_norm) ''' dense_1 = TimeDistributed(Dense(2048, activation='relu', kernel_regularizer=l2(self.kwargs['l2'])))(merged_bidirectional) rect_shape = (64, 32) reshaped = TimeDistributed(Reshape(rect_shape))(dense_1) reshaped_reversed = TimeDistributed(Lambda(lambda x: K.reverse(x, axes=1)))(reshaped) transposed = TimeDistributed(Lambda(lambda x: K.permute_dimensions(x, (0,2,1))))(reshaped) transposed_reversed = TimeDistributed(Lambda(lambda x: K.reverse(x, axes=1)))(transposed) lstm_top_down = TimeDistributed(CuDNNLSTM(rect_shape[0], return_sequences=False))(reshaped) lstm_bottom_up = TimeDistributed(CuDNNLSTM(rect_shape[0], 
return_sequences=False))(reshaped_reversed) lstm_left_right = TimeDistributed(CuDNNLSTM(rect_shape[1], return_sequences=False))(transposed) lstm_right_left = TimeDistributed(CuDNNLSTM(rect_shape[1], return_sequences=False))(transposed_reversed) merged_spatial = Concatenate(axis=-1)([ lstm_left_right, lstm_right_left, lstm_top_down, lstm_bottom_up ]) merged_spatial_rev = TimeDistributed(Lambda(lambda x: K.reverse(x, axes=1)))(merged_spatial) lstm_2 = CuDNNLSTM(2*rect_shape[0] + 2*rect_shape[1], return_sequences=True)(merged_spatial) lstm_2_rev = CuDNNLSTM(512, return_sequences=True)(merged_spatial_rev) merged_bidirectional = Concatenate(axis=-1)([lstm_2, lstm_2_rev]) dense_2 = TimeDistributed(Dense(7))(merged_bidirectional) quat_norm = TimeDistributed(QuaternionNormalization(name='quat_norm'))(dense_2) return TimeDistributed(Lambda(lambda x: x, name='inner_prediction'), name='prediction')(quat_norm) '''
[ "maciej.zurad@gmail.com" ]
maciej.zurad@gmail.com
58e2029cc20575a0699ac989d2bd2bceb0f0ad0d
5c14e3a42410b386b8a062ad5c8ef4d35b54c10e
/LabQuestion4.py
b809de618049996ab3598d3ff07ddbd8829a6e23
[]
no_license
CStratton00/CST-215-Programming-Assignments
ae158504dca1b1dbf85b73cb8f6967f353c6e0ca
cc72b91380dd2dec23ed1adce8461b3d399ce34e
refs/heads/main
2023-02-27T13:42:29.074624
2021-02-10T18:16:06
2021-02-10T18:16:06
337,812,288
0
0
null
null
null
null
UTF-8
Python
false
false
3,303
py
A = True B = True def APT(): return "T" if(A == True) else "F" def BPT(): return "T" if(B == True) else "F" def abAND(): return "T" if(A and B) else "F" def abOR(): return "T" if(A or B) else "F" def abNAND(): return "T" if(not(A and B)) else "F" def abNOR(): return "T" if(not(A or B)) else "F" def abXOR(): return "T" if(A != B) else "F" def aNOT(): return "T" if(not(A)) else "F" print("<----------------------------->") print("| And Gate |") print("<----------------------------->") print("| A = " + APT() + ", B = " + BPT() + " | A and B = " + abAND() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A and B = " + abAND() + " |") A = False B = True print("| A = " + APT() + ", B = " + BPT() + " | A and B = " + abAND() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A and B = " + abAND() + " |") print("<----------------------------->") A = True B = True print("<----------------------------->") print("| OR Gate |") print("<----------------------------->") print("| A = " + APT() + ", B = " + BPT() + " | A or B = " + abOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A or B = " + abOR() + " |") A = False B = True print("| A = " + APT() + ", B = " + BPT() + " | A or B = " + abOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A or B = " + abOR() + " |") print("<----------------------------->") A = True B = True print("<----------------------------->") print("| NAND Gate |") print("<----------------------------->") print("| A = " + APT() + ", B = " + BPT() + " | A nand B = " + abNAND() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A nand B = " + abNAND() + " |") A = False B = True print("| A = " + APT() + ", B = " + BPT() + " | A nand B = " + abNAND() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A nand B = " + abNAND() + " |") print("<----------------------------->") A = True B = True print("<----------------------------->") print("| NOR Gate |") 
print("<----------------------------->") print("| A = " + APT() + ", B = " + BPT() + " | A nor B = " + abNOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A nor B = " + abNOR() + " |") A = False B = True print("| A = " + APT() + ", B = " + BPT() + " | A nor B = " + abNOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A nor B = " + abNOR() + " |") print("<----------------------------->") A = True B = True print("<----------------------------->") print("| XOR Gate |") print("<----------------------------->") print("| A = " + APT() + ", B = " + BPT() + " | A xor B = " + abXOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A xor B = " + abXOR() + " |") A = False B = True print("| A = " + APT() + ", B = " + BPT() + " | A xor B = " + abXOR() + " |") B = False print("| A = " + APT() + ", B = " + BPT() + " | A xor B = " + abXOR() + " |") print("<----------------------------->") A = True print("<----------------->") print("| Not Gate |") print("<----------------->") print("| A = " + APT() + " | A' = " + aNOT() + " |") A = False print("| A = " + APT() + " | A' = " + aNOT() + " |") print("<----------------->")
[ "noreply@github.com" ]
CStratton00.noreply@github.com
bb9382cac06758cde9a8cf0d6815e7c641a53b4e
cfe31cde0d64026925c9a5747216ba83856122d8
/Sex_Determination/parallel_ASEreadcounter_XIST.py
630398e4cecd81c226a883b9bbf1f9c81b1d5fbe
[ "MIT" ]
permissive
SViswanathanLab/XIST-Males-Scripts
f8d4fe22dee4d90f8a7a343a1138e721f3e5473a
4f2a0e281296df1dd86e3c68b13192e3337c1e8a
refs/heads/main
2023-04-11T05:52:21.329845
2022-08-20T22:01:28
2022-08-20T22:01:28
400,788,194
0
0
null
null
null
null
UTF-8
Python
false
false
1,182
py
#qsub -t 1:86 submit_script.qsub import glob, os task_id = int(os.getenv("SGE_TASK_ID")) temp_path = "download_dir/" #where files are downloaded to parameters_list = [x[0].split("/")[-1] for x in os.walk(temp_path)] samples_per_node = 18 val = min(task_id*samples_per_node, len(parameters_list)) parameter_sublist = parameters_list[(task_id - 1)*samples_per_node:val] def runSimulation(parameter): os.system("java -Djava.io.tmpdir=xist_aser -Xmx8000m -jar gatk.jar ASEReadCounter -R Homo_sapiens_assembly38.fasta --read-filter PassesVendorQualityCheckReadFilter --read-filter HasReadGroupReadFilter --read-filter NotDuplicateReadFilter --read-filter MappingQualityAvailableReadFilter --read-filter NotSecondaryAlignmentReadFilter --read-filter MappingQualityReadFilter --minimum-mapping-quality 30 --read-filter OverclippedReadFilter --filter-too-short 25 --read-filter GoodCigarReadFilter --read-filter AmbiguousBaseReadFilter -V hapmap_3.3.hg38.vcf.gz --lenient --seconds-between-progress-updates 100 -I $temp_path/%s/normal.bam -L chrX -O output_dir/%s.out" % (parameter,parameter)) for parameter in parameter_sublist: runSimulation(parameter = parameter)
[ "noreply@github.com" ]
SViswanathanLab.noreply@github.com
5c27bedc6a666dd8e3f85a7a92005b76c278ea8c
51a705c1c3c749cd339ebdfc1997770e9de0f71e
/partie2/partie_2.py
a1844d5e76b369dec7eed1c19e1aa6eb679c29d6
[]
no_license
jabertSI/Gps-haversin-to-kml
49c932cadf25fb123d435acdbf3385897d06fc1e
163e862185e3e1d670ed52c1a6389a06f9f9ec28
refs/heads/master
2021-06-12T08:24:45.823195
2017-03-10T13:26:44
2017-03-10T13:26:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,746
py
from math import radians, cos, sin, atan2, sqrt, ceil terre = 6371 # LECTURE DU FICHIER fichier_ville = 'ville.txt' with open(fichier_ville, 'r') as f: # Creation liste de liste lines = [line.strip('\n') for line in f.readlines()] carnet = [line.split() for line in lines] #Decoupage de la chaine de caractere # Conversion string to FLoat print("INFORMATION VILLE :") for adr in carnet: adr[1] = float(adr[1]) adr[2] = float(adr[2]) print(adr[0] + ' : latitude = ' + str(adr[1]) + ' : longitude = ' + str(adr[2]) + ': altitude = ' + adr[3] + "m") for ville1 in carnet: ville1[1], ville1[2] = map(radians, [ville1[1], ville1[2]]) # conversion degre en radian distances = [] # Generateur de couple print("DISTANCE ENTRE LES VILLES :") for ville1 in carnet: for ville2 in carnet: if ville1 != ville2: # Calcule de la distance. lat = ville2[1] - ville1[1] # delta latitude lng = ville2[2] - ville1[2] # delta longitude # formule haversine d = sin(lat * 0.5) ** 2 + cos(ville1[1]) * cos(ville2[1]) * sin(lng * 0.5) ** 2 h = 2 * atan2(sqrt(d),sqrt(1-d)) distance = h * terre print("La distance entre", ville1[0],"et", ville2[0], "est de", ceil(distance), "km") # 90 couple possible # Fin partie 1 # Debut partie 2 distances.append([ville1[0], ville2[0], ceil(distance)]) # Création de la list afin de stock les couples avec leurs distance antre elles distances.sort(key=lambda x:x[2]) # FOnction qui dit que le tri se fait sur la key n° 2 donc la distance en km for dist in distances: print("La distance entre", dist[0], "et", dist[1], "est de", ceil(dist[2]), "km") # Affichage super beau
[ "noreply@github.com" ]
jabertSI.noreply@github.com
2159307633408b2e9ff64b9bb7270d5a919c593b
ad5e9d790c564cdf6923670bbf8454f1be88624a
/projects_robotics/cfg/dynamic_ric.cfg
63113e0c4d6189cce67406622d5ceb4541eb0c45
[]
no_license
zhangxuelei86/ROS-odometry-car
677cdaeba5c2aaea014a115c0bfd09f85f32c9e8
284d0581dcf5e6a68d164b2e79a93976aa677a3f
refs/heads/master
2022-04-10T09:30:25.096406
2020-03-25T11:12:05
2020-03-25T11:12:05
null
0
0
null
null
null
null
UTF-8
Python
false
false
546
cfg
#!/usr/bin/env python PACKAGE = "projects_robotics" from dynamic_reconfigure.parameter_generator_catkin import * gen = ParameterGenerator() gen.add("computation_type", int_t, 0, "0 Differential Drive, 1 Ackermann", 0, 0, 1) gen.add("reset_signal", int_t, 0, "change to reset", 0, 0, 1) gen.add("change_coordinates_signal", int_t, 0, "switch to change coordinates", 0,0,1) gen.add("x_coordinate", double_t, 0, "New X", 0) gen.add("y_coordinate", double_t, 0, "New Y", 0) exit(gen.generate(PACKAGE, "projects_robotics", "dynamic_ric"))
[ "alessia.paccagnella@mail.polimi.it" ]
alessia.paccagnella@mail.polimi.it
ac215caf0b42fede5612998b3aad73bcf7068688
468daac37b861ce6e9f7e18f491754ba0acd9818
/TTTBoard.py
236ff071bf3cb9086d343156dc5b8c689841942a
[]
no_license
charlesdaniel/Bioloid_TicTacToe
49ec570bdf5704abf394d0cb567bd6b3c8257b18
37b1e45135627fb6513dd5cd3b440566bfd08952
refs/heads/master
2016-08-05T14:27:32.220491
2011-09-05T13:59:09
2011-09-05T14:01:35
2,328,502
0
0
null
null
null
null
UTF-8
Python
false
false
4,480
py
## Import the base TTTPlayer class (we'll need it later to prompt for input) from TTTPlayer import * ## The TTTBoard is the engine that drives the game. It handles prompting for input from ## any TTTPlayer class (this includes TTTPlayerAI since that inherits from TTTPlayer). ## Additionally this class prints out the board to the screen, checks for any winnings, ## and tells the winner if they won. class TTTBoard(): def __init__(self, player0, player1): self.players = [player0, player1] # The Board is a 1 dimensional array layed out like so # 0 | 1 | 2 # ----------- # 3 | 4 | 5 # ----------- # 6 | 7 | 8 # # The values in the cells are ' ' or a player's index from the players array (ie. "0" or "1") self.board = [] def resetBoard(self): # This method resets the board values to a space character (no piece) in each cell self.board = [ ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' '] def checkWin(self): ## This method scans through all the possible winning combinations of cells and sees if the values ## in those cells are the same (and not " " empty). If it finds a combination then it returns the ## combination back. Otherwise it returns the value None. 
# These are all the winning cell combinations in Tic-Tac-Toe winningCombinations = [ [0,1,2], [3,4,5], [6,7,8], [0,3,6], [1,4,7], [2,5,8], [0,4,8], [2,4,6] ] # Run through those winning combinations looking for matching values in each cell in each combination # Also make sure the cells actually have a value other than ' ' for c in winningCombinations: if((self.board[c[0]] != ' ') and (self.board[c[0]] == self.board[c[1]]) and (self.board[c[1]] == self.board[c[2]])): return c # Found a combination which has all the same values, win return None # Didn't find any combinations that won def printBoard(self): ## This method prints out the nice board print " %s | %s | %s " % (self.board[0], self.board[1], self.board[2]) print "-----------" print " %s | %s | %s " % (self.board[3], self.board[4], self.board[5]) print "-----------" print " %s | %s | %s " % (self.board[6], self.board[7], self.board[8]) def playGame(self): ## This method is the main engine of a (one) game (the driver). # Clear the board of pieces self.resetBoard() winningMove = None # This will hold the winning combination numMoves = 0 p = 0 # The index of the current player in the self.players array # Main Loop: Prompt for move and check winnings until there's a winner or 8 moves have been made while ((numMoves < 9) and (winningMove == None)): # Print the board for the user self.printBoard() # Ask the current player for a move via the TTTPlayer/TTTPlayerAI getMove() method m = self.players[p].getMove(self.board) # Check to see if that cell is empty or not if(self.board[m] == ' '): # Place the piece (value of p either 0 or 1) on that cell self.board[m] = p # Tells the current player the piece has been placed successfully # (this is so the TTTPlayer/TTTPlayerAI can move the robot arms to put # the marking in that cell. 
self.players[p].placePiece(m) # Increment the count of moves taken numMoves = numMoves + 1 # Toggle to make the other player the current player p = 1 - p # Simple trick to toggle between p = 0 and p = 1 else: # If we're here then it means the cell was not empty print "ILLEGAL MOVE PLAYER ", self.players[p].name, " TRY AGAIN " # We check to see if anybody won winningMove = self.checkWin() # We are outside the main game loop here. So we print the final board out for the user to see. self.printBoard() # We check to see how we exited the main game loop (either winningMove contains the winning combination # or we reached the maximum number of moves). So this if statement checks to see if winningMove is not None # like we initialized it before the loop. if (winningMove != None): # We find out the piece (0 or 1) that won from the first cell of the winningMove array winner = self.board[winningMove[0]] # Tell them they won print "PLAYER ", self.players[winner].name, " HAS WON THIS GAME USING POSITIONS ", winningMove # Tell the TTTPlayer/TTTPlayerAI to draw their winning line (using the arms) self.players[winner].placeWinningLine(winningMove) else: # If we're here then we must have exited the loop because we reached the limit of moves print "NOBODY WON !"
[ "charles.sam.daniel@gmail.com" ]
charles.sam.daniel@gmail.com
2241916c7d68776e94af575a2559596e236b1ca4
6c298f03496560276fb9f478cbefc218ecd24e9a
/VoiceInput/program/lib/voiceinput.py
7f661347d3c4a859be5930192ef02c22284a2b7f
[]
no_license
koenschepens/OldPhone
1f3fccd6018e14e779373243a0e90a759a7425f9
5ac9247d0c9e08d6af8fb384479c53b48c174aa6
refs/heads/master
2021-01-10T08:31:43.368378
2016-03-26T19:06:07
2016-03-26T19:06:07
43,725,567
0
0
null
null
null
null
UTF-8
Python
false
false
2,048
py
import sys import xbmc, xbmcgui, xbmcaddon try: import simplejson except ImportError: import json as simplejson import httplib __addon__ = xbmcaddon.Addon() __cwd__ = __addon__.getAddonInfo('path').decode("utf-8") __language__ = __addon__.getLocalizedString class InputWindow(xbmcgui.WindowXMLDialog): def __init__( self, *args, **kwargs ): self.Kodi14 = False self.CTL_NUM_START = 48 self.CTL_NUM_END = 57 self.CTL_LABEL_EDIT = 310 self.strEdit = kwargs.get("default").decode('utf-8') or u"" self.strHeading = kwargs.get("heading") or "" self.bIsConfirmed = False self.oldPhone = True self.keyType = LOWER self.words = [] self.hzcode = '' self.pos = 0 self.num = 0 xbmcgui.WindowXMLDialog.__init__(self) xbmc.log(msg="HEE HALLO@!!", level=xbmc.LOGDEBUG) def initControl(self): pEdit = self.getControl(self.CTL_LABEL_EDIT) px = pEdit.getX() py = pEdit.getY() pw = pEdit.getWidth() ph = pEdit.getHeight() self.listw = pw - 95 self.CTL_HZCODE = xbmcgui.ControlLabel(px, py + ph, 90, 30, '') self.CTL_HZLIST = xbmcgui.ControlLabel(px + 95, py + ph, pw - 95, 30, '') self.addControl(self.CTL_HZCODE) self.addControl(self.CTL_HZLIST) def getText(self): return "MONGOL!" class Keyboard: def __init__( self, default='', heading='' ): self.bIsConfirmed = False self.strEdit = default self.strHeading = heading def doModal (self): self.win = InputWindow("DialogKeyboard.xml", __cwd__, heading=self.strHeading, default=self.strEdit ) self.win.doModal() self.bIsConfirmed = self.win.isConfirmed() self.strEdit = self.win.getText() del self.win def setHeading(self, heading): self.strHeading = "WHOWHOWWWWOOOOO" def isConfirmed(self): return self.bIsConfirmed def getText(self): return "youtube"
[ "kschepens@gmail.com" ]
kschepens@gmail.com
e0804b03b742211cf22e225595431d99051e0976
a6ab576fcdb7f7258bf579765c92a664530b6574
/Exceptions.py
deb9da8256dbc666dff8ca553ac66a4d872c7199
[]
no_license
ILYSHI/Python-code
36a7322be62dcda1c3e7949f23fed927657d40fa
ff110688e32be6f91a0fce5d38c2775a062c1225
refs/heads/main
2023-01-19T03:55:38.041634
2020-11-21T12:45:04
2020-11-21T12:45:04
314,808,790
0
0
null
null
null
null
UTF-8
Python
false
false
207
py
class BadName(Exception): pass def greet(name): if name[0].isupper(): return "Hello, " + name else: raise BadName(name + ' is inappropriate name') print('Import is execution')
[ "termit63@gmail.com" ]
termit63@gmail.com
1419006fc8c21bcce2139ded545ad3d7af085e95
97379f2f2ab5ffa58ad0bbfbb5a2b1b2bc46d6fe
/modelwrangler/corral/dense_feedforward.py
22e65f0cfad2e415643b656689bb4bfc3c1256fd
[ "MIT" ]
permissive
brenton-enigma/modelwrangler
6afffbdf0b929e566adfd4497b60f8c613ef57d5
541d3c3267f70ff57a30a8c954b82c039ecff7aa
refs/heads/master
2021-05-09T20:16:29.687571
2017-12-17T18:33:45
2017-12-17T18:33:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,602
py
"""Module sets up Dense Autoencoder model""" import tensorflow as tf from modelwrangler.model_wrangler import ModelWrangler import modelwrangler.tf_ops as tops from modelwrangler.tf_models import BaseNetworkParams, BaseNetwork, LayerConfig class DenseFeedforwardParams(BaseNetworkParams): """Dense autoencoder params """ LAYER_PARAM_TYPES = { "hidden_params": LayerConfig, "output_params": LayerConfig, } MODEL_SPECIFIC_ATTRIBUTES = { "name": "ff", "in_size": 10, "out_size": 2, "hidden_nodes": [5, 5], "hidden_params": { "dropout_rate": 0.1 }, "output_params": { "dropout_rate": None, "activation": None, "act_reg": None }, } class DenseFeedforwardModel(BaseNetwork): """Dense autoencoder model """ # pylint: disable=too-many-instance-attributes PARAM_CLASS = DenseFeedforwardParams def setup_layers(self, params): """Build all the model layers """ # # Input layer # layer_stack = [ tf.placeholder( "float", name="input", shape=[None, params.in_size] ) ] in_layer = layer_stack[0] for idx, num_nodes in enumerate(params.hidden_nodes): layer_stack.append( self.make_dense_layer( layer_stack[-1], num_nodes, 'hidden_{}'.format(idx), params.hidden_params ) ) preact_out_layer, out_layer = self.make_dense_output_layer( layer_stack[-1], params.out_size, params.output_params ) target_layer = tf.placeholder( "float", name="target", shape=[None, params.out_size] ) if params.output_params.activation in ['sigmoid']: loss = tops.loss_sigmoid_ce(preact_out_layer, target_layer) elif params.output_params.activation in ['softmax']: loss = tops.loss_softmax_ce(preact_out_layer, target_layer) else: loss = tops.loss_mse(target_layer, out_layer) return in_layer, out_layer, target_layer, loss class DenseFeedforward(ModelWrangler): """Dense Autoencoder """ def __init__(self, in_size=10, **kwargs): super(DenseFeedforward, self).__init__( model_class=DenseFeedforwardModel, in_size=in_size, **kwargs)
[ "bmcmenamin@gmail.com" ]
bmcmenamin@gmail.com
38cde75832792f190c10e244e0589cd487f12a03
27341bdbb9bc564905f8bbc01604eef9cefe6ca4
/venv/lib/python3.9/site-packages/sqlalchemy_jsonfield/__init__.py
cbc8ad531207ccddd2148b41318e6364443c230f
[]
no_license
derek-johns/nba-batch-pipeline
fb10ae171e21537d7d83a8ac89c3c2c8b7072f21
65898c80c1eea448c75ba07b553b49c7a93837b2
refs/heads/main
2023-02-18T16:52:52.192766
2021-01-09T20:34:28
2021-01-09T20:34:28
328,239,908
0
1
null
null
null
null
UTF-8
Python
false
false
1,213
py
# Copyright 2016 Alexey Stepanov aka penguinolog # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implement JSONField for SQLAlchemy.""" # Local Implementation from .jsonfield import JSONField from .jsonfield import mutable_json_field try: # Local Implementation from ._version import version as __version__ except ImportError: pass __all__ = ("JSONField", "mutable_json_field") __author__ = "Alexey Stepanov <penguinolog@gmail.com>" __author_email__ = "penguinolog@gmail.com" __url__ = "https://github.com/penguinolog/sqlalchemy_jsonfield" __description__ = "SQLALchemy JSONField implementation for storing dicts at SQL" __license__ = "Apache License, Version 2.0"
[ "d.johnson13879@gmail.com" ]
d.johnson13879@gmail.com
967a7bd4f120ca55ba37aef4cb2e8af1f8b53ff8
393eb8b5e87de5572e4bd31902c9a42edf91e2f2
/mysite/home/models.py
6409d9c97f4d90e5c82308b3d52d593562195328
[]
no_license
heyyysus/Yeetboard
ea185160e89cd343d360981f96204f44a4eb7b18
ecd0aaa416a03028b973619b3e4eeb42ea04cf6e
refs/heads/master
2021-09-29T00:55:38.598684
2018-11-22T01:21:35
2018-11-22T01:21:35
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,013
py
from django.db import models from django.contrib.auth.models import ( BaseUserManager, AbstractBaseUser, User ) from django.db.models.signals import post_save from django.dispatch import receiver class Profile(models.Model): user = models.OneToOneField(User, on_delete=models.CASCADE) bio = models.TextField(max_length=500, blank=True) email = models.TextField(max_length=50, blank=True) isActivated = models.BooleanField(default=False) @receiver(post_save, sender=User) def create_user_profile(sender, instance, created, **kwargs): if created: Profile.objects.create(user=instance) @receiver(post_save, sender=User) def save_user_profile(sender, instance, **kwargs): instance.profile.save() class EmailVerification(models.Model): user = models.CharField(max_length = 150, default=""); activation_code = models.CharField(max_length = 50) redeemed = models.BooleanField(default = False) lastsent = models.DateTimeField(null=True) class Post(models.Model): author = models.CharField(max_length = 32, blank=False) title = models.CharField(max_length = 200, blank=False) content = models.TextField(max_length = 5000, blank=True) timestamp = models.DateTimeField(null=False) isNsfw = models.BooleanField(default=False) isSpoiler = models.BooleanField(default=False) post_id = models.CharField(max_length=7, blank=False) def as_dict(self): return { "id": self.id, "author": self.author, "title": self.title, "content": self.content, "timestamp": self.timestamp, "isNsfw": self.isNsfw, "isSpoiler": self.isSpoiler, "post_id": self.post_id } class UserActions(models.Model): user = models.CharField(max_length = 32, blank=False, default="<GUEST>") action = models.CharField(max_length = 256, blank=False) timestamp = models.DateTimeField(null=False) ipv4 = models.CharField(max_length = 16, blank=False)
[ "jesus.velarde07@gmail.com" ]
jesus.velarde07@gmail.com
841b47896afe9f1263de4f58ffd78c05f3001e4a
a78ddbca5c691acc739cfb65d5914fcf27bf06cb
/DataFilter.py
04facf6863c268b8ae4239019bc6dc52e8d41c76
[]
no_license
yangshuoc/pku_SVT_plus
4620355bace7fdd2ea96f18255698ac1f0f98dea
fd67d945526631c821a092fb0585a801dc94d7f4
refs/heads/master
2020-03-24T17:20:51.686792
2018-08-02T01:20:59
2018-08-02T01:20:59
142,855,721
0
0
null
null
null
null
UTF-8
Python
false
false
1,889
py
import csv import random # DATA_FILE = 'data.csv' # MISS_TAG = 0 # MISSING_ROW_NUM = 900 # MISSING_COL_NUM = 6 DATA_FILE = 'vocab_vector.csv' MISS_TAG = 0 MISSING_ROW_NUM = 200 # MISSING_COL_NUM = 70 MISSING_COL_NUM = 50 # DATA_FILE = 'svt_matrix.csv' # MISS_TAG = 0 # MISSING_ROW_NUM = 70 # MISSING_COL_NUM = 250 def loadCSVData(file): csv_reader = csv.reader(open(file, encoding='utf-8')) strMatrix = [] for r in csv_reader: strMatrix.append(r) numMatrix = [] for i in range(len(strMatrix)): if i == 0: continue row = [] for x in strMatrix[i]: row.append(float(x)) numMatrix.append(row) return numMatrix def buildTestMatrix(matrix): n = len(matrix) m = len(matrix[0]) selectedN = range(n) selectedM = range(m) selectedN = random.sample(selectedN,n-MISSING_ROW_NUM) selectedM = random.sample(selectedM,m-MISSING_COL_NUM) selectedN.sort() selectedM.sort() # print(selectedM) # print(selectedN) for i in range(n): for j in range(m): if i in selectedN or j in selectedM: continue matrix[i][j] = MISS_TAG return selectedN,selectedM,matrix def getCsrMatrix(M,row,col): n = len(M) m = len(M[0]) vector = [] for i in range(n): for j in range(m): if M[i][j] != MISS_TAG: row.append(i) col.append(j) vector.append(M[i][j]) # print("finished") return vector,n,m if __name__ == '__main__': matrix = loadCSVData(DATA_FILE) selectedN,selectedM,matrix = buildTestMatrix(matrix) row = [] col = [] vector = getCsrMatrix(matrix,row,col) # print(vector) for row in matrix: print(row)
[ "noreply@github.com" ]
yangshuoc.noreply@github.com
78b7821eb4c330884d87322bb2e9c99f8af8f6d8
acf2d43575f4be1fc97d0368073e338188e8bfae
/1_twosum.py
9383da9effb20171eb6ecb6763371d53564f67f0
[]
no_license
mingshaofeng/leetCode
87e6011285168eabdcfad89a445c371aad1d0f46
0fbf165d4ff25a879db81c8958c191fa4728701f
refs/heads/master
2020-12-31T08:47:12.951354
2020-02-08T12:26:28
2020-02-08T12:26:28
238,958,568
0
0
null
null
null
null
UTF-8
Python
false
false
611
py
''' Given an array of integers, return indices of the two numbers such that they add up to a specific target. You may assume that each input would have exactly one solution, and you may not use the same element twice. Example: Given nums = [2, 7, 11, 15], target = 9, Because nums[0] + nums[1] = 2 + 7 = 9, return [0, 1]. ''' # -*- coding:utf-8 -*- def twoSum(nums,target): hashmap={} for i,n in enumerate(nums): if target-n in hashmap: return [hashmap[target-n],i] hashmap[n]=i if __name__=='__main__': nums=[2,7,11,15] target=9 print(twoSum(nums,target))
[ "1812611764@qq.com" ]
1812611764@qq.com
dd16775a4926161b4b8d7e6769c6edfd9685d2c3
b68af7ed59f8cb357abb45cc01c4c90e69d0dac4
/conftest.py
fd2c2f101438991b85a765037d2097de4b849720
[ "MIT" ]
permissive
paultro708/DataReduction
fb4197c889f47fb35cd89812c76c3bdde7badf17
ef63b74f3c93e7eb7887c8bc2f25ce0200460d3d
refs/heads/master
2023-02-02T06:48:42.691450
2020-12-18T11:16:17
2020-12-18T11:16:17
287,581,390
5
0
null
null
null
null
UTF-8
Python
false
false
1,005
py
import pytest
from tests.const import names, basic_reduction, classifiers
from InstanceReduction.DataPreparation import DataPreparation
from InstanceReduction.Raport import Raport


@pytest.fixture(params=names, scope='module')
def data(request):
    """A DataPreparation instance, parametrized over every dataset name."""
    dataset_name = request.param
    return DataPreparation(dataset_name)


@pytest.fixture(params=basic_reduction, scope='module')
def reduction_alg(request, data):
    """Each basic reduction algorithm, instantiated on the `data` fixture."""
    algorithm_cls = request.param
    return algorithm_cls(data)


@pytest.fixture(params=basic_reduction, scope='module')
def reduction_alg_names(request):
    """The basic reduction-algorithm classes themselves (not instantiated)."""
    return request.param


@pytest.fixture(scope='module')
def data_prepar_iris(request):
    """DataPreparation for the fixed 'iris' dataset."""
    return DataPreparation('iris')


@pytest.fixture(params=basic_reduction, scope='module')
def reduction_alg_iris(request):
    """Each basic reduction algorithm, built on a fresh iris DataPreparation."""
    algorithm_cls = request.param
    return algorithm_cls(DataPreparation('iris'))


@pytest.fixture(scope='module')
def raport_iris(reduction_alg_iris, data_prepar_iris):
    """A Raport produced by running a reduction on the iris dataset."""
    reduction_alg_iris.reduce_instances()
    return Raport(data_prepar_iris,
                  reduction_alg_iris.red_data,
                  reduction_alg_iris.red_lab)
[ "32535575+paultro708@users.noreply.github.com" ]
32535575+paultro708@users.noreply.github.com
5957fddb7c3f6e6aa3a69b0ba94279abc367d105
d09b14a13e05adcd3d0f1714384b3ab65be4aa7c
/controller/UserRoleManagementDialog.py
88b9e2a569b3cea650f01750526b2b90f2bedf97
[]
no_license
ankhbold/lm2
bd61a353b95d6d8e351cf4b0af48b1b936db8b9f
30dfbeced57f123d39a69cb4d643a15429b8bfde
refs/heads/master
2021-07-24T20:57:16.534659
2017-11-03T16:33:43
2017-11-03T16:33:43
90,375,636
1
0
null
null
null
null
UTF-8
Python
false
false
64,172
py
__author__ = 'ankhaa' from PyQt4.QtGui import * from PyQt4.QtCore import * from sqlalchemy import exc, or_ from sqlalchemy.exc import DatabaseError, SQLAlchemyError from sqlalchemy.exc import SQLAlchemyError from sqlalchemy.orm.exc import NoResultFound from sqlalchemy import func, or_, and_, desc,extract from inspect import currentframe from ..view.Ui_UserRoleManagementDialog import * from ..model.SetRole import * from ..model.AuLevel1 import * from ..model.AuLevel2 import * from ..model.LM2Exception import LM2Exception from ..model.DialogInspector import DialogInspector from ..model.ClPositionType import * from ..model.ClGroupRole import * from ..model.SetPositionGroupRole import * from ..model.SetUserPosition import * from ..model.SetUserGroupRole import * from ..utils.PluginUtils import * from ..controller.UserRoleManagementDetialDialog import * from uuid import getnode as get_mac import commands import datetime import socket import sys import struct INTERFACE_NAME = "eth0" class UserRoleManagementDialog(QDialog, Ui_UserRoleManagementDialog): GROUP_SEPARATOR = '-----' PW_PLACEHOLDER = '0123456789' def __init__(self, has_privilege , user, parent=None): super(UserRoleManagementDialog, self).__init__(parent) self.setupUi(self) self.db_session = SessionHandler().session_instance() self.has_privilege = has_privilege self.__username = user self.__privilage() self.__setup_combo_boxes() self.__populate_user_role_lwidget() self.__populate_group_lwidget() self.__populate_au_level1_cbox() self.close_button.clicked.connect(self.reject) # permit only alphanumeric characters for the username reg_ex = QRegExp(u"[a-z]{4}[0-9]{6}") validator = QRegExpValidator(reg_ex, None) reg_ex = QRegExp(u"[a-z_0-9]+") validator_pass = QRegExpValidator(reg_ex, None) database = QSettings().value(SettingsConstants.DATABASE_NAME) self.username_edit.setText('user'+ database[-4:]) self.username_edit.setValidator(validator) self.password_edit.setValidator(validator_pass) 
self.retype_password_edit.setValidator(validator_pass) self.__setup_validators() self.selected_user = None # self.mac_address = self.get_mac_address() # self.mac_address_edit.setText(self.mac_address) self.__setup_twidget() self.__load_default_ritht_grud() def __setup_twidget(self): self.user_twidget.setSelectionMode(QAbstractItemView.SingleSelection) self.user_twidget.setSelectionBehavior(QAbstractItemView.SelectRows) self.user_twidget.setEditTriggers(QAbstractItemView.NoEditTriggers) self.user_twidget.setSortingEnabled(True) self.position_twidget.setSelectionMode(QAbstractItemView.SingleSelection) self.position_twidget.setSelectionBehavior(QAbstractItemView.SelectRows) self.position_twidget.setEditTriggers(QAbstractItemView.NoEditTriggers) self.position_twidget.setSortingEnabled(True) self.settings_position_twidget.setAlternatingRowColors(True) self.settings_position_twidget.setSelectionMode(QAbstractItemView.SingleSelection) self.settings_position_twidget.setSelectionBehavior(QAbstractItemView.SelectRows) self.settings_right_grud_twidget.setSelectionMode(QAbstractItemView.SingleSelection) self.settings_right_grud_twidget.setSelectionBehavior(QAbstractItemView.SelectRows) self.settings_right_grud_twidget.setEditTriggers(QAbstractItemView.NoEditTriggers) self.settings_right_grud_twidget.setSortingEnabled(True) self.settings_right_grud_twidget.setColumnWidth(0, 170) self.settings_right_grud_twidget.setColumnWidth(1, 170) self.settings_right_grud_twidget.setColumnWidth(2, 45) self.settings_right_grud_twidget.setColumnWidth(3, 45) self.settings_right_grud_twidget.setColumnWidth(4, 45) self.settings_right_grud_twidget.setColumnWidth(5, 45) self.right_grud_twidget.setSelectionMode(QAbstractItemView.SingleSelection) self.right_grud_twidget.setSelectionBehavior(QAbstractItemView.SelectRows) self.right_grud_twidget.setEditTriggers(QAbstractItemView.NoEditTriggers) self.right_grud_twidget.setSortingEnabled(True) self.right_grud_twidget.setColumnWidth(0, 170) 
self.right_grud_twidget.setColumnWidth(1, 45) self.right_grud_twidget.setColumnWidth(2, 45) self.right_grud_twidget.setColumnWidth(3, 45) self.right_grud_twidget.setColumnWidth(4, 45) @pyqtSlot(int) def on_get_mac_checkbox_stateChanged(self, state): if state == Qt.Checked: self.mac_address = self.get_mac_address() self.mac_address_edit.setText(self.mac_address) else: self.mac_address_edit.clear() def __setup_validators(self): self.mac_validator = QRegExpValidator( QRegExp("[a-zA-Z0-9]{2}:[a-zA-Z0-9]{2}:[a-zA-Z0-9]{2}:[a-zA-Z0-9]{2}:[a-zA-Z0-9]{2}:[a-zA-Z0-9]{2}"), None) self.mac_address_edit.setValidator(self.mac_validator) def get_mac_address(self): if sys.platform == 'win32': for line in os.popen("ipconfig /all"): if line.lstrip().startswith('Physical Address'): mac = line.split(':')[1].strip().replace('-', ':') if len(mac) == 17: mac = line.split(':')[1].strip().replace('-', ':') break else: for line in os.popen("/sbin/ifconfig"): if line.find('Ether') > -1: mac = line.split()[4] if len(mac) == 17: mac = line.split(':')[1].strip().replace('-', ':') break return mac def get_macaddress(self, host): """ Returns the MAC address of a network host, requires >= WIN2K. """ # http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/347812 import ctypes import socket import struct # Check for api availability try: SendARP = ctypes.windll.Iphlpapi.SendARP except: raise NotImplementedError('Usage only on Windows 2000 and above') # Doesn't work with loopbacks, but let's try and help. if host == '127.0.0.1' or host.lower() == 'localhost': host = socket.gethostname() # gethostbyname blocks, so use it wisely. 
try: inetaddr = ctypes.windll.wsock32.inet_addr(host) if inetaddr in (0, -1): raise Exception except: hostip = socket.gethostbyname(host) inetaddr = ctypes.windll.wsock32.inet_addr(hostip) buffer = ctypes.c_buffer(6) addlen = ctypes.c_ulong(ctypes.sizeof(buffer)) if SendARP(inetaddr, 0, ctypes.byref(buffer), ctypes.byref(addlen)) != 0: raise WindowsError('Retreival of mac address(%s) - failed' % host) # Convert binary data into a string. macaddr = '' for intval in struct.unpack('BBBBBB', buffer): if intval > 15: replacestr = '0x' else: replacestr = 'x' if macaddr != '': macaddr = ':'.join([macaddr, hex(intval).replace(replacestr, '')]) else: macaddr = ''.join([macaddr, hex(intval).replace(replacestr, '')]) return macaddr.upper() def __privilage(self): if not self.has_privilege: self.groupBox_2.setEnabled(False) self.add_button.setEnabled(False) self.delete_button.setEnabled(False) self.username_edit.setEnabled(False) self.phone_edit.setEnabled(False) self.surname_edit.setEnabled(False) self.firstname_edit.setEnabled(False) self.email_edit.setEnabled(False) self.position_cbox.setEnabled(False) self.mac_address_edit.setEnabled(False) self.groupBox_3.setEnabled(False) def __setup_combo_boxes(self): try: positions = self.db_session.query(ClPositionType).all() for position in positions: self.position_cbox.addItem(position.description, position.code) except SQLAlchemyError, e: PluginUtils.show_error(self, self.tr("Query Error"), self.tr("Error in line {0}: {1}").format(currentframe().f_lineno, e.message)) return # def set_username(self, username): # # self.__username = username def __populate_user_role_lwidget(self): self.user_role_lwidget.clear() if self.has_privilege: users = self.db_session.query(SetRole.user_name).order_by(SetRole.user_name).group_by(SetRole.user_name) else: users = self.db_session.query(SetRole.user_name).filter(SetRole.user_name == self.__username).group_by(SetRole.user_name).all() try: for user in users: item = 
QListWidgetItem(QIcon(":/plugins/lm2/person.png"), user.user_name) # if user.user_name == self.__logged_on_user(): item.setForeground(Qt.blue) # if self.__is_db_role(user.user_name): self.user_role_lwidget.addItem(item) except (DatabaseError, SQLAlchemyError), e: PluginUtils.show_error(self, self.tr("Database Error"), e.message) def __is_db_role(self, user_name): try: sql = "SELECT count(*) FROM pg_roles WHERE rolname = '{0}' and rolcanlogin = true".format(user_name) count = self.db_session.execute(sql).fetchone() return True if count[0] == 1 else False except SQLAlchemyError, e: PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("Could not execute: {0}").format(e.message)) def __populate_group_lwidget(self): self.group_lwidget.clear() self.member_lwidget.clear() QListWidgetItem("land_office_administration", self.group_lwidget) QListWidgetItem("db_creation", self.group_lwidget) QListWidgetItem("role_management", self.group_lwidget) QListWidgetItem(self.GROUP_SEPARATOR, self.group_lwidget) QListWidgetItem("application_view", self.group_lwidget) QListWidgetItem("application_update", self.group_lwidget) QListWidgetItem("cadastre_view", self.group_lwidget) QListWidgetItem("cadastre_update", self.group_lwidget) QListWidgetItem("contracting_view", self.group_lwidget) QListWidgetItem("contracting_update", self.group_lwidget) QListWidgetItem("reporting", self.group_lwidget) QListWidgetItem("log_view", self.member_lwidget) def __populate_au_level1_cbox(self): try: PluginUtils.populate_au_level1_cbox(self.aimag_cbox, True, False, False) except DatabaseError, e: PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("Could not execute: {0}").format(e.message)) @pyqtSlot() def on_aimag_lwidget_itemSelectionChanged(self): try: self.soum_cbox.clear() self.soum_cbox.addItem("*", "*") if self.aimag_lwidget.currentItem() is None: return # if self.aimag_lwidget.count() > 1: # return au_level1_code = self.aimag_lwidget.currentItem().data(Qt.UserRole) 
PluginUtils.populate_au_level2_cbox(self.soum_cbox, au_level1_code, True, False, False) except DatabaseError, e: PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("Could not execute: {0}").format(e.message)) @pyqtSlot() def on_user_role_lwidget_itemSelectionChanged(self): self.selected_user = self.user_role_lwidget.currentItem().text() user_name = self.user_role_lwidget.currentItem().text() try: user_c = self.db_session.query(SetRole). \ filter(SetRole.user_name == user_name).count() if user_c == 1: user = self.db_session.query(SetRole). \ filter(SetRole.user_name == user_name).one() else: user = self.db_session.query(SetRole).\ filter(SetRole.user_name == user_name).\ filter(SetRole.is_active == True).one() except NoResultFound: return self.username_real_lbl.setText(user.user_name_real) self.username_edit.setText(user.user_name) self.surname_edit.setText(user.surname) self.firstname_edit.setText(user.first_name) self.email_edit.setText(user.email) self.position_cbox.setCurrentIndex(self.position_cbox.findData(user.position)) # self.position_edit.setText(user.position) self.phone_edit.setText(user.phone) self.mac_address_edit.setText(user.mac_addresses) self.password_edit.setText(self.PW_PLACEHOLDER) self.retype_password_edit.setText(self.PW_PLACEHOLDER) self.register_edit.setText(user.user_register) # populate groups self.__populate_group_lwidget() groups = self.__groupsByUser(user_name) for group in groups: group_name = group[0] items = self.group_lwidget.findItems(group_name, Qt.MatchExactly) if len(items) > 0: item = items[0] self.member_lwidget.addItem(item.text()) self.group_lwidget.takeItem(self.group_lwidget.row(item)) # populate admin units self.aimag_lwidget.clear() self.soum_lwidget.clear() restriction_au_level1 = user.restriction_au_level1 aimag_codes = restriction_au_level1.split(',') try: if len(aimag_codes) == self.db_session.query(AuLevel1).count(): # all Aimags item = QListWidgetItem("*") item.setData(Qt.UserRole, "*") 
self.aimag_lwidget.addItem(item) self.soum_lwidget.addItem(item) else: for code in aimag_codes: code = code.strip() aimag = self.db_session.query(AuLevel1).filter(AuLevel1.code == code).one() item = QListWidgetItem(aimag.name) item.setData(Qt.UserRole, aimag.code) self.aimag_lwidget.addItem(item) restriction_au_level2 = user.restriction_au_level2 soum_codes = restriction_au_level2.split(',') # Find districts among the Aimags: l1_district_entries = filter(lambda x: x.startswith('1') or x.startswith('01'), aimag_codes) l2_district_entries = filter(lambda x: x.startswith('1') or x.startswith('01'), soum_codes) true_aimags = filter(lambda x: not x.startswith('1') and not x.startswith('01'), aimag_codes) if len(aimag_codes)-len(l1_district_entries) == 1 and \ len(soum_codes)-len(l2_district_entries) == self.db_session.query(AuLevel2)\ .filter(AuLevel2.code.startswith(true_aimags[0]))\ .count(): item = QListWidgetItem("*") item.setData(Qt.UserRole, "*") self.soum_lwidget.addItem(item) else: for code in soum_codes: code = code.strip() soum = self.db_session.query(AuLevel2).filter(AuLevel2.code == code).one() item = QListWidgetItem(soum.name+'_'+soum.code) item.setData(Qt.UserRole, soum.code) self.soum_lwidget.addItem(item) except NoResultFound: pass def reject(self): SessionHandler().destroy_session() QDialog.reject(self) @pyqtSlot() def on_add_button_clicked(self): try: if self.__add_or_update_role(): PluginUtils.show_message(self, self.tr("User Role Management"), self.tr('New user created.')) except DatabaseError, e: self.db_session.rollback() PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("Could not execute: {0}").format(e.message)) @pyqtSlot() def on_update_button_clicked(self): try: if self.__add_or_update_role('UPDATE'): PluginUtils.show_message(self, self.tr("User Role Management"), self.tr('User information updated.')) except DatabaseError, e: self.db_session.rollback() PluginUtils.show_error(self, self.tr("Database Query Error"), 
self.tr("Could not execute: {0}").format(e.message)) def __add_or_update_role(self, mode='ADD'): if not self.__validate_user_input(mode): return False user_name = self.username_edit.text().strip() surname = self.surname_edit.text().strip() first_name = self.firstname_edit.text().strip() user_register = self.register_edit.text().strip() phone = self.phone_edit.text().strip() # position = self.position_edit.text().strip() position = self.position_cbox.itemData(self.position_cbox.currentIndex()) mac_addresses = self.mac_address_edit.text().strip() password = self.password_edit.text().strip() email = '' if self.email_edit.text(): email = self.email_edit.text().strip() if self.has_privilege: try: self.db_session.execute("SET ROLE role_management") except DatabaseError, e: self.db_session.rollback() PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("You must login different username with member of role management")) return if mode == 'ADD': sql = "SELECT count(*) FROM pg_roles WHERE rolname = '{0}' and rolcanlogin = true".format(user_name) count = self.db_session.execute(sql).fetchone() if count[0] == 0: self.db_session.execute(u"CREATE ROLE {0} login PASSWORD '{1}'".format(user_name, password)) else: message_box = QMessageBox() message_box.setText(self.tr("Could not execute: {0} already exists. 
Do you want to connect selected soums?").format(user_name)) yes_button = message_box.addButton(self.tr("Yes"), QMessageBox.ActionRole) message_box.addButton(self.tr("Cancel"), QMessageBox.ActionRole) message_box.exec_() if not message_box.clickedButton() == yes_button: return else: if password != self.PW_PLACEHOLDER: self.db_session.execute(u"ALTER ROLE {0} PASSWORD '{1}'".format(user_name, password)) groups = self.__groupsByUser(user_name) for group in groups: self.db_session.execute(u"REVOKE {0} FROM {1}".format(group[0], user_name)) for index in range(self.member_lwidget.count()): item = self.member_lwidget.item(index) sql = "SELECT count(*) FROM pg_roles WHERE rolname = '{0}' and rolcanlogin = true".format(user_name) count = self.db_session.execute(sql).fetchone() if count[0] == 0: self.db_session.execute(u"CREATE ROLE {0} login PASSWORD '{1}'".format(user_name, password)) self.db_session.execute(u"GRANT {0} TO {1}".format(item.text(), user_name)) self.db_session.execute("RESET ROLE") restriction_au_level1 = '' restriction_au_level2 = '' is_first = 0 for index in range(self.aimag_lwidget.count()): item = self.aimag_lwidget.item(index) if item.text() == '*': # all Aimags for index2 in range(self.aimag_cbox.count()): au_level1_code = str(self.aimag_cbox.itemData(index2, Qt.UserRole)) if au_level1_code != '*': restriction_au_level1 += au_level1_code + ',' # Special treatment for UB's districts: if au_level1_code.startswith('1') or au_level1_code.startswith('01'): restriction_au_level2 += au_level1_code + '00' + ',' self.db_session.execute("SET ROLE role_management") self.db_session.execute(u"GRANT s{0}00 TO {1}".format(au_level1_code, user_name)) self.db_session.execute("RESET ROLE") for au_level2 in self.db_session.query(AuLevel2).filter(AuLevel2.code.startswith(au_level1_code))\ .order_by(AuLevel2.code): restriction_au_level2 += au_level2.code + ',' self.db_session.execute("SET ROLE role_management") self.db_session.execute(u"GRANT s{0} TO 
{1}".format(au_level2.code, user_name)) self.db_session.execute(u"GRANT s{0} TO {1}".format(au_level2.code, user_name)) self.db_session.execute("RESET ROLE") break else: au_level1_code = str(item.data(Qt.UserRole)) restriction_au_level1 += au_level1_code + ',' # Special treatment for UB's districts: # if au_level1_code.startswith('1') or au_level1_code.startswith('01'): # restriction_au_level2 += au_level1_code + '00' + ',' # self.db_session.execute("SET ROLE role_management") # self.db_session.execute(u"GRANT s{0}00 TO {1}".format(au_level1_code, user_name)) # self.db_session.execute("RESET ROLE") if is_first == 0: is_first = 1 for index2 in range(self.soum_lwidget.count()): item = self.soum_lwidget.item(index2) if item.text() == '*': for au_level2 in self.db_session.query(AuLevel2).filter(AuLevel2.code.startswith(au_level1_code))\ .order_by(AuLevel2.code): restriction_au_level2 += au_level2.code + ',' self.db_session.execute("SET ROLE role_management") self.db_session.execute(u"GRANT s{0} TO {1}".format(au_level2.code, user_name)) self.db_session.execute("RESET ROLE") else: try: au_level2_code = str(item.data(Qt.UserRole)) restriction_au_level2 += au_level2_code + ',' self.db_session.execute("SET ROLE role_management") self.db_session.execute(u"GRANT s{0} TO {1}".format(au_level2_code, user_name)) self.db_session.execute("RESET ROLE") except DatabaseError, e: self.db_session.rollback() PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("You must login different username with member of role management")) return restriction_au_level1 = restriction_au_level1[:len(restriction_au_level1)-1] restriction_au_level2 = restriction_au_level2[:len(restriction_au_level2)-1] pa_from = datetime.datetime.today() pa_till = datetime.date.max role_c = self.db_session.query(SetRole).filter(SetRole.user_name == user_name).count() if self.register_edit.text() == None or self.register_edit.text() == '': PluginUtils.show_message(None, self.tr("None register"), 
self.tr("Register not null!")) return if mode == 'ADD': if role_c != 0: role_count = self.db_session.query(SetRole).filter( SetRole.user_register == self.register_edit.text()).count() # if role_count > 0: # PluginUtils.show_message(None, self.tr("Duplicate user"), # self.tr("This user already registered!")) # return is_active_user = False if role_c == 0: is_active_user = True else: active_role_count = self.db_session.query(SetRole).filter(SetRole.user_name == user_name).filter(SetRole.is_active == True).count() if active_role_count == 0: is_active_user = True else: is_active_user = False try: count = self.db_session.query(SetRole) \ .filter(SetRole.user_name == user_name) \ .order_by(func.substr(SetRole.user_name_real, 11, 12).desc()).count() except SQLAlchemyError, e: PluginUtils.show_error(self, self.tr("File Error"), self.tr("Error in line {0}: {1}").format(currentframe().f_lineno, e.message)) return if count > 0: try: max_number_user = self.db_session.query(SetRole) \ .filter(SetRole.user_name == user_name) \ .order_by(func.substr(SetRole.user_name_real, 11, 12).desc()).first() except SQLAlchemyError, e: PluginUtils.show_error(self, self.tr("File Error"), self.tr("Error in line {0}: {1}").format(currentframe().f_lineno, e.message)) return user_numbers = max_number_user.user_name_real[-2:] new_user_number = (str(int(user_numbers[1]) + 1).zfill(2)) last_user_name = user_name[:10] + new_user_number user_name_real = last_user_name employee_type = 2 else: user_name_real = self.username_edit.text()+'01' employee_type = 1 role = SetRole(user_name=user_name, surname=surname, first_name=first_name, phone=phone, user_register=user_register, mac_addresses=mac_addresses, position=position, restriction_au_level1=restriction_au_level1, user_name_real = user_name_real, employee_type = employee_type, restriction_au_level2=restriction_au_level2, pa_from=pa_from, pa_till=pa_till, is_active=is_active_user, email=email) self.db_session.add(role) else: active_role_count = 
self.db_session.query(SetRole).filter(SetRole.user_name == user_name).filter( SetRole.is_active == True).count() if active_role_count == 1: role = self.db_session.query(SetRole).filter(SetRole.user_name == user_name).filter(SetRole.is_active == True).one() else: role = self.db_session.query(SetRole).filter(SetRole.user_name == user_name).filter(SetRole.user_name_real == self.username_real_lbl.text()).one() # for role in roles: # print role.user_name_real role.surname = surname role.first_name = first_name role.phone = phone role.user_register = user_register role.mac_addresses = mac_addresses if active_role_count == 0: role.is_active = True role.position = position role.restriction_au_level1 = restriction_au_level1 role.restriction_au_level2 = restriction_au_level2 role.email = email self.db_session.commit() self.__populate_user_role_lwidget() item = self.user_role_lwidget.findItems(user_name, Qt.MatchExactly)[0] row = self.user_role_lwidget.row(item) self.user_role_lwidget.setCurrentRow(row) return True else: if password != self.PW_PLACEHOLDER: self.db_session.execute(u"ALTER ROLE {0} PASSWORD '{1}'".format(user_name, password)) self.db_session.commit() self.__populate_user_role_lwidget() item = self.user_role_lwidget.findItems(user_name, Qt.MatchExactly)[0] row = self.user_role_lwidget.row(item) self.user_role_lwidget.setCurrentRow(row) return True def __validate_user_input(self, mode='ADD'): if mode == 'UPDATE': if self.username_edit.text().strip() != self.selected_user: PluginUtils.show_message(None, self.tr("Username can't be modified"), self.tr("The username of an existing user cannot be modified!")) self.username_edit.setText(self.selected_user) return False if self.username_edit.text().strip() == 'role_manager' \ and not self.member_lwidget.findItems('role_management', Qt.MatchExactly): PluginUtils.show_message(self, self.tr("Required group"), self.tr("The user 'role_manager' must be member of group 'role_management'.")) return False if 
len(self.username_edit.text().strip()) == 0: PluginUtils.show_message(self, self.tr("No Username"), self.tr("Provide a valid username!")) return False if len(self.password_edit.text().strip()) < 8: PluginUtils.show_message(self, self.tr("Invalid Password"), self.tr("Provide a valid password that consists of 8 characters or more!")) return False if self.password_edit.text().strip() != self.retype_password_edit.text().strip(): PluginUtils.show_message(self, self.tr("Passwords Not Matching"), self.tr("Password and retyped password are not identical!")) return False if len(self.surname_edit.text().strip()) == 0: PluginUtils.show_message(self, self.tr("No Surname"), self.tr("Provide a valid surname!")) return False if len(self.firstname_edit.text().strip()) == 0: PluginUtils.show_message(self, self.tr("No First Name"), self.tr("Provide a valid first name!")) return False if len(self.email_edit.text().strip()) == 0: PluginUtils.show_message(self, self.tr("No Email"), self.tr("Provide a valid email!")) return False if len(self.firstname_edit.text().strip()) == 0: PluginUtils.show_message(self, self.tr("No Position"), self.tr("Provide a valid position!")) return False if self.member_lwidget.count() == 0: PluginUtils.show_message(self, self.tr("No Group Membership"), self.tr("The user must be member of at least one group!")) return False if not self.member_lwidget.findItems('role_management', Qt.MatchExactly) \ and not self.member_lwidget.findItems('db_creation', Qt.MatchExactly): if self.aimag_lwidget.count() == 0: PluginUtils.show_message(self, self.tr("No Aimag/Duureg"), self.tr("The user must be granted at least one Aimag/Duureg!")) return False if self.soum_lwidget.count() == 0: PluginUtils.show_message(self, self.tr("No Soum"), self.tr("The user must granted at least one Soum!")) return False return True @pyqtSlot() def on_down_groups_button_clicked(self): if not self.group_lwidget.currentItem(): return group = self.group_lwidget.currentItem().text() if 
group.find(self.GROUP_SEPARATOR) != -1: return self.group_lwidget.takeItem(self.group_lwidget.row(self.group_lwidget.currentItem())) self.member_lwidget.addItem(group) if group == 'land_office_administration': item_list = self.member_lwidget.findItems('contracting_update', Qt.MatchExactly) if len(item_list) == 0: contracting_update_item = self.group_lwidget.findItems('contracting_update', Qt.MatchExactly)[0] self.group_lwidget.takeItem(self.group_lwidget.row(contracting_update_item)) self.member_lwidget.addItem(contracting_update_item.text()) # elif group == 'contracting_update': # item_list = self.member_lwidget.findItems('cadastre_update', Qt.MatchExactly) # if len(item_list) == 0: # cadastre_update_item = self.group_lwidget.findItems('cadastre_update', Qt.MatchExactly)[0] # self.group_lwidget.takeItem(self.group_lwidget.row(cadastre_update_item)) # self.member_lwidget.addItem(cadastre_update_item.text()) @pyqtSlot() def on_up_groups_button_clicked(self): if not self.member_lwidget.currentItem(): return group = self.member_lwidget.currentItem().text() if group == 'log_view': # cannot be removed from member widget return self.member_lwidget.takeItem(self.member_lwidget.row(self.member_lwidget.currentItem())) if group == 'role_management' or group == 'db_creation' or group == 'land_office_administration': self.group_lwidget.insertItem(0, group) else: self.group_lwidget.addItem(group) # if group == 'contracting_update': # item_list = self.group_lwidget.findItems('land_office_administration', Qt.MatchExactly) # if len(item_list) == 0: # land_office_admin_item = self.member_lwidget.findItems('land_office_administration', Qt.MatchExactly)[0] # self.member_lwidget.takeItem(self.member_lwidget.row(land_office_admin_item)) # self.group_lwidget.insertItem(0, land_office_admin_item.text()) # elif group == 'cadastre_update': # item_list = self.group_lwidget.findItems('contracting_update', Qt.MatchExactly) # if len(item_list) == 0: # contracting_update_item = 
self.member_lwidget.findItems('contracting_update', Qt.MatchExactly)[0] # self.member_lwidget.takeItem(self.member_lwidget.row(contracting_update_item)) # self.group_lwidget.addItem(contracting_update_item.text()) @pyqtSlot() def on_down_aimag_button_clicked(self): au_level1_name = self.aimag_cbox.currentText() au_level1_code = self.aimag_cbox.itemData(self.aimag_cbox.currentIndex(), Qt.UserRole) if len(self.aimag_lwidget.findItems(au_level1_name, Qt.MatchExactly)) == 0: if len(self.aimag_lwidget.findItems("*", Qt.MatchExactly)) == 0: if au_level1_name == '*': self.aimag_lwidget.clear() self.soum_lwidget.clear() item = QListWidgetItem("*") item.setData(Qt.UserRole, "*") self.soum_lwidget.addItem(item) item = QListWidgetItem(au_level1_name) item.setData(Qt.UserRole, au_level1_code) self.aimag_lwidget.addItem(item) self.aimag_lwidget.setCurrentItem(item) if self.aimag_lwidget.count() > 1: self.soum_lwidget.clear() item = QListWidgetItem("*") item.setData(Qt.UserRole, "*") self.soum_lwidget.addItem(item) @pyqtSlot() def on_up_aimag_button_clicked(self): self.aimag_lwidget.takeItem(self.aimag_lwidget.row(self.aimag_lwidget.currentItem())) if self.aimag_lwidget.count() > 0: self.aimag_lwidget.setItemSelected(self.aimag_lwidget.item(0), False) self.aimag_lwidget.setCurrentItem(self.aimag_lwidget.item(0)) self.soum_lwidget.clear() @pyqtSlot() def on_down_soum_button_clicked(self): au_level2_name = self.soum_cbox.currentText() au_level2_code = self.soum_cbox.itemData(self.soum_cbox.currentIndex(), Qt.UserRole) itemsList = self.aimag_lwidget.selectedItems() if len(self.soum_lwidget.findItems(au_level2_name +'_'+ au_level2_code, Qt.MatchExactly)) == 0: if len(self.soum_lwidget.findItems("*", Qt.MatchExactly)) == 0: if au_level2_name == '*': self.soum_lwidget.clear() item = QListWidgetItem(au_level2_name +'_'+ au_level2_code) item.setData(Qt.UserRole, au_level2_code) self.soum_lwidget.addItem(item) @pyqtSlot() def on_up_soum_button_clicked(self): 
self.soum_lwidget.takeItem(self.soum_lwidget.row(self.soum_lwidget.currentItem())) @pyqtSlot() def on_delete_button_clicked(self): item = self.user_role_lwidget.currentItem() if item is None: return user_name = item.text() if user_name == 'role_manager': PluginUtils.show_message(self, self.tr("Delete User"), self.tr("The user 'role_manager' is a required role and cannot be deleted.")) return # The user logged on must not delete himself: if self.__logged_on_user() == user_name: PluginUtils.show_message(self, self.tr("Delete User"), self.tr("The user currently logged on cannot be deleted.")) return message = "Delete user role {0}".format(user_name) if QMessageBox.No == QMessageBox.question(self, self.tr("Delete User Role"), message, QMessageBox.Yes | QMessageBox.No, QMessageBox.No): return try: user_role = self.db_session.query(SetRole).filter(SetRole.user_name == user_name).one() self.db_session.delete(user_role) self.db_session.execute("SET ROLE role_management") self.db_session.execute(u"DROP ROLE {0}".format(user_name)) self.db_session.execute("RESET ROLE") self.db_session.commit() self.__populate_user_role_lwidget() PluginUtils.show_message(self, self.tr("User Role Management"), self.tr('User role deleted.')) except DatabaseError, e: self.db_session.rollback() PluginUtils.show_error(self, self.tr("Database Query Error"), self.tr("Could not execute: {0}").format(e.message)) def __groupsByUser(self, user_name): sql = "select rolname from pg_user join pg_auth_members on (pg_user.usesysid=pg_auth_members.member) " \ "join pg_roles on (pg_roles.oid=pg_auth_members.roleid) where pg_user.usename=:bindName" result = self.db_session.execute(sql, {'bindName': user_name}).fetchall() return result def __logged_on_user(self): result = self.db_session.execute("SELECT USER") current_user = result.fetchone() return current_user[0] @pyqtSlot() def on_help_button_clicked(self): os.system("hh.exe "+ str(os.path.dirname(os.path.realpath(__file__))[:-10]) 
+"help\output\help_lm2.chm::/html/user_role_management.htm") @pyqtSlot(QListWidgetItem) def on_user_role_lwidget_itemDoubleClicked(self, item): username = item.text() dlg = UserRoleManagementDetialDialog(username) dlg.exec_() @pyqtSlot() def on_settings_button_clicked(self): if not self.user_role_lwidget.currentItem(): return username = self.user_role_lwidget.currentItem().text() dlg = UserRoleManagementDetialDialog(username) dlg.exec_() def __load_default_ritht_grud(self): aa = self.db_session.query(ClGroupRole).all() positions = self.db_session.query(ClPositionType).all() for position in positions: # right_grud = self.db_session.query(SetPositionGroupRole) row = self.settings_position_twidget.rowCount() self.settings_position_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(position.description)) item.setData(Qt.UserRole, position.code) self.settings_position_twidget.setItem(row, 0, item) @pyqtSlot() def on_load_users_button_clicked(self): self.__load_user_roles() def __load_user_roles(self): self.user_twidget.setRowCount(0) user_start = "user" + "%" users = self.db_session.query(SetRole).filter(SetRole.user_name.like(user_start)).all() for user in users: row = self.user_twidget.rowCount() self.user_twidget.insertRow(row) full_name = '('+ user.user_name_real +') '+ user.surname[:1] + '.' 
+ user.first_name item = QTableWidgetItem(u'{0}'.format(full_name)) item.setData(Qt.UserRole, user.user_name_real) self.user_twidget.setItem(row, 0, item) @pyqtSlot() def on_load_position_button_clicked(self): self.__load_all_positions() def __load_all_positions(self): self.position_twidget.setRowCount(0) selected_items = self.user_twidget.selectedItems() if len(selected_items) == 0: PluginUtils.show_message(self, self.tr("Selection"), self.tr("Please select user.")) return cur_row = self.user_twidget.currentRow() item = self.user_twidget.item(cur_row, 0) user_name_real = item.data(Qt.UserRole) positions = self.db_session.query(ClPositionType).all() for position in positions: row = self.position_twidget.rowCount() self.position_twidget.insertRow(row) user_positions_count = self.db_session.query(SetUserPosition).\ filter(SetUserPosition.user_name_real == user_name_real).\ filter(SetUserPosition.position == position.code).count() item = QTableWidgetItem(u'{0}'.format(position.description)) item.setData(Qt.UserRole, position.code) if user_positions_count == 0: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.position_twidget.setItem(row, 0, item) @pyqtSlot(QTableWidgetItem) def on_user_twidget_itemClicked(self, item): self.position_twidget.setRowCount(0) self.right_grud_twidget.setRowCount(0) cur_row = self.user_twidget.currentRow() item = self.user_twidget.item(cur_row, 0) user_name_real = item.data(Qt.UserRole) self.__load_user_positions(user_name_real) self.__load_user_right_types(user_name_real) def __load_user_right_types(self, user_name_real): right_types = self.db_session.query(ClGroupRole).all() for right_type in right_types: user_right_types_count = self.db_session.query(SetUserGroupRole). \ filter(SetUserGroupRole.user_name_real == user_name_real).\ filter(SetUserGroupRole.group_role == right_type.code).count() if user_right_types_count == 1: user_right_type = self.db_session.query(SetUserGroupRole). 
\ filter(SetUserGroupRole.user_name_real == user_name_real). \ filter(SetUserGroupRole.group_role == right_type.code).one() row = self.right_grud_twidget.rowCount() self.right_grud_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(right_type.description)) item.setData(Qt.UserRole, right_type.code) self.right_grud_twidget.setItem(row, 0, item) item = QTableWidgetItem() item.setData(Qt.UserRole, right_type.code) if user_right_types_count == 0: item.setCheckState(Qt.Unchecked) else: if not user_right_type.r_view: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 1, item) item = QTableWidgetItem() item.setData(Qt.UserRole, right_type.code) if user_right_types_count == 0: item.setCheckState(Qt.Unchecked) else: if not user_right_type.r_add: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 2, item) item = QTableWidgetItem() item.setData(Qt.UserRole, right_type.code) if user_right_types_count == 0: item.setCheckState(Qt.Unchecked) else: if not user_right_type.r_remove: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 3, item) item = QTableWidgetItem() item.setData(Qt.UserRole, right_type.code) if user_right_types_count == 0: item.setCheckState(Qt.Unchecked) else: if not user_right_type.r_update: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 4, item) def __load_user_positions(self, user_name_real): user_positions = self.db_session.query(SetUserPosition). \ filter(SetUserPosition.user_name_real == user_name_real).all() set_role = self.db_session.query(SetRole).filter(SetRole.user_name_real == user_name_real).one() position = self.db_session.query(ClPositionType). \ filter(ClPositionType.code == set_role.position).one() user_positions_count = self.db_session.query(SetUserPosition). 
\ filter(SetUserPosition.user_name_real == user_name_real). \ filter(SetUserPosition.position == position.code).count() if user_positions_count == 0: row = self.position_twidget.rowCount() self.position_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(position.description)) item.setData(Qt.UserRole, position.code) item.setCheckState(Qt.Checked) self.position_twidget.setItem(row, 0, item) for user_position in user_positions: position = self.db_session.query(ClPositionType). \ filter(ClPositionType.code == user_position.position).one() row = self.position_twidget.rowCount() self.position_twidget.insertRow(row) user_positions_count = self.db_session.query(SetUserPosition). \ filter(SetUserPosition.user_name_real == user_name_real). \ filter(SetUserPosition.position == position.code).count() item = QTableWidgetItem(u'{0}'.format(position.description)) item.setData(Qt.UserRole, position.code) if user_positions_count == 0: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.position_twidget.setItem(row, 0, item) @pyqtSlot() def on_load_default_settings_button_clicked(self): self.right_grud_twidget.setRowCount(0) cur_row = self.user_twidget.currentRow() item = self.user_twidget.item(cur_row, 0) user_name_real = item.data(Qt.UserRole) user = self.db_session.query(SetRole).filter_by(user_name_real = user_name_real).one() position_code = user.position position_gruds = self.db_session.query(SetPositionGroupRole). 
\ filter(SetPositionGroupRole.position == position_code).all() for position_grud in position_gruds: group_role = self.db_session.query(ClGroupRole).filter(ClGroupRole.code == position_grud.group_role).one() row = self.right_grud_twidget.rowCount() self.right_grud_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(group_role.description)) item.setData(Qt.UserRole, group_role.code) self.right_grud_twidget.setItem(row, 0, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_view: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 1, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_add: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 2, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_remove: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 3, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_update: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.right_grud_twidget.setItem(row, 4, item) @pyqtSlot(QTableWidgetItem) def on_settings_position_twidget_itemClicked(self, item): self.settings_right_grud_twidget.setRowCount(0) cur_row = self.settings_position_twidget.currentRow() item = self.settings_position_twidget.item(cur_row, 0) position_code = item.data(Qt.UserRole) position_gruds = self.db_session.query(SetPositionGroupRole).\ filter(SetPositionGroupRole.position == position_code).all() group_roles = self.db_session.query(ClGroupRole).all() for group_role in group_roles: position_grud_c = self.db_session.query(SetPositionGroupRole). \ filter(SetPositionGroupRole.position == position_code). 
\ filter(SetPositionGroupRole.group_role == group_role.code).count() if position_grud_c == 1: position_grud = self.db_session.query(SetPositionGroupRole). \ filter(SetPositionGroupRole.position == position_code).\ filter(SetPositionGroupRole.group_role == group_role.code).one() row = self.settings_right_grud_twidget.rowCount() self.settings_right_grud_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(group_role.description_en)) item.setData(Qt.UserRole, group_role.code) self.settings_right_grud_twidget.setItem(row, 0, item) item = QTableWidgetItem(u'{0}'.format(group_role.description)) item.setData(Qt.UserRole, group_role.code) self.settings_right_grud_twidget.setItem(row, 1, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_view: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.settings_right_grud_twidget.setItem(row, 2, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_add: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.settings_right_grud_twidget.setItem(row, 3, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_remove: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.settings_right_grud_twidget.setItem(row, 4, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) if not position_grud.r_update: item.setCheckState(Qt.Unchecked) else: item.setCheckState(Qt.Checked) self.settings_right_grud_twidget.setItem(row, 5, item) else: row = self.settings_right_grud_twidget.rowCount() self.settings_right_grud_twidget.insertRow(row) item = QTableWidgetItem(u'{0}'.format(group_role.description_en)) item.setData(Qt.UserRole, group_role.code) self.settings_right_grud_twidget.setItem(row, 0, item) item = QTableWidgetItem(u'{0}'.format(group_role.description)) item.setData(Qt.UserRole, group_role.code) 
self.settings_right_grud_twidget.setItem(row, 1, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) item.setCheckState(Qt.Unchecked) self.settings_right_grud_twidget.setItem(row, 2, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) item.setCheckState(Qt.Unchecked) self.settings_right_grud_twidget.setItem(row, 3, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) item.setCheckState(Qt.Unchecked) self.settings_right_grud_twidget.setItem(row, 4, item) item = QTableWidgetItem() item.setData(Qt.UserRole, group_role.code) item.setCheckState(Qt.Unchecked) self.settings_right_grud_twidget.setItem(row, 5, item) def __start_fade_out_timer(self): self.timer = QTimer() self.timer.timeout.connect(self.__fade_status_message) self.time_counter = 500 self.timer.start(10) def __fade_status_message(self): opacity = int(self.time_counter * 0.5) self.status_label.setStyleSheet("QLabel {color: rgba(255,0,0," + str(opacity) + ");}") self.status_label.setText(self.tr('Changes applied successfully.')) if self.time_counter == 0: self.timer.stop() self.time_counter -= 1 def __save_settings(self): try: self.__save_right_settings() self.__save_user_positions() self.__save_user_right_type() return True except exc.SQLAlchemyError, e: PluginUtils.show_error(self, self.tr("SQL Error"), e.message) return False def __save_user_right_type(self): selected_items = self.user_twidget.selectedItems() if len(selected_items) == 0: return cur_row = self.user_twidget.currentRow() item = self.user_twidget.item(cur_row, 0) user_name_real = item.data(Qt.UserRole) for row in range(self.right_grud_twidget.rowCount()): check_item = self.right_grud_twidget.item(row, 0) group_role = check_item.data(Qt.UserRole) user_right_count = self.db_session.query(SetUserGroupRole).\ filter(SetUserGroupRole.group_role == group_role) .\ filter(SetUserGroupRole.user_name_real == user_name_real).count() check_view_item = self.right_grud_twidget.item(row, 1) 
check_add_item = self.right_grud_twidget.item(row, 2) check_delete_item = self.right_grud_twidget.item(row, 3) check_update_item = self.right_grud_twidget.item(row, 4) if user_right_count == 0: user_right = SetUserGroupRole() user_right.user_name_real = user_name_real user_right.group_role = group_role if check_view_item.checkState() == Qt.Checked: user_right.r_view = True else: user_right.r_view = False if check_add_item.checkState() == Qt.Checked: user_right.r_add = True else: user_right.r_add = False if check_delete_item.checkState() == Qt.Checked: user_right.r_remove = True else: user_right.r_remove = False if check_update_item.checkState() == Qt.Checked: user_right.r_update = True else: user_right.r_update = False self.db_session.add(user_right) else: if user_right_count == 1: user_right = self.db_session.query(SetUserGroupRole). \ filter(SetUserGroupRole.group_role == group_role). \ filter(SetUserGroupRole.user_name_real == user_name_real).one() if check_view_item.checkState() == Qt.Checked: user_right.r_view = True else: user_right.r_view = False if check_add_item.checkState() == Qt.Checked: user_right.r_add = True else: user_right.r_add = False if check_delete_item.checkState() == Qt.Checked: user_right.r_remove = True else: user_right.r_remove = False if check_update_item.checkState() == Qt.Checked: user_right.r_update = True else: user_right.r_update = False def __save_user_positions(self): selected_items = self.user_twidget.selectedItems() if len(selected_items) == 0: return cur_row = self.user_twidget.currentRow() item = self.user_twidget.item(cur_row, 0) user_name_real = item.data(Qt.UserRole) for row in range(self.position_twidget.rowCount()): check_item = self.position_twidget.item(row, 0) position_code = check_item.data(Qt.UserRole) user_positions_count = self.db_session.query(SetUserPosition).\ filter(SetUserPosition.position == position_code) .\ filter(SetUserPosition.user_name_real == user_name_real).count() if check_item.checkState() == 
Qt.Checked: if user_positions_count == 0: user_position = SetUserPosition() user_position.user_name_real = user_name_real user_position.position = position_code self.db_session.add(user_position) else: if user_positions_count == 1: self.db_session.query(SetUserPosition). \ filter(SetUserPosition.position == position_code). \ filter(SetUserPosition.user_name_real == user_name_real).delete() def __save_right_settings(self): selected_items = self.settings_position_twidget.selectedItems() if len(selected_items) == 0: return cur_row = self.settings_position_twidget.currentRow() item = self.settings_position_twidget.item(cur_row, 0) position_code = item.data(Qt.UserRole) for row in range(self.settings_right_grud_twidget.rowCount()): group_role = self.settings_right_grud_twidget.item(row, 0).data(Qt.UserRole) position_gruds_c = self.db_session.query(SetPositionGroupRole). \ filter(SetPositionGroupRole.position == position_code). \ filter(SetPositionGroupRole.group_role == group_role).count() if position_gruds_c == 1: position_gruds = self.db_session.query(SetPositionGroupRole).\ filter(SetPositionGroupRole.position == position_code). 
\ filter(SetPositionGroupRole.group_role == group_role).one() check_view_item = self.settings_right_grud_twidget.item(row, 2) check_add_item = self.settings_right_grud_twidget.item(row, 3) check_delete_item = self.settings_right_grud_twidget.item(row, 4) check_update_item = self.settings_right_grud_twidget.item(row, 5) if check_view_item.checkState() == Qt.Checked: position_gruds.r_view = True else: position_gruds.r_view = False if check_add_item.checkState() == Qt.Checked: position_gruds.r_add = True else: position_gruds.r_add = False if check_delete_item.checkState() == Qt.Checked: position_gruds.r_remove = True else: position_gruds.r_remove = False if check_update_item.checkState() == Qt.Checked: position_gruds.r_update = True else: position_gruds.r_update = False else: position_gruds = SetPositionGroupRole() position_gruds.group_role = group_role position_gruds.position = position_code check_view_item = self.settings_right_grud_twidget.item(row, 2) check_add_item = self.settings_right_grud_twidget.item(row, 3) check_delete_item = self.settings_right_grud_twidget.item(row, 4) check_update_item = self.settings_right_grud_twidget.item(row, 5) if check_view_item.checkState() == Qt.Checked: position_gruds.r_view = True else: position_gruds.r_view = False if check_add_item.checkState() == Qt.Checked: position_gruds.r_add = True else: position_gruds.r_add = False if check_delete_item.checkState() == Qt.Checked: position_gruds.r_remove = True else: position_gruds.r_remove = False if check_update_item.checkState() == Qt.Checked: position_gruds.r_update = True else: position_gruds.r_update = False self.db_session.add(position_gruds) @pyqtSlot() def on_apply_button_clicked(self): if not self.__save_settings(): return self.db_session.commit() self.__start_fade_out_timer()
[ "aagii_csms@yahoo.com" ]
aagii_csms@yahoo.com
c2a62a2b0eee72815aacb4a39fe389383b1c3109
d7980f818fa101ca7c1e86df3b5dd39c75a58daf
/swmif/urls.py
c301bff1c785b94101ce3555c727b91927290941
[]
no_license
HarshSonawane/SmartIF
dbe39e35b3d6ac1e45d1255e1080c9a1dd80f929
ba91f421e28dad7372c7db503e109cba8c193518
refs/heads/master
2022-12-19T01:24:35.082665
2020-09-20T06:11:18
2020-09-20T06:11:18
227,466,838
0
1
null
2020-10-01T06:00:51
2019-12-11T21:49:23
JavaScript
UTF-8
Python
false
false
1,257
py
"""swmif URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.conf import settings from django.conf.urls.static import static from . import views urlpatterns = [ path('', views.index, name='landing'), path('user/',include('user.urls')), path('admin/', admin.site.urls), path('accounts/',include('allauth.urls')), path('rest-auth/', include('rest_auth.urls')), path('rest/', include('rest.urls')), ] urlpatterns = urlpatterns + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) urlpatterns = urlpatterns + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
[ "sonawaneharshwardhan@gmail.com" ]
sonawaneharshwardhan@gmail.com
f31e15dee4055a30fb9f5aa5ef69fe6ab9a62139
734719be1ef3ceb2de58c56949969a76170f65a6
/binary_tree_max_path_sum.py
6a260837d8e220f765e80290b5fc20c073aced69
[]
no_license
raunaqjain/leetcode_solutions
26e174eb98700951624ca83ab7661dbc35a23729
a373085712dba7971ff90bc982f3a36dffd14b5a
refs/heads/master
2023-01-09T02:42:28.871140
2020-11-01T01:43:20
2020-11-01T01:43:20
309,002,412
0
0
null
2020-11-01T01:43:21
2020-11-01T01:25:54
Python
UTF-8
Python
false
false
437
py
class Solution: def helper(self, root): if not root: return 0 left = max(0, self.helper(root.left)) right = max(0, self.helper(root.right)) self.ans = max(self.ans, left + right + root.val) return max(left, right) + root.val def maxPathSum(self, root: TreeNode) -> int: self.ans = float('-inf') self.helper(root) return self.ans
[ "jraunaq18@gmail.com" ]
jraunaq18@gmail.com
6afef4ba6551705e3a2732735b93faeda61ffeb7
63d302d31105ed9ce059d12d8d13b48c633e58a3
/part03-e13_read_series/src/read_series.py
16f604eab227a190338497ad9349a6497d0d7493
[]
no_license
doyu/hy-data-analysis-with-python-summer-2021
55ccc3a089d3865bd1ae89b92c9e1784c44e1935
1a34983d2c3b9a20473d16209ba8a74f9d68daf2
refs/heads/main
2023-08-22T02:14:05.785821
2021-09-29T11:41:59
2021-09-29T11:41:59
null
0
0
null
null
null
null
UTF-8
Python
false
false
592
py
#!/usr/bin/env python3 import pandas as pd def read_series(): serie = pd.Series([], dtype='object') line = input() value_list = [] index_list = [] while line != "": try: line = line.split() value = line[1] index = line[0] index_list.append(index) value_list.append(value) except: print("Error") line = input() serie2 = pd.Series(value_list, index = index_list) return serie.append(serie2) def main(): print(read_series()) if __name__ == "__main__": main()
[ "hiroshi.doyu@gmail.com" ]
hiroshi.doyu@gmail.com
90bc1bde27330e94fc612e4da753803e61b9d6f6
eb5c9aa97ecaded5f68167fc0220539ba3e4b1b7
/Q28_Implement strStr()_ver2.py
d005f2e4cecbac082c5effc5dc5c19dfeb793a03
[]
no_license
Annie-Chu/practice-weekly
27e7b518a08f7a69f07a1c1592050cbad014bddc
07c7dbbfa1465eae650d3b58e99bbcc2ef211226
refs/heads/master
2022-10-20T00:09:35.275016
2020-06-11T06:58:08
2020-06-11T06:58:08
263,295,443
0
0
null
null
null
null
UTF-8
Python
false
false
273
py
def strStr(haystack: str, needle: str) -> int: if needle == '': return 0 elif needle not in haystack: print(-1) else: string = haystack.split(needle) print(len(string[0])) if __name__ == '__main__': strStr("hello", "ll")
[ "anniechu65@gmail.com" ]
anniechu65@gmail.com
5505cd4011c837c9e22cf9e9d81addb8442e050d
11cd362cdd78c2fc48042ed203614b201ac94aa6
/apps/oozie/src/oozie/migrations/0005_initial.py
2688a433ed8dcc89995fc5f9b23a9defb2088449
[ "CC-BY-3.0", "LicenseRef-scancode-other-copyleft", "LicenseRef-scancode-unknown-license-reference", "ZPL-2.0", "Unlicense", "LGPL-3.0-only", "CC0-1.0", "LicenseRef-scancode-other-permissive", "CNRI-Python", "LicenseRef-scancode-warranty-disclaimer", "GPL-2.0-or-later", "Python-2.0", "GPL-3.0-only", "CC-BY-4.0", "LicenseRef-scancode-jpython-1.1", "AFL-2.1", "JSON", "WTFPL", "MIT", "LicenseRef-scancode-generic-exception", "LicenseRef-scancode-jython", "GPL-3.0-or-later", "LicenseRef-scancode-python-cwi", "BSD-3-Clause", "LGPL-3.0-or-later", "Zlib", "LicenseRef-scancode-free-unknown", "Classpath-exception-2.0", "LicenseRef-scancode-proprietary-license", "GPL-1.0-or-later", "LGPL-2.0-or-later", "MPL-2.0", "ISC", "GPL-2.0-only", "ZPL-2.1", "BSL-1.0", "Apache-2.0", "LGPL-2.0-only", "LicenseRef-scancode-public-domain", "Xnet", "BSD-2-Clause" ]
permissive
cloudera/hue
b42343d0e03d2936b5a9a32f8ddb3e9c5c80c908
dccb9467675c67b9c3399fc76c5de6d31bfb8255
refs/heads/master
2023-08-31T06:49:25.724501
2023-08-28T20:45:00
2023-08-28T20:45:00
732,593
5,655
2,244
Apache-2.0
2023-09-14T03:05:41
2010-06-21T19:46:51
JavaScript
UTF-8
Python
false
false
1,402
py
# -*- coding: utf-8 -*- # Generated by Django 1.11 on 2018-06-06 18:55 from __future__ import unicode_literals from django.conf import settings import django.core.validators from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('oozie', '0004_initial'), ] operations = [ migrations.AddField( model_name='link', name='child', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='parent_node', to='oozie.Node', verbose_name=b''), ), migrations.AddField( model_name='link', name='parent', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='child_node', to='oozie.Node'), ), migrations.AddField( model_name='job', name='owner', field=models.ForeignKey(help_text='Person who can modify the job.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'), ), migrations.AddField( model_name='history', name='job', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='oozie.Job'), ), ]
[ "romain.rigaux@gmail.com" ]
romain.rigaux@gmail.com
9361bb61cd85e87954f06cd6e55599d0840b2082
efd9c0d47e94dbac8e6e700f45c2f7747ded094b
/Email_Template/Email/migrations/0003_auto_20181023_1457.py
2f8bca92437323d6af215b43d5f72c3c90b616eb
[]
no_license
SoumyaPuj/EmployeeDatabaseRequirement
dd1b2f21c13f47835e7390c3f831f5d96ef611b6
d9b3fe22e2327af67aaf6e8d47e50dea30aa01c5
refs/heads/master
2020-04-04T19:04:52.513386
2018-11-05T11:32:13
2018-11-05T11:32:13
156,191,071
0
0
null
null
null
null
UTF-8
Python
false
false
362
py
# Generated by Django 2.1.2 on 2018-10-23 09:27 from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('Email', '0002_auto_20181023_1456'), ] operations = [ migrations.RenameModel( old_name='Email_Design', new_name='Email_Information', ), ]
[ "noreply@github.com" ]
SoumyaPuj.noreply@github.com
0acfd5a67da2070d576a8fb6eb33f195f4b5c0d5
1debf486be97ea914c49f69208ab56b4a4d3c013
/lab.py
894ef26c5241bbbd05400b303fb2abde0df3dc23
[]
no_license
XingyuHe/Monopoly
ee483387a723dc8ce2511d75ba86021db6a5acde
00e9d7e963af29501daac988b966bc03ab276353
refs/heads/master
2021-08-16T16:35:46.013197
2017-11-20T04:22:24
2017-11-20T04:22:24
null
0
0
null
null
null
null
UTF-8
Python
false
false
388
py
import json from cs110graphics import * monopoly_data = json.load(open("monopoly.json")) print (monopoly_data) options = monopoly_data['CHANCE']['options'] print(options) print(len(options)) print(options[str(2)]) # def program(win): # win.set_height(1600) # win.set_width(1600) # rect1 = Rectangle(win) # win.add(rect1) # def main(): # StartGraphicsSystem(program)
[ "32248504+PeterWiIIiam@users.noreply.github.com" ]
32248504+PeterWiIIiam@users.noreply.github.com
06d58311b1ea626d1a63e334daea32050b9d3781
0c06237432abf0ebbcde87bb1c05d290e1400da6
/app/graph/views.py
6a26b0596deaa5ed12a913553ee8dc693b17d488
[]
no_license
stevefusaro/timelight
a03e7bda9d3e4977998a460a2fc277d9f1d00648
67783e373dcd549b856b03db37b09fd8d297ad67
refs/heads/master
2021-01-11T02:58:56.550641
2017-07-07T20:12:37
2017-07-07T20:12:37
70,871,959
0
0
null
null
null
null
UTF-8
Python
false
false
1,370
py
from rest_framework import authentication from rest_framework.viewsets import ViewSet from rest_framework.decorators import list_route from rest_framework.response import Response from neo4j.v1 import GraphDatabase, basic_auth driver = GraphDatabase.driver("bolt://localhost:7687", auth=basic_auth("neo4j", "ezpass"), encrypted=False) def _run_query(query, params=None): params = params or {} resp = [] with driver.session() as session: with session.begin_transaction() as tx: for row in tx.run(query): resp.append(row) return resp class GraphApi(ViewSet): authentication_classes = (authentication.TokenAuthentication,) permission_classes = () @list_route(methods=['get']) def q(self, request): query = "MATCH (a:Person) return a" return Response(_run_query(query)) @list_route(methods=['get']) def label_nodes(self, request): label = request.GET.get('label') assert label, 'Label is required in GET' query = "MATCH (person:{label}) RETURN person LIMIT 200".format(label=label) rows = _run_query(query, params={'label': label}) nodes = [row[0].__dict__ for row in rows] # keys: labels, properties, id for node in nodes: node['labels'] = list(node['labels']) # convert from set return Response(nodes)
[ "sfusaro1@gmail.com" ]
sfusaro1@gmail.com
3fc4cd0005e2760cac083464668dae41e0b0d4f9
f8b585a7132343a5da159d32966111fedd39e3d8
/LogDefer.py
509ca98e9057c2cf37db6a77a433b52e419dcccd
[]
no_license
mikep/LogDefer
54e471ca5d607a51547691599af86d44f96aa3b4
e12de81eba399a433410196605f3b66643dc0833
refs/heads/master
2016-09-06T20:07:22.536529
2013-12-22T00:50:08
2013-12-22T00:50:08
15,367,887
1
2
null
null
null
null
UTF-8
Python
false
false
4,065
py
import json import os import re import time __version__ = "0.1.0" class LogDefer(object): """ Generate log object conpatible with log-defer-viz https://github.com/hoytech/Log-Defer-Viz """ def __init__(self, options={}): self.levels = (40, 30, 20, 10) self.message = { 'start': time.time(), 'logs': [], 'timers': {}, 'data': {}, } def add_message(self, level='30', message="", data=None, *args): """ Add message to log object """ log = [self._get_et(), level, message] if data: if args: for arg in args: data = dict(list(data.items()) + list(arg.items())) log.append(data) self.message['logs'].append(log) def timer(self, name=None): """ Add timer to log object, If timer already exists, set the end time. """ self.name = name if name and name not in self.message['timers']: self.message['timers'][name] = { 'start': self._get_et(), 'name': name, } else: self.message['timers'][name]['end'] = self._get_et() return self def __enter__(self): self.timer(self.name) def __exit__(self, a, b, c): self.timer(self.name) def data(self, d=None): """ Add data to log object """ if d: self.message['data'] = dict( list(self.message['data'].items()) + list(d.items()) ) def finalize_log(self): """ Format and return the log object for logging. """ self.__format_log_message_output__() return self.__log_message_json__() def __format_log_message_output__(self): # Clean up, log-defer-viz doesn't like empty objects. for key in ('logs', 'timers', 'data'): if self.message[key] == [] or self.message[key] == {}: del self.message[key] # Convert timer to list. if 'timers' in self.message: timers = [] for timer in self.message['timers']: timers.append([ self.message['timers'][timer]['name'], self.message['timers'][timer]['start'], self.message['timers'][timer].get('end', self._get_et()) ]) self.message['timers'] = timers # Record end time. 
self.message['end'] = self._get_et() def __log_message_json__(self): try: return json.dumps(self.message) except: def serialize_fix(m): try: for i, x in enumerate(m): try: if type(m) == dict: json.dumps(m[x]) else: json.dumps(x) except: if type(m) == dict: m[x] = serialize_fix(m[x]) elif type(m) == list: m[i] = serialize_fix(x) else: m[x] = str(x) return m except: return str(m) return json.dumps(serialize_fix(self.message)) # Log level functions def error(self, message='', data=None, *args): self.add_message(10, message, data, *args) def warn(self, message='', data=None, *args): self.add_message(20, message, data, *args) def info(self, message='', data=None, *args): self.add_message(30, message, data, *args) def debug(self, message='', data=None, *args): self.add_message(40, message, data, *args) # Util functions def _get_et(self): """ log-defer-viz uses time since the start time in logs and timers """ return time.time() - self.message['start']
[ "michael.pucyk@gmail.com" ]
michael.pucyk@gmail.com
5ba300fb8fe455146525b436819e316a5e780da1
163bbb4e0920dedd5941e3edfb2d8706ba75627d
/Code/CodeRecords/2811/61132/294777.py
4cbb3ce02f1703d0fb35813ef04ff2bc5e50a6e3
[]
no_license
AdamZhouSE/pythonHomework
a25c120b03a158d60aaa9fdc5fb203b1bb377a19
ffc5606817a666aa6241cfab27364326f5c066ff
refs/heads/master
2022-11-24T08:05:22.122011
2020-07-28T16:21:24
2020-07-28T16:21:24
259,576,640
2
1
null
null
null
null
UTF-8
Python
false
false
237
py
p,n=map(int,input().split()) l=[] for i in range(n): l.append(int(input())) dic={} for pos,i in enumerate(l): key=i%p if dic.get(key,'')=='': print(pos+1) break else: dic[key]=i else: print(-1)
[ "1069583789@qq.com" ]
1069583789@qq.com
e400e3f7cfee1b0808a278fe8e94120ceb12437e
692b907d07eee8ce3ee32a1fda74b6d92fd6c548
/tests/models/validators/v1_3_0/jsd_d9bdb9034df99dba.py
4f3ce36a7f1b34bd26fe19e07e1dc62094323ae1
[ "MIT" ]
permissive
AltusConsulting/dnacentercli
04c9c7d00b25753a26c643994388dd4e23bf4c54
26ea46fdbd40fc30649ea1d8803158655aa545aa
refs/heads/master
2022-12-16T04:50:30.076420
2020-07-17T22:12:39
2020-07-17T22:12:39
212,206,213
0
0
MIT
2022-12-08T06:39:49
2019-10-01T21:50:42
Python
UTF-8
Python
false
false
2,451
py
# -*- coding: utf-8 -*- """DNA Center Get Site Count data model. Copyright (c) 2019 Cisco and/or its affiliates. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from __future__ import ( absolute_import, division, print_function, unicode_literals, ) import fastjsonschema import json from dnacentersdk.exceptions import MalformedRequest from builtins import * class JSONSchemaValidatorD9BdB9034Df99Dba(object): """Get Site Count request schema definition.""" def __init__(self): super(JSONSchemaValidatorD9BdB9034Df99Dba, self).__init__() self._validator = fastjsonschema.compile(json.loads( '''{ "properties": { "response": { "description": "Response", "type": [ "string", "null" ] }, "version": { "description": "Version", "type": [ "string", "null" ] } }, "type": "object" }'''.replace("\n" + ' ' * 16, '') )) def validate(self, request): try: self._validator(request) except fastjsonschema.exceptions.JsonSchemaException as e: raise MalformedRequest( '{} is invalid. Reason: {}'.format(request, e.message) )
[ "wastorga@altus.co.cr" ]
wastorga@altus.co.cr
038769006e9dcbff4aa1248ab9f5b7c86a38959a
5cd04ee165edb98c80fdfab4ca2ceaf3352f3a60
/cflearn/models/ddr/loss.py
4ae8915ad1fab6995fceed631a5eb62fe2106b0f
[ "MIT" ]
permissive
adbmd/carefree-learn
f99e620ead71e15d7e91c0a74bb564e05afa8ba5
10970de9e9b96673f56104bf410bbd4927e86334
refs/heads/master
2022-12-21T07:48:28.780174
2020-08-01T02:37:23
2020-08-01T02:37:23
null
0
0
null
null
null
null
UTF-8
Python
false
false
16,539
py
import torch import logging import torch.nn as nn from typing import * from cftool.ml import Anneal from cftool.misc import LoggingMixin from ...misc.toolkit import tensor_dict_type from ...modules.auxiliary import MTL class DDRLoss(nn.Module, LoggingMixin): def __init__(self, config: Dict[str, Any], device: torch.device): super().__init__() self._joint_training = config["joint_training"] self._use_dynamic_dual_loss_weights = config["use_dynamic_weights"] self._use_anneal, self._anneal_step = config["use_anneal"], config["anneal_step"] self._median_pressure = config.setdefault("median_pressure", 3.) self._median_pressure_inv = 1. / self._median_pressure self.mtl = MTL(16, config["mtl_method"]) self._target_loss_warned = False self._zero = torch.zeros([1], dtype=torch.float32).to(device) if self._use_anneal: anneal_config = config.setdefault("anneal_config", {}) anneal_methods = anneal_config.setdefault("methods", {}) anneal_ratios = anneal_config.setdefault("ratios", {}) anneal_floors = anneal_config.setdefault("floors", {}) anneal_ceilings = anneal_config.setdefault("ceilings", {}) default_anneal_methods = { "median_anneal": "linear", "main_anneal": "linear", "monotonous_anneal": "sigmoid", "anchor_anneal": "linear", "dual_anneal": "sigmoid", "recover_anneal": "sigmoid", "pressure_anneal": "sigmoid" } default_anneal_ratios = { "median_anneal": 0.25, "main_anneal": 0.25, "monotonous_anneal": 0.2, "anchor_anneal": 0.2, "dual_anneal": 0.75, "recover_anneal": 0.75, "pressure_anneal": 0.5 } default_anneal_floors = { "median_anneal": 1., "main_anneal": 0., "monotonous_anneal": 0., "anchor_anneal": 0., "dual_anneal": 0., "recover_anneal": 0., "pressure_anneal": 0. 
} default_anneal_ceilings = { "median_anneal": 2.5, "main_anneal": 0.8, "monotonous_anneal": 2.5, "anchor_anneal": 2., "dual_anneal": 0.1, "recover_anneal": 0.1, "pressure_anneal": 1., } for anneal in default_anneal_methods: anneal_methods.setdefault(anneal, default_anneal_methods[anneal]) anneal_ratios.setdefault(anneal, default_anneal_ratios[anneal]) anneal_floors.setdefault(anneal, default_anneal_floors[anneal]) anneal_ceilings.setdefault(anneal, default_anneal_ceilings[anneal]) for anneal in default_anneal_methods: attr = f"_{anneal}" if anneal_methods[anneal] is None: setattr(self, attr, None) else: setattr(self, attr, Anneal( anneal_methods[anneal], round(self._anneal_step * anneal_ratios[anneal]), anneal_floors[anneal], anneal_ceilings[anneal] )) def forward(self, predictions: tensor_dict_type, target: torch.Tensor, *, check_monotonous_only: bool = False) -> Tuple[torch.Tensor, tensor_dict_type]: # anneal if not self._use_anneal or not self.training or check_monotonous_only: main_anneal = median_anneal = None monotonous_anneal = anchor_anneal = None dual_anneal = recover_anneal = pressure_anneal = None else: main_anneal = None if self._main_anneal is None else self._main_anneal.pop() median_anneal = None if self._median_anneal is None else self._median_anneal.pop() monotonous_anneal = None if self._monotonous_anneal is None else self._monotonous_anneal.pop() anchor_anneal = None if self._median_anneal is None else self._anchor_anneal.pop() dual_anneal = None if self._median_anneal is None else self._dual_anneal.pop() recover_anneal = None if self._median_anneal is None else self._recover_anneal.pop() pressure_anneal = None if self._pressure_anneal is None else self._pressure_anneal.pop() self._last_main_anneal, self._last_pressure_anneal = main_anneal, pressure_anneal if self._use_anneal and check_monotonous_only: main_anneal, pressure_anneal = self._last_main_anneal, self._last_pressure_anneal # median median = predictions["predictions"] median_loss = 
nn.functional.l1_loss(median, target) if median_anneal is not None: median_loss = median_loss * median_anneal # get anchor_batch, cdf_raw = map(predictions.get, ["anchor_batch", "cdf_raw"]) sampled_anchors, sampled_cdf_raw = map(predictions.get, ["sampled_anchors", "sampled_cdf_raw"]) quantile_batch, median_residual, quantile_residual, quantile_sign = map( predictions.get, ["quantile_batch", "median_residual", "quantile_residual", "quantile_sign"]) sampled_quantiles, sampled_quantile_residual = map( predictions.get, ["sampled_quantiles", "sampled_quantile_residual"]) cdf_gradient, quantile_residual_gradient = map( predictions.get, ["cdf_gradient", "quantile_residual_gradient"]) dual_quantile, quantile_cdf_raw = map(predictions.get, ["dual_quantile", "quantile_cdf_raw"]) dual_cdf, cdf_quantile_residual = map(predictions.get, ["dual_cdf", "cdf_quantile_residual"]) # cdf fetch_cdf = cdf_raw is not None cdf_anchor_loss = cdf_monotonous_loss = None if not fetch_cdf or check_monotonous_only: cdf_loss = cdf_losses = None else: cdf_losses = self._get_cdf_loss(target, cdf_raw, anchor_batch, False) if main_anneal is not None: cdf_losses = cdf_losses * main_anneal cdf_loss = cdf_losses.mean() if sampled_cdf_raw is not None: cdf_anchor_loss = self._get_cdf_loss(target, sampled_cdf_raw, sampled_anchors, True) if anchor_anneal is not None: cdf_anchor_loss = cdf_anchor_loss * anchor_anneal # cdf monotonous if cdf_gradient is not None: cdf_monotonous_loss = nn.functional.relu(-cdf_gradient).mean() if anchor_anneal is not None: cdf_monotonous_loss = cdf_monotonous_loss * monotonous_anneal # quantile fetch_quantile = quantile_residual is not None quantile_anchor_loss = quantile_monotonous_loss = None if not fetch_quantile or check_monotonous_only: median_residual_loss = quantile_loss = quantile_losses = None else: target_median_residual = target - predictions["median_detach"] median_residual_loss = self._get_median_residual_loss( target_median_residual, median_residual, 
quantile_sign) if anchor_anneal is not None: median_residual_loss = median_residual_loss * anchor_anneal quantile_losses = self._get_quantile_residual_loss( target_median_residual, quantile_residual, quantile_batch, False) quantile_loss = quantile_losses.mean() + median_residual_loss if main_anneal is not None: quantile_loss = quantile_loss * main_anneal if sampled_quantile_residual is not None: quantile_anchor_loss = self._get_quantile_residual_loss( target_median_residual, sampled_quantile_residual, sampled_quantiles, True ) if anchor_anneal is not None: quantile_anchor_loss = quantile_anchor_loss * anchor_anneal # median pressure if not fetch_quantile: median_pressure_loss = None else: median_pressure_loss = self._get_median_pressure_loss(predictions) if pressure_anneal is not None: median_pressure_loss = median_pressure_loss * pressure_anneal # quantile monotonous quantile_monotonous_losses = [] if quantile_residual_gradient is not None: quantile_monotonous_losses.append(nn.functional.relu(-quantile_residual_gradient).mean()) if median_residual is not None and quantile_sign is not None: quantile_monotonous_losses.append( self._get_median_residual_monotonous_loss(median_residual, quantile_sign)) if quantile_monotonous_losses: quantile_monotonous_loss = sum(quantile_monotonous_losses) if anchor_anneal is not None: quantile_monotonous_loss = quantile_monotonous_loss * monotonous_anneal # dual if not self._joint_training or not fetch_cdf or not fetch_quantile or check_monotonous_only: dual_cdf_loss = dual_quantile_loss = None cdf_recover_loss = quantile_recover_loss = None else: # dual cdf (cdf -> quantile [recover loss] -> cdf [dual loss]) quantile_recover_loss, quantile_recover_losses, quantile_recover_loss_weights = \ self._get_dual_recover_loss(dual_quantile, anchor_batch, cdf_losses) if quantile_cdf_raw is None: dual_quantile_loss = None else: dual_quantile_losses = self._get_cdf_loss(target, quantile_cdf_raw, anchor_batch, False) if quantile_recover_losses is 
None or not self._use_dynamic_dual_loss_weights: dual_quantile_loss_weights = 1. else: quantile_recover_losses_detach = quantile_recover_losses.detach() dual_quantile_loss_weights = 0.5 * ( quantile_recover_loss_weights + 1 / (1 + 2 * torch.tanh(quantile_recover_losses_detach))) dual_quantile_loss = (dual_quantile_losses * dual_quantile_loss_weights).mean() # dual quantile (quantile -> cdf [recover loss] -> quantile [dual loss]) cdf_recover_loss, cdf_recover_losses, cdf_recover_loss_weights = \ self._get_dual_recover_loss(dual_cdf, quantile_batch, quantile_losses) if cdf_quantile_residual is None: dual_cdf_loss = None else: dual_cdf_losses = self._get_quantile_residual_loss( target, cdf_quantile_residual, quantile_batch, False) if cdf_recover_losses is None or not self._use_dynamic_dual_loss_weights: dual_cdf_loss_weights = 1. else: cdf_recover_losses_detach = cdf_recover_losses.detach() dual_cdf_loss_weights = 0.5 * ( cdf_recover_loss_weights + 1 / (1 + 10 * cdf_recover_losses_detach)) dual_cdf_loss = (dual_cdf_losses * dual_cdf_loss_weights).mean() + median_residual_loss if dual_anneal is not None: if dual_cdf_loss is not None: dual_cdf_loss = dual_cdf_loss * dual_anneal if dual_quantile_loss is not None: dual_quantile_loss = dual_quantile_loss * dual_anneal if recover_anneal is not None: if cdf_recover_loss is not None: cdf_recover_loss = cdf_recover_loss * recover_anneal if quantile_recover_loss is not None: quantile_recover_loss = quantile_recover_loss * recover_anneal # combine if check_monotonous_only: losses = {} else: losses = {"median": median_loss} if not self._joint_training: if cdf_anchor_loss is not None: losses["cdf_anchor"] = cdf_anchor_loss if quantile_anchor_loss is not None: losses["quantile_anchor"] = quantile_anchor_loss else: if fetch_cdf: losses["cdf"] = cdf_loss if cdf_anchor_loss is not None: losses["cdf_anchor"] = cdf_anchor_loss if fetch_quantile: losses["quantile"] = quantile_loss if quantile_anchor_loss is not None: 
losses["quantile_anchor"] = quantile_anchor_loss if fetch_cdf and fetch_quantile: losses["quantile_recover"], losses["cdf_recover"] = quantile_recover_loss, cdf_recover_loss losses["dual_quantile"], losses["dual_cdf"] = dual_quantile_loss, dual_cdf_loss if median_residual_loss is not None: losses["median_residual_loss"] = median_residual_loss if median_pressure_loss is not None: key = "synthetic_median_pressure_loss" if check_monotonous_only else "median_pressure_loss" losses[key] = median_pressure_loss if cdf_monotonous_loss is not None: key = "synthetic_cdf_monotonous" if check_monotonous_only else "cdf_monotonous" losses[key] = cdf_monotonous_loss if quantile_monotonous_loss is not None: key = "synthetic_quantile_monotonous" if check_monotonous_only else "quantile_monotonous" losses[key] = quantile_monotonous_loss if not losses: return self._zero, {"loss": self._zero} if not self.mtl.registered: self.mtl.register(losses.keys()) return self.mtl(losses), losses def _get_dual_recover_loss(self, dual_prediction, another_input_batch, another_losses): if dual_prediction is None: recover_loss = recover_losses = recover_loss_weights = None else: recover_losses = torch.abs(another_input_batch - dual_prediction) if not self._use_dynamic_dual_loss_weights: recover_loss_weights = 1. 
else: another_losses_detach = another_losses.detach() recover_loss_weights = 1 / (1 + 2 * torch.tanh(another_losses_detach)) recover_loss = (recover_losses * recover_loss_weights).mean() return recover_loss, recover_losses, recover_loss_weights @staticmethod def _get_cdf_loss(target, cdf_raw, anchor_batch, reduce): indicative = (target <= anchor_batch).to(torch.float32) cdf_losses = -indicative * cdf_raw + nn.functional.softplus(cdf_raw) return cdf_losses if not reduce else cdf_losses.mean() @staticmethod def _get_median_residual_monotonous_loss(median_residual, quantile_sign): return nn.functional.relu(-median_residual * quantile_sign).mean() @staticmethod def _get_quantile_residual_loss(target_residual, quantile_residual, quantile_batch, reduce): quantile_error = target_residual - quantile_residual quantile_losses = torch.max(quantile_batch * quantile_error, (quantile_batch - 1) * quantile_error) return quantile_losses if not reduce else quantile_losses.mean() def _get_median_residual_loss(self, target_median_residual, median_residual, quantile_sign): same_sign_mask = quantile_sign * torch.sign(target_median_residual) > 0 tmr, mr = map(lambda tensor: tensor[same_sign_mask], [target_median_residual, median_residual]) median_residual_mae = self._median_pressure * torch.abs(tmr - mr).mean() residual_monotonous_loss = DDRLoss._get_median_residual_monotonous_loss(median_residual, quantile_sign) return median_residual_mae + residual_monotonous_loss def _get_median_pressure_loss(self, predictions): pressure_pos_dict, pressure_neg_dict = map( predictions.get, map(lambda attr: f"pressure_sub_quantile_{attr}_dict", ["pos", "neg"])) additive_pos, additive_neg = pressure_pos_dict["add"], pressure_neg_dict["add"] multiply_pos, multiply_neg = pressure_pos_dict["mul"], pressure_neg_dict["mul"] # additive net & multiply net are tend to be zero here # because median pressure batch receives 0.5 as input return sum( torch.max( -self._median_pressure * sub_quantile, 
self._median_pressure_inv * sub_quantile ).mean() for sub_quantile in [ additive_pos, -additive_neg, multiply_pos, multiply_neg ] ) __all__ = ["DDRLoss"]
[ "syameimaru_kurumi@pku.edu.cn" ]
syameimaru_kurumi@pku.edu.cn
b2f8bcc16221469eed37b0ab56a7404cf8dc0a34
3d709e8a007a8c46c8ece0b63407551a06a47bf1
/app/accounts/__init__.py
9bbf50c3aafc7efb11440d6c8e8559e6b65cab46
[]
no_license
lassilaiho/recipe-book
4f74422790ed9bb7d810d1fdff1ebf2c0610a6c9
379dd7b52ef15272dfdba24e668f4ddc7fc896b3
refs/heads/master
2023-06-21T22:37:29.983415
2021-07-24T13:24:24
2021-07-24T13:24:24
233,632,351
0
0
null
2021-03-20T02:44:35
2020-01-13T15:49:03
HTML
UTF-8
Python
false
false
172
py
from app.main import login_manager from app.accounts.models import Account @login_manager.user_loader def load_user(account_id): return Account.query.get(account_id)
[ "lassi.laiho0@gmail.com" ]
lassi.laiho0@gmail.com
609e132e00c44ec7d44b98d5322c72c6ba7dd196
db5952a4ecb74177d8acdcc1440af65a540ba07e
/abbrev.py
6df1f73e9d1fc70481055894247005d7c95acf5a
[]
no_license
dividedmind/moviesorter
a39aaf03f5ac0984d4a57cce2fcf87b320a59bdf
f2d57d184d63ccc35bd0d1b10dde7de803da5aae
refs/heads/master
2021-01-23T15:42:35.272179
2011-08-22T11:55:10
2011-08-22T11:55:10
null
0
0
null
null
null
null
UTF-8
Python
false
false
211
py
# -*- coding: utf-8 -*- from google.appengine.ext import webapp register = webapp.template.create_template_register() @register.filter def abbrev(name): return ''.join([word[0] for word in name.split()])
[ "divided.mind@gmail.com" ]
divided.mind@gmail.com
beaa4c42310beb20c73ad1cf96be7aa287176ab4
b1d90fa399c2f4cb1f5eba0846d60d72044fc4b9
/wASmaster/configure_was/configure_servers_and_cluster.py
c1bbfa285f3c144a3c45f9b43aa241d97a38b4e4
[]
no_license
igmatovina/webSphere-automatization
a497b0ec70b1bee833082c58410ed4409e1ae84b
c4ec7fdba9d57ce176b7186dfd6697c95ebd6214
refs/heads/main
2023-01-02T00:34:02.523424
2020-10-19T11:10:29
2020-10-19T11:10:29
305,355,822
1
0
null
null
null
null
UTF-8
Python
false
false
2,126
py
#!/usr/bin/python # -*- coding: utf-8 -*- # exit() from java.lang import System as jvm import sys sys.modules['AdminConfig'] = AdminConfig sys.modules['AdminControl'] = AdminControl sys.modules['AdminApp'] = AdminApp sys.modules['AdminTask'] = AdminTask sys.modules['Help'] = Help import myfunctions as fl import xml.etree.ElementTree as ET try: tree = ET.parse('config/new_cluster.xml') root = tree.getroot() scopes = root.findall('.//scope') except: e = sys.exc_info() sys.exit(e) def createserver(node, server_name): AdminTask.createApplicationServer(node, ['-name', server_name]) def converttocluster(server_scope, cluster_name): AdminConfig.convertToCluster(server_scope, cluster_name) def createclustermember(cluster, node, server_name): AdminConfig.createClusterMember(cluster, node, [['memberName', server_name]]) for scope in scopes: scope_attributes = scope.attrib cluster_scope_type = fl.getScopeType(scope_attributes) cluster_name = scope.attrib['Cluster'] print cluster_name members = scope.findall('.//member') for member in members: member_attributes = member.attrib node_name = member.attrib['Node'] server_name = member.attrib['Server'] scope_type = fl.getScopeType(member_attributes) if AdminClusterManagement.checkIfClusterExists(cluster_name) \ == 'false': createserver(node_name, server_name) server_id = fl.getScopeId(scope_type, member_attributes) converttocluster(server_id, cluster_name) elif AdminClusterManagement.checkIfClusterMemberExists(cluster_name, server_name) == 'false': clusterid = fl.getScopeId(cluster_scope_type, scope_attributes) node_id = fl.getNodeId(node_name) createclustermember(clusterid, node_id, server_name) else: print '' print 'Cluster ' + cluster_name + ' and cluster member ' \ + server_name + ' already exist' print '' AdminConfig.save()
[ "noreply@github.com" ]
igmatovina.noreply@github.com
c61b3a1fac58f75d6fca03e86ed4fb7ac6661149
63b6c1ff3827055d05a92aaf9c6cfb5c5617bccc
/compositionDuMahal/admin.py
bc70c0c1f2c6aeef1f0c5e2eee5b48c98b9a5bda
[]
no_license
Slam0810/django
990deb8f615036ead078304c3298f246a8e5002e
d81923534c1eb0e330de91433fed72a42b922ee6
refs/heads/master
2023-03-27T05:32:24.776629
2021-03-23T08:59:34
2021-03-23T08:59:34
345,477,458
0
0
null
null
null
null
UTF-8
Python
false
false
884
py
from django.contrib import admin from .models import Produit, Contact, Reservation, Presentation # Register your models here. #admin.site.register(Produit) admin.site.register(Contact) class ReservationAdmin(admin.ModelAdmin): list_display = ('created_at', 'contacted') readonly_fields = ('created_at', 'contacted') def has_add_permission(self, request): return False class PresentationAdmin(admin.ModelAdmin): list_display = ('nom', 'fonction', 'metier') class ProduitAdmin(admin.ModelAdmin): list_display =('nom', 'image', 'description','etat', 'caracteristic', 'date') list_filter = ('nom',) date_hierarchy = 'date' ordering = ('date',) search_fields = ('nom', 'etat', 'caracteristic') admin.site.register(Produit, ProduitAdmin) admin.site.register(Presentation, PresentationAdmin) admin.site.register(Reservation, ReservationAdmin)
[ "birama.tour@gmail.com" ]
birama.tour@gmail.com
66e3f4caa9591de9d0af524ee3a95b967482a74a
0a9e171dbce02947a88290ee16e0fb90e796428a
/Linear_Regression/multi_linreg.py
e9f2773099fcceb018b319c0c3abbda27822ce9d
[]
no_license
sanjmohan/MLExercises
e3416990215a0c101f44093ddaf42816ea8d742c
a71698c794a858496d250ea5a4f9dc6ff55cf171
refs/heads/master
2021-01-12T05:32:42.817911
2020-04-21T22:09:32
2020-04-21T22:09:32
77,121,207
0
0
null
null
null
null
UTF-8
Python
false
false
3,569
py
# Multiple Linear Regression on Housing Price given 13 attributes # First time using theano! import numpy as np import theano from theano import tensor as T from theano import function import matplotlib.pyplot as plt def load_data(): # Features separated by space, examples separated by line breaks # Load first 13 values as feature, load last value as target # Matrix of all examples (row = example, column = feature) x = [] # matrix of all target values y = [] # read file of data with open('Data/housing_data.txt') as f: for line in f: if line != "": ex = [float(i) for i in line.split(' ') if i != ''] # first "attribute" is 1 as placeholder for theta_0 x.append([1] + ex[:-1]) y.append(ex[-1]) return x, y def normalize(data): # Rescale features to lie on range [0, 1] # Transpose => each row is a feature xT = np.asarray(data).T # Skip first placeholder "feature" for i in range(1, len(xT)): feature = xT[i] min_val = min(feature) max_val = max(feature) feature = (feature - min_val) / (max_val - min_val) xT[i] = feature return (xT.T).tolist() x_in, y_in = load_data() x_in = normalize(x_in) num_test = 100 num_train = len(x_in) - num_test # leave last num_test examples for testing train_x = np.asarray(x_in[:-num_test]) train_y = np.asarray(y_in[:-num_test]).reshape(num_train, 1) test_x = np.asarray(x_in[-num_test:]) test_y = np.asarray(y_in[-num_test:]).reshape(num_test, 1) print("Data Size: %d" % len(x_in)) print("Number of Features: %d" % (len(x_in[0])-1)) # first "feature" is placeholder print("Training Size: %d" % num_train) print("Test Size: %d" % num_test) # shared var - column vector with length = number of independent attributes theta = theano.shared(np.zeros((train_x.shape[1], 1))) # symbolic inputs to cost function x = T.matrix('x') y = T.matrix('y') # Compute predictions (feedforward, hypothesis, etc.) 
pred = T.dot(x, theta) # least mean square cost function c = 0.5 * T.mean((pred - y) ** 2) # function([symbolic inputs], output, name=name) cost = theano.function([x, y], c, name="cost") # least mean square cost partial derivatives # grad w/respect to theta_j = 1/m * sum( (x_i - y_i) * x_i_j ) # gc = 1/num_train * T.dot(x.T, (pred - y)) # gradient descent update function # learning rate lr = 0.01 print("Learning Rate: %f" % lr) # update format: (shared var to update, expression representing update) # featuring symbolic differentiation! updates = [(theta, theta - lr * T.grad(c, theta))] grad_desc = theano.function([x, y], theta, updates=updates, name="grad_desc") # iterate through gradient descent fixed number of times # list of costs at each iteration accuracy = [] iters = 3000 for i in range(iters): grad_desc(train_x, train_y) accuracy.append(cost(train_x, train_y)) if i % (iters // 20) == 0 or i == iters - 1: print("Iteration %d" % (i+1)) print("Minimum Training Cost: %f" % min(accuracy)) print("Test Cost: %f" % cost(test_x, test_y)) # show (hopefully) decreasing cost plt.plot(range(iters), accuracy) plt.show() # 300 iters, lr = 0.000006: min cost = 36 # (higher lr explodes) # 300 iters w/normalization, lr = 0.000006: min cost = 261 # 300 iters w/normalization, lr = 0.01: min cost = 35, test cost = 16 # (higher lr explodes) # 3000 iters, lr = 0.000006: min cost = 27 # 3000 iters w/normalization: min cost = 178 # 3000 iters w/normalization, lr = 0.01: min cost = 15, test cost = 10
[ "sam2mohan@gmail.com" ]
sam2mohan@gmail.com
3fce7e6e7ffee44c337a0c83125d2ce5f09a5280
49e5c03d59b9b7671f6be1780444cccd6ef14e7e
/maple/utils.py
a2c13c7246640f7c79516edfb77a8839724b6d00
[]
no_license
isled/backups
e2dd176adaed63d3c41e9a4e9377a012d192976f
94568f9582dfea2aff9898ef834c8fb583deec87
refs/heads/master
2021-07-10T20:15:56.087030
2017-10-12T11:02:58
2017-10-12T11:02:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
987
py
import typing class MapleError(Exception): """ maple 基础错误类。 所有错误类均基于此类。 """ pass def ensure_bytes(value: typing.Any) -> bytes: """ 确保传入变量是字节对象 :param value: 任意类型 :return: bytes """ if isinstance(value, bytes): return value if isinstance(value, bytearray): return bytes(value) if value is None: return b"" if not isinstance(value, str): str_value = str(value) else: str_value = value return str_value.encode('utf-8') def ensure_str(value: typing.Any) -> str: """ 确保传入变量是字符串对象 :param value: 任何类型 :return: str """ if isinstance(value, str): return value if value is None: return '' if isinstance(value, (bytes, bytearray)): str_value = value.decode('utf-8') else: str_value = value return str(str_value)
[ "ymkwfn1688@qq.com" ]
ymkwfn1688@qq.com
bf4537cf065e4166235f72754c003c22dff1f73a
7740035c7af9b8dcf1a82f0c1b46a7c6dd18f8f4
/blog_project/blogs/migrations/0001_initial.py
39215dc6daab96b05125b0c07573cf9e2c715387
[]
no_license
Arijit1303/Django-Projects
2df930d84ace7637eeaa4d5e3bba161fbce9a4f4
2125894e126b5f7089abf7d6d020fb1a82940463
refs/heads/master
2021-05-01T17:02:26.912653
2018-02-10T07:31:01
2018-02-10T07:31:01
120,993,324
0
0
null
null
null
null
UTF-8
Python
false
false
785
py
# Generated by Django 2.0.1 on 2018-01-15 10:42 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Post', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title', models.CharField(max_length=200)), ('text', models.TextField()), ('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)), ], ), ]
[ "noreply@github.com" ]
Arijit1303.noreply@github.com
a5b8f6d17d44bb21c75ef0b3d113fa8cb8f0dfdc
90be82216cd48b95e8ebfd0054a048ea91707872
/organisations/migrations/0002_auto_20200828_1740.py
c9a44789b14b923443120b8e9961778cd8f797de
[ "MIT" ]
permissive
rubberducklive/tcm_api
9c774e50e1e04f4c4c5735871ef38d35b013a5e0
53d2b533e3f9251cce49bd4c1b8e9e65a03eaf04
refs/heads/main
2022-12-06T17:30:29.807552
2020-08-28T17:49:19
2020-08-28T17:49:19
290,266,719
0
0
null
null
null
null
UTF-8
Python
false
false
440
py
# Generated by Django 3.0.9 on 2020-08-28 17:40 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('organisations', '0001_initial'), ] operations = [ migrations.AlterField( model_name='organisation', name='name', field=models.CharField(max_length=255, unique=True, verbose_name='Name of the organisation'), ), ]
[ "ulhas.sm@gmail.com" ]
ulhas.sm@gmail.com
5f6a1e686ceb5a0fd3ce59392ce011e48b736289
e427906785f3076ea7cf5f0bc87ba2edffb926b6
/Bakhteev2019/projects/Bakhteev2017Hypergrad/code/pyfos/hyperoptimizers/hoag_optimize.py
b606cc4e8b1163da2cca8fdc76fe02847bda54d1
[]
no_license
rigof80/PhDThesis
d9e03b84b8118f8c9fd677622126bef88ea2eda8
0dfc331608059427ab2bc6fe61ac127b5dbd0fe3
refs/heads/master
2022-01-09T04:32:09.310722
2019-05-01T17:59:45
2019-05-01T17:59:45
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,817
py
import sys sys.path.append('.') import theano import theano.tensor as T import numpy as np import random from structures import HyperparameterOptimization import random import time from scipy.optimize import minimize import gc def hoag_optimize(trainig_criterion, model_constructor, param_optimizer, trial_num , batch_size, batch_size2, train_iteration_num, X_data, Y_data, hyperparams, internal_optimize_learning_rate=10**(-5), internal_optimize_eps = 0.98, limits=None, max_abs_err = 10**10, lr=0.0, verbose=0): history = [] dataset_size = np.array(X_data).shape[0] X_datas = theano.shared(X_data) Y_datas = theano.shared(Y_data) if len(hyperparams)>1: raise NotImplementedError('Sorry, not implemented: num of hyperparams > 1') training_procedure = trainig_criterion( model_constructor, param_optimizer,X_data, Y_data ) k = len(training_procedure.models) """ 1. solve 2. make hessian optimization 3. make derivatives 4. correct """ Xs = [T.matrix() for _ in xrange(k)] Ys = [T.vector(dtype=Y_data.dtype) for _ in xrange(k)] indices = [T.ivector() for _ in xrange(k)] Xs2 = [T.matrix() for _ in xrange(k)] Ys2 = [T.vector(dtype=Y_data.dtype) for _ in xrange(k)] indices2 = [T.ivector() for _ in xrange(k)] costs = [] givens = [] costs_valid = [] all_params = [] for X,Y, index, model, X2, Y2, index2 in zip(Xs, Ys, indices, training_procedure.models,Xs2, Ys2, indices2): train_cost = model.cost(X, Y) all_params.append(model.params) validation_cost = model.validation(X2, Y2) givens.append((X,X_datas[index])) givens.append((Y,Y_datas[index])) givens.append((X2,X_datas[index2])) givens.append((Y2,Y_datas[index2])) costs.append(-train_cost) #using negative for article correspondence costs_valid.append(-validation_cost) #using negative for article correspondence valid_cost = T.mean(costs_valid) cost = T.mean(costs) q = [theano.shared(np.zeros(len(all_params[0].eval())).astype(theano.config.floatX)) for _ in xrange(k)] h_2 = T.grad(cost,all_params) Hq = T.Rop(h_2, all_params, q) #submodels 
are independent g_1 = T.grad(valid_cost, all_params) #test: 2x2 g_2 = T.grad(valid_cost, hyperparams, disconnected_inputs='ignore') g_2 = theano.function(indices2, g_2, givens=givens, on_unused_input='ignore') h_1 = T.grad(cost, all_params) #test: 2x2 #h_1_2s = [theano.gradient.jacobian(h_1_, hyperparams, disconnected_inputs='ignore' ) for h_1_, q_ in zip(h_1, q)] #h_1_2s_conc = T.concatenate([h_[0] for h_ in h_1_2s], axis=0) #test expecting: 4x2 #h_1_2_q = theano.function(indices, T.dot(h_1_2s_conc.T, T.concatenate(q)), givens=givens) h_1_2s = T.Lop(T.concatenate(h_1), hyperparams, T.concatenate(q)) h_1_2_q = theano.function(indices, h_1_2s[0], givens=givens) internal_cost = T.mean((T.concatenate(Hq) - T.concatenate(g_1))**2) internal_grad = T.grad(internal_cost, q) updates = [(q_, q_-internal_optimize_learning_rate*internal_grad_) for q_, internal_grad_ in zip(q, internal_grad)] internal_update = theano.function(indices+indices2, internal_cost, givens=givens, updates= updates+model.train_updates) internal_monitor = theano.function(indices+indices2, internal_cost, givens=givens) #internal_update = theano.function(indices+indices2, Hq, givens=givens, on_unused_input='ignore') for trial in xrange(trial_num): gc.collect() for m in training_procedure.models: m.respawn() if verbose>=0 : print 'trial ', trial for i in xrange(train_iteration_num): res = training_procedure.do_train() if verbose>=0 and (verbose==0 or i%verbose==0): print 'iteration {0}, internal loss={1}'.format(str(i), str(res)) valid_score = training_procedure.do_validation() history.append(([h.eval() for h in hyperparams], valid_score)) if verbose>=0: print 'validation score: ', valid_score if verbose>=0: print 'internal optimization' err = None rel_err = -1# -1 attemp_num = 10 while rel_err<internal_optimize_eps: sample1 = [random.sample(ti,batch_size2) for ti in training_procedure.train_indices] sample2 = [random.sample(vi,batch_size2) for vi in training_procedure.validation_indices] err_new = 
internal_update(*(sample1+sample2)) gc.collect() if err is not None: rel_err = min(err_new,err)/max(err_new,err) #qs = [q_.eval() for q_ in q] #if attemp_num> 0 and (np.isnan(np.mean(qs)) or np.isinf(np.mean(qs)) or (err_new > max_abs_err)) : # attemp_num-=1 # print 'bad internal learning rate', err_new, err, np.mean(qs) # if internal_optimize_learning_rate/10 > 0: # internal_optimize_learning_rate = internal_optimize_learning_rate/10 # for q_ in q: # q_.set_value(np.zeros(len(all_params[0].eval()))) # err = None # print 'updating learning rate', internal_optimize_learning_rate err = err_new#abs(err_new - err)/(err + err_new) if verbose>=0: print rel_err, err sample_t = [random.sample(ti, batch_size2) for ti in training_procedure.train_indices] sample_v = [random.sample(vi, batch_size2) for vi in training_procedure.validation_indices] time_s = time.time() #print len(g_2(*sample1)) #print len(h_1_2_q(*sample2)) grads = g_2(*sample_v) - h_1_2_q(*sample_t) #print 'TIME', time_s - time.time() #g_2(*training_procedure.validation_indices)##g_2(*training_procedure.validation_indices)# - h_1_2_q(*training_procedure.train_indices) #print 'grads', grads #print h_1_2_q(*training_procedure.train_indices) good_update = False attemp_num = 10 ####TODO while not good_update and attemp_num>0: good_update = True if limits: old_values = [h.eval() for h in hyperparams] for h,l,g in zip(hyperparams, lr,grads): #print (g).dtype, (h.eval()).dtype, type(l) h.set_value(h.eval() - l * g) if limits: h_id = -1 for h, l in zip(hyperparams, limits): h_id+=1 he = h.eval() if np.max((he))>l[1] or np.min((he))<l[0] or np.isnan(np.max(he)) or np.isinf(np.max(he)): print 'bad hyperparam update' print he,' vs limit ',l if np.isnan(np.max(he)) or np.isinf(np.max(he)): h.set_value(o) else: h.set_value(np.minimum(l[1], np.maximum(l[0], he))) for h2,o in zip(hyperparams, old_values): print 'returning value', o h2.set_value(o) lr[h_id]= lr[h_id] / 10.0 print 'new lr', lr attemp_num -= 1 good_update = 
False if verbose>=0: print 'hypergrads', grads return HyperparameterOptimization(best_values=history[-1][0], history=history) if __name__=='__main__': from generic.optimizer import gd_optimizer from pyfos.models.var_feedforward import build_var_feedforward from tc.cv import cv_tc from functools import partial matrix = np.load('../../data/matrix.npy') X, Y = np.load('../../data/linearx.npy'), np.load('../../data/lineary.npy') X_train = X[:100] Y_train = Y[:100] X_test = X[100:] Y_test = Y[100:] lr = theano.shared(10**(-3)) log_alphas = theano.shared(np.array([.0, .0])) optimizer = partial(gd_optimizer, learning_rate=lr) model_build = partial(build_feedforward, structure = [2,1], nonlinearity=lambda x:x, log_alphas=log_alphas, bias=False) hoag_optimize(partial(cv_tc, k =3, batch_size=75), model_build, optimizer, 25, 75, 100, X_train, Y_train, [log_alphas], lr=[0.01],verbose=10)#10**(-7), verbose=1)
[ "bakhteev@phystech.edu" ]
bakhteev@phystech.edu
6612f576f17ed5f3dd743c78d2b75c72608b9c56
5722f0056c3066fcfe71eabb66d1830c714626c3
/Week 3/ex29.py
cdf29f241855439e36a5a90a9ceedc58a5a2c418
[]
no_license
Leorodr501/idh3034leo
e14928d7126a9a946c61d2083b3bb43de833afbe
38596ca48bf945c5a8891fb9aa258d6b40edd9ca
refs/heads/master
2020-04-02T05:28:16.132218
2018-12-09T01:25:23
2018-12-09T01:25:23
154,079,280
0
0
null
null
null
null
UTF-8
Python
false
false
489
py
people = 20 cats = 30 dogs = 15 if people < cats: print("Too many cats! The world is doomed!") if people > cats: print("Not many cats! The world is saved!") if people < dogs: print("The world is drooled on!") if people > dogs: print("The world is dry!") dogs += 5 if people >= dogs: print("People are greater than or equal to dogs.") if people <= dogs: print("People are less than or equal to dogs.") if people == dogs: print("People are dogs.")
[ "root@instance-2.us-east4-c.c.mad-libs-221518.internal" ]
root@instance-2.us-east4-c.c.mad-libs-221518.internal
dd3c8aa9ff1f411e97b2efef7666a067449b8770
cee089aebd43dabcdc073fc4c1f3ec38bccd2a91
/Decison-Tree/Naive Decision Tree/decisionTree.py
9882f7f79be1f2bffea793117203b790748e8306
[]
no_license
ramarvab/DataMining_FinalProject
73f82438c87becfbbf5b5a21734dc6dc2b475873
28880b696a154fa0e263d04ac2c07436c118af96
refs/heads/master
2020-09-16T04:56:09.843205
2016-09-07T19:19:58
2016-09-07T19:19:58
67,636,260
0
0
null
null
null
null
UTF-8
Python
false
false
4,098
py
import numpy import math class tree_node(object): def __init__(self): self.leaf = 0 self.category = -1 self.feature = 0 self.value = -1 self.right = None self.left = None def get_entropy(catlist, categories): l_cat = len(catlist) if l_cat == 0: return 0 catlist = map(int, catlist) major = 0.0 cat = [0] * categories for i in catlist: cat[i] += 1 for i in cat: k = float(i)/float(l_cat) if k != 0: major -= k*math.log(k) return major def get_gini(catlist, categories): l_cat = len(catlist) if l_cat == 0: return 0 catlist = map(int, catlist) major = 0.0 cat = [0] * categories for i in catlist: cat[i] += 1 for i in cat: major += i*i major /= float(l_cat*l_cat) return 1.0-major def get_values(data, attr, points_index): vals = [data[i][attr] for i in points_index] vals.sort() parts = int(math.ceil(math.log(len(vals),2))) #parts = len(vals)/2 #parts = len(vals)/2 #vals = list(set(vals[::parts])) vals = list(set(vals)) vals.sort() return vals def get_mandv(data, features, attr, points_index, method, categories): values = get_values(data, attr, points_index) mandv = [] for value in values: l_set = [data[pt][features] for pt in points_index if data[pt][attr] <= value] r_set = [data[pt][features] for pt in points_index if data[pt][attr] > value] left_len = len(l_set) right_len = len(r_set) major = 0 if left_len == 0 or right_len == 0: major = 1000 else: if method == "g": major = get_gini(r_set, categories)*len(r_set) + get_gini(l_set, categories)*len(l_set) if method == "i": major = get_entropy(r_set, categories)*len(r_set) + get_gini(l_set, categories)*len(l_set) major = float(major)/float(len(points_index)) mandv.append([major, value]) mandv.sort(key=lambda x:x[0]) return mandv[0][0], mandv[0][1] def best_attr(data, features, points_index, method, categories): p_set = [data[pt][features] for pt in points_index] if method == "g": p_measure = get_gini(p_set, categories) else: p_measure = get_entropy(p_set, categories) attr_data = [] for i in range(features): major, val = 
get_mandv(data, features, i, points_index, method, categories) if p_measure > major: attr_data.append([i, major, val]) attr_data.sort(key=lambda x: x[1], reverse=False) if len(attr_data) == 0: return -1, -1 if attr_data[0][1] == 1000: return -1, -1 return attr_data[0][0], attr_data[0][2] def decision_tree(data, features, points_index, method, categories): # Generated new node node = tree_node() # checking for termination condition distinct_categories = set() cat_list = [0]*categories for i in points_index: distinct_categories.add(data[i][features]) cat_list[int(data[i][features])] += 1 distinct_categories = list(distinct_categories) if len(distinct_categories) == 1: node.leaf = 1 node.category = distinct_categories[0] return node attr, value = best_attr(data, features, points_index, method, categories) if attr == -1: node.leaf = 1 cat_list = zip(cat_list, range(categories)) cat_list.sort(key=lambda x: x[0], reverse=True) node.category = cat_list[0][1] return node node.leaf = 0 node.feature = attr node.value = value right_pt, left_pt = [], [] for i in points_index: if data[i][attr] <= value: left_pt.append(i) else: right_pt.append(i) l_len = len(left_pt) r_len = len(right_pt) if l_len != 0 and r_len != 0: node.left = decision_tree(data, features, left_pt, method, categories) node.right = decision_tree(data, features, right_pt, method, categories) elif l_len == 0: node = decision_tree(data, features, right_pt, method, categories) elif r_len == 0: node = decision_tree(data, features, left_pt, method, categories) return node ''' def best_attr(data, features, points_index, method, categories): # random approach for testing purpose random_attr = randint(0, features-1) count = 0 sumi = 0 for i in points_index: count += 1 sumi += data[i][random_attr] if count != 0: val = sumi/count else: val = sumi return random_attr, val '''
[ "ramarvab@149-161-153-43.dhcp-bl.indiana.edu" ]
ramarvab@149-161-153-43.dhcp-bl.indiana.edu
2b8904d38acfeffb87691cb317edd7a9494fbc11
21f05b45dbb43667007f3063d1a33082e122bec6
/src/NIMSU_Modules/DataType_Results.py
1073a7061d10b06d877ccfdcc43e4b35d6fd655b
[]
no_license
DanAyres/NIMSU
6f328f4b98a5eb34277be347fa1a2bb331bd87f0
6fe378c73d25aa58951de75d50841864268d389b
refs/heads/master
2020-05-02T11:18:06.087070
2015-05-27T09:27:17
2015-05-27T09:27:17
34,388,261
0
0
null
null
null
null
UTF-8
Python
false
false
2,820
py
''' Created on 14 Apr 2015 @author: daiel ''' class Data(): def __init__(self,val,listt=[],hdf5='none'): self.val=val self.list=listt self.hdf5=hdf5 def __add__(self,other): pass def __sub__(self,other): pass def __mul__(self,other): pass def __div__(self,other): pass def __radd__(self,other): pass class singleData(Data): def __add__(self,other): try: return singleData( self.val + other.val) except AttributeError: return singleData( self.val + other) def __radd__(self,other): return singleData( self.val + other) def __sub__(self,other): try: return singleData( self.val - other.val) except AttributeError: return singleData( self.val - other) def __mul__(self,other): try: return singleData( self.val * other.val) except AttributeError: return singleData( self.val * other) def __rmul__(self,other): return singleData( self.val * other) def __div__(self,other): try: return singleData( self.val / other.val) except AttributeError: return singleData( self.val / other) def __pow__(self,other): return singleData(self.val**other) class listData(Data): def __add__(self,other): try: return listData( self.val + other.val, listt=(self.list + other.list) ) except AttributeError: return listData( self.val + other, listt=(self.list + other) ) def __radd__(self,other): return listData( self.val + other, listt=(self.list + other) ) def __sub__(self,other): try: return listData( self.val - other.val, listt=(self.list - other.list) ) except AttributeError: return listData( self.val - other, listt=(self.list - other) ) def __mul__(self,other): try: return listData( self.val * other.val, listt=(self.list * other.list) ) except AttributeError: return listData( self.val * other, listt=(self.list * other) ) def __rmul__(self,other): return listData( self.val * other, listt=(self.list * other) ) def __div__(self,other): try: return listData( self.val / other.val, listt=(self.list / other.list) ) except AttributeError: return listData( self.val / other, listt=(self.list / other) ) def 
__pow__(self,other): return singleData(self.val**other,listt=(self.list**other)) class hdf5Data(Data): def __add__(self): pass
[ "daiel@daiel-XPS-L421X" ]
daiel@daiel-XPS-L421X
0099c7138f3d1228d6bced756f44e3f4ed25ed66
975e63eb3d9fd2617699a8dd447ed281a5225f27
/simulation_utils.py
0f2b84cac2fc73955902888e3297994687a5d916
[ "MIT" ]
permissive
meirelon/baseball-season-simulation
06813be8021105e388b28412f7a3313a3568500e
835283029844023ee528b4a52b771f10a4b622b5
refs/heads/master
2022-07-22T03:13:22.404375
2022-07-19T05:15:47
2022-07-19T05:15:47
175,287,540
3
1
null
null
null
null
UTF-8
Python
false
false
668
py
SCHEDULE_COLUMNS = [ "date", "number_of_games", "day_of_week", "visiting_team", "away_league", "away_game_number", "home_team", "home_league", "home_game_number", "game_time", ] # DISTRIBUTIONS = ["beta", "normal", "lognormal", "gamma", "weibull"] DISTRIBUTIONS = ["normal", "lognormal"] MLB_DIVISONS = { "al_east": ["NYA", "BOS", "BAL", "TOR", "TBA"], "al_central": ["MIN", "CLE", "KCA", "CHA", "DET"], "al_west": ["ANA", "HOU", "OAK", "SEA", "TEX"], "nl_east": ["NYN", "PHI", "ATL", "MIA", "WAS"], "nl_central": ["SLN", "CHN", "PIT", "MIL", "CIN"], "nl_west": ["ARI", "LAD", "COL", "SFN", "SDN"], }
[ "nestelm@gmail.com" ]
nestelm@gmail.com
c3c372e355b0b1fee36defc54ab53ac4f7d61fc6
dda2f6f4f823ec3571f0a8474a51a3498166b1f9
/rolle/migrations/0002_auto_20190920_1650.py
d2c3ede126b44ad962435e1f8736da9c4f8bd889
[]
no_license
prauscher/thwin
3761eef2e779491d52c5093ca0ce9841d218e743
bab06bc5659d3778e81b92995e46b826da9cbd68
refs/heads/master
2020-07-30T08:31:45.522366
2019-09-22T21:07:08
2019-09-22T21:07:08
210,156,570
1
0
null
null
null
null
UTF-8
Python
false
false
725
py
# Generated by Django 2.2.5 on 2019-09-20 16:50 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('rolle', '0001_initial'), ] operations = [ migrations.AlterField( model_name='freigabe', name='berechtigung', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='freigaben', to='rolle.Berechtigung'), ), migrations.AlterField( model_name='freigabe', name='rolle', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='freigaben', to='rolle.Rolle'), ), ]
[ "prauscher@prauscher.de" ]
prauscher@prauscher.de
0628946d4e9a280e8355cd0413d75bd4a43845dc
84e5297e214dd94105df7bbe627a506773d70224
/Assignment2/dnn_tf.py
478f858ded57e45f0034d15cb734f6130922bf28
[]
no_license
toannguyen1904/VietAI-ML-Foundation-5
b02b1463d0b820088fa7400112d41d4291357172
5adcd49c88e4c886b15973254d56c07c15a8660d
refs/heads/master
2022-05-16T10:27:27.570181
2020-03-16T05:37:58
2020-03-16T05:37:58
null
0
0
null
null
null
null
UTF-8
Python
false
false
6,419
py
"""dnn_tf_sol.py Solution of deep neural network implementation using tensorflow Author: Kien Huynh """ import numpy as np import tensorflow as tf import matplotlib.pyplot as plt from util import * from dnn_np import test import pdb def bat_classification(): # Load data from file # Make sure that bat.dat is in data/ train_x, train_y, test_x, test_y = get_bat_data() train_x, _, test_x = normalize(train_x, train_x, test_x) test_y = test_y.flatten().astype(np.int32) train_y = train_y.flatten().astype(np.int32) num_class = (np.unique(train_y)).shape[0] # DNN parameters hidden_layers = [100, 100, 100] learning_rate = 0.01 batch_size = 200 steps = 2000 # Specify that all features have real-value data feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])] # Available activition functions # https://www.tensorflow.org/api_guides/python/nn#Activation_Functions # tf.nn.relu # tf.nn.elu # tf.nn.sigmoid # tf.nn.tanh activation = tf.nn.relu # [TODO 1.7] Create a neural network and train it using estimator # Some available gradient descent optimization algorithms # https://www.tensorflow.org/api_docs/python/tf/train#classes # tf.train.GradientDescentOptimizer # tf.train.AdadeltaOptimizer # tf.train.AdagradOptimizer # tf.train.AdagradDAOptimizer # tf.train.MomentumOptimizer # tf.train.AdamOptimizer # tf.train.FtrlOptimizer # tf.train.ProximalGradientDescentOptimizer # tf.train.ProximalAdagradOptimizer # tf.train.RMSPropOptimizer # Create optimizer # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate) # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01) optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005) # build a deep neural network # https://www.tensorflow.org/api_docs/python/tf/estimator/DNNClassifier classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns, hidden_units=hidden_layers, n_classes=num_class, activation_fn=activation, optimizer=optimizer) # Define the 
training inputs # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x}, y = train_y, batch_size=batch_size, shuffle=True, num_epochs=None) # Train model. classifier.train( input_fn=train_input_fn, steps=steps) # Define the test inputs test_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": test_x}, y=test_y, num_epochs=1, shuffle=False) # Evaluate accuracy. predict_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": test_x}, num_epochs=1, shuffle=False) y_hat = classifier.predict(input_fn=predict_input_fn) y_hat = list(y_hat) y_hat = np.asarray([int(x['classes'][0]) for x in y_hat]) test(y_hat, test_y) def mnist_classification(): # Load data from file # Make sure that fashion-mnist/*.gz is in data/ train_x, train_y, val_x, val_y, test_x, test_y = get_mnist_data(1) train_x, val_x, test_x = normalize(train_x, train_x, test_x) train_y = train_y.flatten().astype(np.int32) val_y = val_y.flatten().astype(np.int32) test_y = test_y.flatten().astype(np.int32) num_class = (np.unique(train_y)).shape[0] pdb.set_trace() # DNN parameters hidden_layers = [100, 100, 100] learning_rate = 0.01 batch_size = 200 steps = 500 # Specify that all features have real-value data feature_columns = [tf.feature_column.numeric_column("x", shape=[train_x.shape[1]])] # Choose activation function activation = tf.nn.sigmoid # Some available gradient descent optimization algorithms # TODO: [YC1.7] Create optimizer optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.005) # build a deep neural network classifier = tf.estimator.DNNClassifier(feature_columns=feature_columns, hidden_units=hidden_layers, n_classes=num_class, activation_fn=activation, optimizer=optimizer) # Define the training inputs # https://www.tensorflow.org/api_docs/python/tf/estimator/inputs/numpy_input_fn train_input_fn = tf.estimator.inputs.numpy_input_fn(x = {"x": train_x}, y = train_y, 
batch_size=batch_size, shuffle=True, num_epochs=None) # Train model. classifier.train( input_fn=train_input_fn, steps=steps) # Define the test inputs test_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": test_x}, y=test_y, num_epochs=1, shuffle=False) # Evaluate accuracy. predict_input_fn = tf.estimator.inputs.numpy_input_fn( x={"x": test_x}, num_epochs=1, shuffle=False) y_hat = classifier.predict(input_fn=predict_input_fn) y_hat = list(y_hat) y_hat = np.asarray([int(x['classes'][0]) for x in y_hat]) test(y_hat, test_y) if __name__ == '__main__': np.random.seed(2017) plt.ion() bat_classification() mnist_classification()
[ "47108512+ChrisZangNam@users.noreply.github.com" ]
47108512+ChrisZangNam@users.noreply.github.com
96da18240353d57e20908d2a0b7b3f23721bc1cd
89148623fc5a85684da41c8a8d7c04543f21e93e
/designer/formWindow.py
0f7729ba9b5bdeb96c35f445ab9262093931bf11
[]
no_license
lllllllai27/PyQt5_GUI
e2d9151fbac21b066e31d1f509740123411ec13c
0f858bbf058f975fb5db925c277fad73ecbef54f
refs/heads/master
2020-08-02T19:53:59.705009
2019-09-29T14:03:19
2019-09-29T14:03:19
211,487,236
0
0
null
null
null
null
UTF-8
Python
false
false
4,600
py
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'formWindow.ui' # # Created by: PyQt5 UI code generator 5.13.1 # # WARNING! All changes made in this file will be lost! from PyQt5 import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(800, 600) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.widget = QtWidgets.QWidget(self.centralwidget) self.widget.setGeometry(QtCore.QRect(250, 90, 227, 181)) self.widget.setObjectName("widget") self.verticalLayout = QtWidgets.QVBoxLayout(self.widget) self.verticalLayout.setContentsMargins(0, 0, 0, 0) self.verticalLayout.setObjectName("verticalLayout") self.formLayout = QtWidgets.QFormLayout() self.formLayout.setObjectName("formLayout") self.label = QtWidgets.QLabel(self.widget) self.label.setObjectName("label") self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label) self.lineEdit = QtWidgets.QLineEdit(self.widget) self.lineEdit.setObjectName("lineEdit") self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.lineEdit) self.label_2 = QtWidgets.QLabel(self.widget) self.label_2.setObjectName("label_2") self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.label_2) self.lineEdit_2 = QtWidgets.QLineEdit(self.widget) self.lineEdit_2.setObjectName("lineEdit_2") self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.lineEdit_2) self.label_3 = QtWidgets.QLabel(self.widget) self.label_3.setObjectName("label_3") self.formLayout.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.label_3) self.lineEdit_3 = QtWidgets.QLineEdit(self.widget) self.lineEdit_3.setObjectName("lineEdit_3") self.formLayout.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.lineEdit_3) self.label_4 = QtWidgets.QLabel(self.widget) self.label_4.setObjectName("label_4") self.formLayout.setWidget(3, QtWidgets.QFormLayout.LabelRole, 
self.label_4) self.lineEdit_4 = QtWidgets.QLineEdit(self.widget) self.lineEdit_4.setObjectName("lineEdit_4") self.formLayout.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.lineEdit_4) self.label_5 = QtWidgets.QLabel(self.widget) self.label_5.setObjectName("label_5") self.formLayout.setWidget(4, QtWidgets.QFormLayout.LabelRole, self.label_5) self.lineEdit_5 = QtWidgets.QLineEdit(self.widget) self.lineEdit_5.setObjectName("lineEdit_5") self.formLayout.setWidget(4, QtWidgets.QFormLayout.FieldRole, self.lineEdit_5) self.verticalLayout.addLayout(self.formLayout) self.horizontalLayout = QtWidgets.QHBoxLayout() self.horizontalLayout.setObjectName("horizontalLayout") self.pushButton = QtWidgets.QPushButton(self.widget) self.pushButton.setObjectName("pushButton") self.horizontalLayout.addWidget(self.pushButton) self.pushButton_2 = QtWidgets.QPushButton(self.widget) self.pushButton_2.setObjectName("pushButton_2") self.horizontalLayout.addWidget(self.pushButton_2) self.verticalLayout.addLayout(self.horizontalLayout) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 800, 26)) self.menubar.setObjectName("menubar") MainWindow.setMenuBar(self.menubar) self.statusbar = QtWidgets.QStatusBar(MainWindow) self.statusbar.setObjectName("statusbar") MainWindow.setStatusBar(self.statusbar) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.label.setText(_translate("MainWindow", "序号:")) self.label_2.setText(_translate("MainWindow", "姓名:")) self.label_3.setText(_translate("MainWindow", "年龄:")) self.label_4.setText(_translate("MainWindow", "职位:")) self.label_5.setText(_translate("MainWindow", "薪水:")) self.pushButton.setText(_translate("MainWindow", "确定")) self.pushButton_2.setText(_translate("MainWindow", "取消"))
[ "31854329+lllllllai27@users.noreply.github.com" ]
31854329+lllllllai27@users.noreply.github.com
a69567cb312181e925b480f018bcfda89912d788
28642c0afd5a78640b713c4562d950ea40e0147a
/scripts/common.py
1e929bbe059b4d552b5c6f55549fcaef6dfad70b
[ "Apache-2.0" ]
permissive
Kevin-Mok/kogito-images
e4764327f36983fd3f545089d83b35549b304121
a814fe35e4a8c7bd32849bef934c7f6f57faf1b3
refs/heads/master
2023-06-12T09:24:35.854019
2021-07-05T12:59:09
2021-07-05T12:59:09
269,133,713
0
1
Apache-2.0
2020-06-03T16:04:24
2020-06-03T16:04:23
null
UTF-8
Python
false
false
13,810
py
#!/usr/bin/python3 # This script defines some common function that are used by manage-kogito-version.py and push-staging.py script import os import re from ruamel.yaml import YAML # All kogito-image modules that have the kogito version. MODULES = {"kogito-data-index-common", "kogito-data-index-mongodb", "kogito-data-index-infinispan", "kogito-data-index-postgresql", "kogito-trusty-common", "kogito-trusty-infinispan", "kogito-trusty-redis", "kogito-explainability", "kogito-image-dependencies", "kogito-jobs-service-common", "kogito-jobs-service-ephemeral", "kogito-jobs-service-infinispan", "kogito-jobs-service-mongodb", "kogito-jobs-service-postgresql", "kogito-trusty-ui", "kogito-jq", "kogito-kubernetes-client", "kogito-launch-scripts", "kogito-logging", "kogito-management-console", "kogito-task-console", "kogito-persistence", "kogito-runtime-native", "kogito-runtime-jvm", "kogito-builder", "kogito-s2i-core", "kogito-system-user", "kogito-jit-runner", "kogito-custom-truststore"} MODULE_FILENAME = "module.yaml" MODULES_DIR = "modules" # imagestream file that contains all images, this file aldo needs to be updated. IMAGE_STREAM_FILENAME = "kogito-imagestream.yaml" # image.yaml file definition that needs to be updated IMAGE_FILENAME = "image.yaml" ARTIFACTS_VERSION_ENV_KEY = "KOGITO_VERSION" # behave tests that needs to be updated BEHAVE_BASE_DIR = 'tests/features' CLONE_REPO_SCRIPT = 'tests/test-apps/clone-repo.sh' def yaml_loader(): """ default yaml Loader :return: yaml object """ yaml = YAML() yaml.preserve_quotes = True yaml.width = 1024 yaml.indent(mapping=2, sequence=4, offset=2) return yaml def update_image_version(target_version): """ Update image.yaml version tag. 
:param target_version: version used to update the image.yaml file """ print("Updating Image main file version from file {0} to version {1}".format(IMAGE_FILENAME, target_version)) try: with open(IMAGE_FILENAME) as image: data = yaml_loader().load(image) if 'version' in data: data['version'] = target_version else: print("Field version not found, returning...") return with open(IMAGE_FILENAME, 'w') as image: yaml_loader().dump(data, image) except TypeError as err: print("Unexpected error:", err) def update_image_stream(target_version): """ Update the imagestream file, it will update the tag name, version and image tag. :param target_version: version used to update the imagestream file; """ print("Updating ImageStream images version from file {0} to version {1}".format(IMAGE_STREAM_FILENAME, target_version)) try: with open(IMAGE_STREAM_FILENAME) as imagestream: data = yaml_loader().load(imagestream) for item_index, item in enumerate(data['items'], start=0): for tag_index, tag in enumerate(item['spec']['tags'], start=0): data['items'][item_index]['spec']['tags'][tag_index]['name'] = target_version data['items'][item_index]['spec']['tags'][tag_index]['annotations']['version'] = target_version image_dict = str.split(data['items'][item_index]['spec']['tags'][tag_index]['from']['name'], ':') # image name + new version updated_image_name = image_dict[0] + ':' + target_version data['items'][item_index]['spec']['tags'][tag_index]['from']['name'] = updated_image_name with open(IMAGE_STREAM_FILENAME, 'w') as imagestream: yaml_loader().dump(data, imagestream) except TypeError: raise def get_all_module_dirs(): """ Retrieve the module directories """ modules = [] # r=>root, d=>directories, f=>files for r, d, f in os.walk(MODULES_DIR): for item in f: if MODULE_FILENAME == item: modules.append(os.path.dirname(os.path.join(r, item))) return modules def get_kogito_module_dirs(): """ Retrieve the Kogito module directories """ modules = [] for moduleName in MODULES: 
modules.append(os.path.join(MODULES_DIR, moduleName)) return modules def get_all_images(): """ Retrieve the Kogito images' names """ images = [] # r=>root, d=>directories, f=>files for r, d, f in os.walk("."): for item in f: if re.compile(r'.*-overrides.yaml').match(item): images.append(item.replace("-overrides.yaml", '')) return images def update_modules_version(target_version): """ Update every Kogito module.yaml to the given version. :param target_version: version used to update all Kogito module.yaml files """ for module_dir in get_kogito_module_dirs(): update_module_version(module_dir, target_version) def update_module_version(module_dir, target_version): """ Set Kogito module.yaml to given version. :param module_dir: directory where cekit modules are hold :param target_version: version to set into the module """ try: module_file = os.path.join(module_dir, "module.yaml") with open(module_file) as module: data = yaml_loader().load(module) print( "Updating module {0} version from {1} to {2}".format(data['name'], data['version'], target_version)) data['version'] = target_version with open(module_file, 'w') as module: yaml_loader().dump(data, module) except TypeError: raise def retrieve_artifacts_version(): """ Retrieve the artifacts version from envs in main image.yaml """ try: with open(IMAGE_FILENAME) as imageFile: data = yaml_loader().load(imageFile) for index, env in enumerate(data['envs'], start=0): if env['name'] == ARTIFACTS_VERSION_ENV_KEY: return data['envs'][index]['value'] except TypeError: raise def update_artifacts_version_env_in_image(artifacts_version): """ Update `KOGITO_VERSION` env var in image.yaml. 
:param artifacts_version: kogito version used to update image.yaml which contains the `KOGITO_VERSION` env var """ try: with open(IMAGE_FILENAME) as imageFile: data = yaml_loader().load(imageFile) for index, env in enumerate(data['envs'], start=0): if env['name'] == ARTIFACTS_VERSION_ENV_KEY: print("Updating image.yaml env var {0} with value {1}".format(ARTIFACTS_VERSION_ENV_KEY, artifacts_version)) data['envs'][index]['value'] = artifacts_version with open(IMAGE_FILENAME, 'w') as imageFile: yaml_loader().dump(data, imageFile) except TypeError: raise def update_examples_ref_in_behave_tests(examples_ref): """ Update examples git reference into behave tests :param examples_ref: kogito-examples reference """ print("Set examples_ref {} in behave tests".format(examples_ref)) # this pattern will look for any occurrences of using master or using x.x.x pattern = re.compile(r'(using nightly-master)|(using nightly-\s*([\d.]+.x))|(using \s*([\d.]+))') replacement = 'using {}'.format(examples_ref) update_in_behave_tests(pattern, replacement) def update_examples_uri_in_behave_tests(examples_uri): """ Update examples uri into behave tests :param examples_uri: kogito-examples uri """ print("Set examples_uri {} in behave tests".format(examples_uri)) # pattern to get the default examples uri pattern = re.compile(r'(https://github.com/kiegroup/kogito-examples.git)') replacement = examples_uri update_in_behave_tests(pattern, replacement) def update_artifacts_version_in_behave_tests(artifacts_version): """ Update artifacts version into behave tests :param artifacts_version: artifacts version to set """ print("Set artifacts_version {} in behave tests".format(artifacts_version)) # pattern to change the KOGITO_VERSION pattern = re.compile('\|[\s]*KOGITO_VERSION[\s]*\|[\s]*(([\d.]+.x)|([\d.]+)[\s]*|([\d.]+-SNAPSHOT)|([\d.]+.Final))[\s]*\|') replacement = '| KOGITO_VERSION | {} | '.format(artifacts_version) update_in_behave_tests(pattern, replacement) def 
update_runtime_image_in_behave_tests(runtime_image_name, image_suffix): """ Update a runtime image into behave tests :param runtime_image_name: new full tag name of the runtime image :param image_suffix: suffix of the runtime image to update """ print("Set {0} runtime image to {1} in behave tests".format(image_suffix, runtime_image_name)) # pattern to change the KOGITO_VERSION pattern = re.compile(r'(runtime-image quay.io/kiegroup/kogito-runtime-{}:latest)'.format(image_suffix)) replacement = 'runtime-image {}'.format(runtime_image_name) update_in_behave_tests(pattern, replacement) pattern = re.compile(r'(runtime-image rhpam-7/rhpam-kogito-runtime-{}-rhel8:latest)'.format(image_suffix)) replacement = 'runtime-image {}'.format(runtime_image_name) update_in_behave_tests(pattern, replacement) def update_maven_repo_in_behave_tests(repo_url, replaceJbossRepository): """ Update maven repository into behave tests :param repo_url: Maven repository url :param replaceJbossRepository: Set to true if default Jboss repository needs to be overriden """ print("Set maven repo {} in behave tests".format(repo_url)) pattern = re.compile('\|\s*variable[\s]*\|[\s]*value[\s]*\|') env_var_key = "MAVEN_REPO_URL" if replaceJbossRepository: env_var_key = "JBOSS_MAVEN_REPO_URL" replacement = "| variable | value |\n | {} | {} |\n | MAVEN_DOWNLOAD_OUTPUT | true |".format(env_var_key, repo_url) update_in_behave_tests(pattern, replacement) def ignore_maven_self_signed_certificate_in_behave_tests(): """ Sets the environment variable to ignore the self-signed certificates in maven """ print("Setting MAVEN_IGNORE_SELF_SIGNED_CERTIFICATE env in behave tests") pattern = re.compile('\|\s*variable[\s]*\|[\s]*value[\s]*\|') replacement = "| variable | value |\n | MAVEN_IGNORE_SELF_SIGNED_CERTIFICATE | true |" update_in_behave_tests(pattern, replacement) def update_in_behave_tests(pattern, replacement): """ Update all behave tests files :param pattern: Pattern to look for into file :param replacement: 
What to put instead if pattern found """ for f in os.listdir(BEHAVE_BASE_DIR): if f.endswith('.feature'): update_in_file(os.path.join(BEHAVE_BASE_DIR, f), pattern, replacement) def update_examples_ref_in_clone_repo(examples_ref): """ Update examples git reference into clone-repo.sh script :param examples_ref: kogito-examples reference """ print("Set examples_ref {} in clone-repo script".format(examples_ref)) pattern = re.compile(r'(git checkout.*)') replacement = "git checkout master" if examples_ref != 'master': replacement = "git checkout -b {0} origin/{1}".format(examples_ref, examples_ref) update_in_file(CLONE_REPO_SCRIPT, pattern, replacement) def update_examples_uri_in_clone_repo(examples_uri): """ Update examples uri into clone-repo.sh script :param examples_uri: kogito-examples uri """ print("Set examples_uri {} in clone-repo script".format(examples_uri)) pattern = re.compile(r'(git clone.*)') replacement = "git clone {}".format(examples_uri) update_in_file(CLONE_REPO_SCRIPT, pattern, replacement) def update_maven_repo_in_clone_repo(repo_url, replace_jboss_repository): """ Update maven repository into clone-repo.sh script :param repo_url: Maven repository url :param replace_jboss_repository: Set to true if default Jboss repository needs to be overridden """ print("Set maven repo {} in clone-repo script".format(repo_url)) pattern = "" replacement = "" if replace_jboss_repository: pattern = re.compile(r'(export JBOSS_MAVEN_REPO_URL=.*)') replacement = 'export JBOSS_MAVEN_REPO_URL="{}"'.format(repo_url) else: pattern = re.compile(r'(# export MAVEN_REPO_URL=.*)') replacement = 'export MAVEN_REPO_URL="{}"'.format(repo_url) update_in_file(CLONE_REPO_SCRIPT, pattern, replacement) def ignore_maven_self_signed_certificate_in_clone_repo(): """ Sets the environment variable to ignore the self-signed certificates in maven """ print("Setting MAVEN_IGNORE_SELF_SIGNED_CERTIFICATE env in clone repo") pattern = re.compile(r'(# MAVEN_IGNORE_SELF_SIGNED_CERTIFICATE=.*)') 
replacement = "MAVEN_IGNORE_SELF_SIGNED_CERTIFICATE=true" update_in_file(CLONE_REPO_SCRIPT, pattern, replacement) def update_in_file(file, pattern, replacement): """ Update in given file :param file: file to update :param pattern: Pattern to look for into file :param replacement: What to put instead if pattern found """ with open(file) as fe: updated_value = pattern.sub(replacement, fe.read()) with open(file, 'w') as fe: fe.write(updated_value) if __name__ == "__main__": for m in get_kogito_module_dirs(): print("module {}".format(m))
[ "noreply@github.com" ]
Kevin-Mok.noreply@github.com
de48c66359d85fb9300d2d9bf9851a4d1a883f0d
034adbabe1f0243452e19a8313b23cc6950b4ed1
/check_version.py
faff1a4f08a8bd052d0bd50e437c484f88a96ca1
[]
no_license
MrLiuYS/JSONFormat4Flutter
e2204b136b7165400dffd8605277f87d80194d7a
aeb265abba99ddd74f65c354f436b2d0ab83f1be
refs/heads/master
2020-09-26T15:03:51.973447
2019-12-11T02:19:09
2019-12-11T02:19:09
226,279,167
0
0
null
2019-12-06T08:17:14
2019-12-06T08:17:13
null
UTF-8
Python
false
false
2,812
py
#!/usr/bin/env python # -*- coding:utf-8 -*- # @Filename : check_version.py # @Date : 18-8-20 上午1:52 # @Author : DebuggerX import configparser import os import ssl import sys from urllib import request from json import loads from PyQt5 import QtGui, QtCore from PyQt5.QtCore import QThread, pyqtSignal from PyQt5.QtWidgets import QMessageBox from tools import msg_box_ui code = 0.7 ignore_code = 0.0 check_last_version_thread = None def get_exe_path(): if getattr(sys, 'frozen', False): return os.path.dirname(sys.executable) else: return os.path.dirname(__file__) def _check_ignore_version(): config = configparser.ConfigParser() global ignore_code # noinspection PyBroadException try: config.read(os.path.join(get_exe_path(), '.ignore.cfg')) ignore_code = float(config.get('version', 'code')) except Exception: pass class CheckLastVersion(QThread): trigger = pyqtSignal(dict) def run(self): res_json = None # noinspection PyBroadException try: res = request.urlopen('https://raw.githubusercontent.com/debuggerx01/JSONFormat4Flutter/master/version', context=ssl._create_unverified_context()) res_json = loads(res.read().decode()) except Exception: pass if res_json is not None: global code if res_json['code'] > code and res_json['code'] > ignore_code: self.trigger.emit(res_json) def check_last_version_handler(json_obj): msg_box = QMessageBox() msg_box.addButton('确定', QMessageBox.AcceptRole) msg_box.addButton('忽略', QMessageBox.NoRole) msg_box.addButton('关闭', QMessageBox.RejectRole) msg_box.setParent(msg_box_ui) msg_box.setWindowTitle("有新版本更新!") msg_box.setText("新版本(v%s)更新内容:\n%s\n\n点击[确定]转跳到下载页,点击[忽略]忽略该版本提醒,点击[关闭]退出本提示框" % (json_obj['code'], json_obj['desc'])) res = msg_box.exec() if res == QMessageBox.RejectRole: config = configparser.ConfigParser() config.add_section('version') config.set('version', 'code', str(json_obj['code'])) with open(os.path.join(get_exe_path(), '.ignore.cfg'), 'w') as configfile: config.write(configfile) elif res == QMessageBox.AcceptRole: 
QtGui.QDesktopServices.openUrl(QtCore.QUrl('https://github.com/debuggerx01/JSONFormat4Flutter/releases')) def check_version(): _check_ignore_version() global check_last_version_thread check_last_version_thread = CheckLastVersion() check_last_version_thread.trigger.connect(check_last_version_handler) check_last_version_thread.start() return code
[ "dx8917312@163.com" ]
dx8917312@163.com