hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
c7273dcb9719594ed240d392047b44afff69fd8c | 560 | py | Python | test/test_temp_f_uz_c.py | martabritala/augsta-limena-progr-2-nodarbibas-darbs | c950493224b355767c198c7f45cafd0097f9640b | [
"MIT"
] | null | null | null | test/test_temp_f_uz_c.py | martabritala/augsta-limena-progr-2-nodarbibas-darbs | c950493224b355767c198c7f45cafd0097f9640b | [
"MIT"
] | null | null | null | test/test_temp_f_uz_c.py | martabritala/augsta-limena-progr-2-nodarbibas-darbs | c950493224b355767c198c7f45cafd0097f9640b | [
"MIT"
] | null | null | null | import pytest
from src.temperatura_FuzC import FtoC
def test_temperatura_viszemaka():
assert FtoC(-500)==-273.15
def test_temperatura_norm():
assert FtoC(32)==0
"""
D. Temperature conversion F->C
The function accepts one argument - the temperature in degrees Fahrenheit -
and returns the temperature in degrees Celsius. The lowest possible temperature
in degrees Celsius is −273.15, so if the computed temperature is lower, it returns −273.15.
Arguments:
t {int or float} -- temperature in degrees Fahrenheit
Returns:
int or float -- temperature in degrees Celsius
"""
| 25.454545 | 92 | 0.757143 |
a26134e92feb5136140867928a7d56f1e9591c96 | 583 | py | Python | app/migrations/0003_auto_20170310_1115.py | TataneInYourFace/wifill-api | 9167306aa4b920b87e57d194b9ad073f16977544 | [
"MIT"
] | null | null | null | app/migrations/0003_auto_20170310_1115.py | TataneInYourFace/wifill-api | 9167306aa4b920b87e57d194b9ad073f16977544 | [
"MIT"
] | 1 | 2018-10-19T07:41:21.000Z | 2018-10-19T07:41:21.000Z | app/migrations/0003_auto_20170310_1115.py | TataneInYourFace/wifill-api | 9167306aa4b920b87e57d194b9ad073f16977544 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.6 on 2017-03-10 10:15
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_auto_20170310_1016'),
]
operations = [
migrations.AlterField(
model_name='address',
name='user',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
| 25.347826 | 121 | 0.667238 |
c963092a8da2bd9bf9ca44b429d4d52d85b2984e | 57,702 | py | Python | examples/simulation_verilator/test_simulation_verilator.py | leonardt/veriloggen | bc3dacaa6a3e0b0652763881d0edf0421c6d3189 | [
"Apache-2.0"
] | null | null | null | examples/simulation_verilator/test_simulation_verilator.py | leonardt/veriloggen | bc3dacaa6a3e0b0652763881d0edf0421c6d3189 | [
"Apache-2.0"
] | null | null | null | examples/simulation_verilator/test_simulation_verilator.py | leonardt/veriloggen | bc3dacaa6a3e0b0652763881d0edf0421c6d3189 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
from __future__ import print_function
import os
import veriloggen
import simulation_verilator
expected_verilog = """
module test
(
input io_CLK,
input io_RST
);
reg CLK;
reg RST;
wire [32-1:0] myaxi_awaddr;
wire [8-1:0] myaxi_awlen;
wire myaxi_awvalid;
reg myaxi_awready;
wire [32-1:0] myaxi_wdata;
wire [4-1:0] myaxi_wstrb;
wire myaxi_wlast;
wire myaxi_wvalid;
reg myaxi_wready;
wire [32-1:0] myaxi_araddr;
wire [8-1:0] myaxi_arlen;
wire myaxi_arvalid;
reg myaxi_arready;
reg [32-1:0] myaxi_rdata;
reg myaxi_rlast;
reg myaxi_rvalid;
wire myaxi_rready;
wire [32-1:0] memory_awaddr;
wire [8-1:0] memory_awlen;
wire memory_awvalid;
reg memory_awready;
wire [32-1:0] memory_wdata;
wire [4-1:0] memory_wstrb;
wire memory_wlast;
wire memory_wvalid;
reg memory_wready;
wire [32-1:0] memory_araddr;
wire [8-1:0] memory_arlen;
wire memory_arvalid;
reg memory_arready;
reg [32-1:0] memory_rdata;
reg memory_rlast;
reg memory_rvalid;
wire memory_rready;
reg [8-1:0] _memory_mem [0:2**20-1];
initial begin
$readmemh("memimg_test_simulation_verilator.out", _memory_mem);
end
reg [32-1:0] _memory_fsm;
localparam _memory_fsm_init = 0;
reg [33-1:0] _write_count;
reg [32-1:0] _write_addr;
reg [33-1:0] _read_count;
reg [32-1:0] _read_addr;
reg [33-1:0] _sleep_count;
reg [32-1:0] _d1__memory_fsm;
reg __memory_fsm_cond_100_0_1;
reg __memory_fsm_cond_200_1_1;
reg __memory_fsm_cond_211_2_1;
assign memory_awaddr = myaxi_awaddr;
assign memory_awlen = myaxi_awlen;
assign memory_awvalid = myaxi_awvalid;
wire _tmp_0;
assign _tmp_0 = memory_awready;
always @(*) begin
myaxi_awready = _tmp_0;
end
assign memory_wdata = myaxi_wdata;
assign memory_wstrb = myaxi_wstrb;
assign memory_wlast = myaxi_wlast;
assign memory_wvalid = myaxi_wvalid;
wire _tmp_1;
assign _tmp_1 = memory_wready;
always @(*) begin
myaxi_wready = _tmp_1;
end
assign memory_araddr = myaxi_araddr;
assign memory_arlen = myaxi_arlen;
assign memory_arvalid = myaxi_arvalid;
wire _tmp_2;
assign _tmp_2 = memory_arready;
always @(*) begin
myaxi_arready = _tmp_2;
end
always @(*) begin
myaxi_rdata = memory_rdata;
end
wire _tmp_3;
assign _tmp_3 = memory_rlast;
always @(*) begin
myaxi_rlast = _tmp_3;
end
wire _tmp_4;
assign _tmp_4 = memory_rvalid;
always @(*) begin
myaxi_rvalid = _tmp_4;
end
assign memory_rready = myaxi_rready;
blinkled
uut
(
.CLK(CLK),
.RST(RST),
.myaxi_awaddr(myaxi_awaddr),
.myaxi_awlen(myaxi_awlen),
.myaxi_awvalid(myaxi_awvalid),
.myaxi_awready(myaxi_awready),
.myaxi_wdata(myaxi_wdata),
.myaxi_wstrb(myaxi_wstrb),
.myaxi_wlast(myaxi_wlast),
.myaxi_wvalid(myaxi_wvalid),
.myaxi_wready(myaxi_wready),
.myaxi_araddr(myaxi_araddr),
.myaxi_arlen(myaxi_arlen),
.myaxi_arvalid(myaxi_arvalid),
.myaxi_arready(myaxi_arready),
.myaxi_rdata(myaxi_rdata),
.myaxi_rlast(myaxi_rlast),
.myaxi_rvalid(myaxi_rvalid),
.myaxi_rready(myaxi_rready)
);
initial begin
$write("");
$write("");
end
initial begin
CLK = 0;
$write("");
end
initial begin
RST = 0;
memory_awready = 0;
memory_wready = 0;
memory_arready = 0;
memory_rdata = 0;
memory_rlast = 0;
memory_rvalid = 0;
_memory_fsm = _memory_fsm_init;
_write_count = 0;
_write_addr = 0;
_read_count = 0;
_read_addr = 0;
_sleep_count = 0;
_d1__memory_fsm = _memory_fsm_init;
__memory_fsm_cond_100_0_1 = 0;
__memory_fsm_cond_200_1_1 = 0;
__memory_fsm_cond_211_2_1 = 0;
$write("");
RST = 1;
$write("");
RST = 0;
$write("");
$write("");
end
wire _tmp_5;
assign _tmp_5 = io_CLK;
always @(*) begin
CLK = _tmp_5;
end
wire _tmp_6;
assign _tmp_6 = io_RST;
always @(*) begin
RST = _tmp_6;
end
localparam _memory_fsm_200 = 200;
localparam _memory_fsm_201 = 201;
localparam _memory_fsm_202 = 202;
localparam _memory_fsm_203 = 203;
localparam _memory_fsm_204 = 204;
localparam _memory_fsm_205 = 205;
localparam _memory_fsm_206 = 206;
localparam _memory_fsm_207 = 207;
localparam _memory_fsm_208 = 208;
localparam _memory_fsm_209 = 209;
localparam _memory_fsm_210 = 210;
localparam _memory_fsm_211 = 211;
localparam _memory_fsm_100 = 100;
localparam _memory_fsm_101 = 101;
localparam _memory_fsm_102 = 102;
localparam _memory_fsm_103 = 103;
localparam _memory_fsm_104 = 104;
localparam _memory_fsm_105 = 105;
localparam _memory_fsm_106 = 106;
localparam _memory_fsm_107 = 107;
localparam _memory_fsm_108 = 108;
localparam _memory_fsm_109 = 109;
localparam _memory_fsm_110 = 110;
localparam _memory_fsm_111 = 111;
localparam _memory_fsm_112 = 112;
always @(posedge CLK) begin
if(RST) begin
_memory_fsm <= _memory_fsm_init;
_d1__memory_fsm <= _memory_fsm_init;
memory_awready <= 0;
_write_addr <= 0;
_write_count <= 0;
__memory_fsm_cond_100_0_1 <= 0;
memory_wready <= 0;
memory_arready <= 0;
_read_addr <= 0;
_read_count <= 0;
__memory_fsm_cond_200_1_1 <= 0;
memory_rdata[7:0] <= (0 >> 0) & { 8{ 1'd1 } };
memory_rdata[15:8] <= (0 >> 8) & { 8{ 1'd1 } };
memory_rdata[23:16] <= (0 >> 16) & { 8{ 1'd1 } };
memory_rdata[31:24] <= (0 >> 24) & { 8{ 1'd1 } };
memory_rvalid <= 0;
memory_rlast <= 0;
__memory_fsm_cond_211_2_1 <= 0;
memory_rdata <= 0;
_sleep_count <= 0;
end else begin
_sleep_count <= _sleep_count + 1;
if(_sleep_count == 3) begin
_sleep_count <= 0;
end
_d1__memory_fsm <= _memory_fsm;
case(_d1__memory_fsm)
_memory_fsm_100: begin
if(__memory_fsm_cond_100_0_1) begin
memory_awready <= 0;
end
end
_memory_fsm_200: begin
if(__memory_fsm_cond_200_1_1) begin
memory_arready <= 0;
end
end
_memory_fsm_211: begin
if(__memory_fsm_cond_211_2_1) begin
memory_rvalid <= 0;
memory_rlast <= 0;
end
end
endcase
case(_memory_fsm)
_memory_fsm_init: begin
if(memory_awvalid) begin
_memory_fsm <= _memory_fsm_100;
end
if(memory_arvalid) begin
_memory_fsm <= _memory_fsm_200;
end
end
_memory_fsm_100: begin
if(memory_awvalid) begin
memory_awready <= 1;
_write_addr <= memory_awaddr;
_write_count <= memory_awlen + 1;
end
__memory_fsm_cond_100_0_1 <= 1;
if(!memory_awvalid) begin
_memory_fsm <= _memory_fsm_init;
end
if(memory_awvalid) begin
_memory_fsm <= _memory_fsm_101;
end
end
_memory_fsm_101: begin
_memory_fsm <= _memory_fsm_102;
end
_memory_fsm_102: begin
_memory_fsm <= _memory_fsm_103;
end
_memory_fsm_103: begin
_memory_fsm <= _memory_fsm_104;
end
_memory_fsm_104: begin
_memory_fsm <= _memory_fsm_105;
end
_memory_fsm_105: begin
_memory_fsm <= _memory_fsm_106;
end
_memory_fsm_106: begin
_memory_fsm <= _memory_fsm_107;
end
_memory_fsm_107: begin
_memory_fsm <= _memory_fsm_108;
end
_memory_fsm_108: begin
_memory_fsm <= _memory_fsm_109;
end
_memory_fsm_109: begin
_memory_fsm <= _memory_fsm_110;
end
_memory_fsm_110: begin
_memory_fsm <= _memory_fsm_111;
end
_memory_fsm_111: begin
memory_wready <= 1;
_memory_fsm <= _memory_fsm_112;
end
_memory_fsm_112: begin
if(memory_wvalid && memory_wstrb[0]) begin
_memory_mem[_write_addr + 0] <= memory_wdata[7:0];
end
if(memory_wvalid && memory_wstrb[1]) begin
_memory_mem[_write_addr + 1] <= memory_wdata[15:8];
end
if(memory_wvalid && memory_wstrb[2]) begin
_memory_mem[_write_addr + 2] <= memory_wdata[23:16];
end
if(memory_wvalid && memory_wstrb[3]) begin
_memory_mem[_write_addr + 3] <= memory_wdata[31:24];
end
if(memory_wvalid && memory_wready) begin
_write_addr <= _write_addr + 4;
_write_count <= _write_count - 1;
end
if(_sleep_count == 3) begin
memory_wready <= 0;
end else begin
memory_wready <= 1;
end
if(memory_wvalid && memory_wready && (_write_count == 1)) begin
memory_wready <= 0;
end
if(memory_wvalid && memory_wready && (_write_count == 1)) begin
_memory_fsm <= _memory_fsm_init;
end
end
_memory_fsm_200: begin
if(memory_arvalid) begin
memory_arready <= 1;
_read_addr <= memory_araddr;
_read_count <= memory_arlen + 1;
end
__memory_fsm_cond_200_1_1 <= 1;
if(!memory_arvalid) begin
_memory_fsm <= _memory_fsm_init;
end
if(memory_arvalid) begin
_memory_fsm <= _memory_fsm_201;
end
end
_memory_fsm_201: begin
_memory_fsm <= _memory_fsm_202;
end
_memory_fsm_202: begin
_memory_fsm <= _memory_fsm_203;
end
_memory_fsm_203: begin
_memory_fsm <= _memory_fsm_204;
end
_memory_fsm_204: begin
_memory_fsm <= _memory_fsm_205;
end
_memory_fsm_205: begin
_memory_fsm <= _memory_fsm_206;
end
_memory_fsm_206: begin
_memory_fsm <= _memory_fsm_207;
end
_memory_fsm_207: begin
_memory_fsm <= _memory_fsm_208;
end
_memory_fsm_208: begin
_memory_fsm <= _memory_fsm_209;
end
_memory_fsm_209: begin
_memory_fsm <= _memory_fsm_210;
end
_memory_fsm_210: begin
_memory_fsm <= _memory_fsm_211;
end
_memory_fsm_211: begin
if(memory_rready | !memory_rvalid) begin
memory_rdata[7:0] <= _memory_mem[_read_addr + 0];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[15:8] <= _memory_mem[_read_addr + 1];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[23:16] <= _memory_mem[_read_addr + 2];
end
if(memory_rready | !memory_rvalid) begin
memory_rdata[31:24] <= _memory_mem[_read_addr + 3];
end
if((_sleep_count < 3) && (_read_count > 0) && memory_rready | !memory_rvalid) begin
memory_rvalid <= 1;
_read_addr <= _read_addr + 4;
_read_count <= _read_count - 1;
end
if((_sleep_count < 3) && (_read_count == 1) && memory_rready | !memory_rvalid) begin
memory_rlast <= 1;
end
__memory_fsm_cond_211_2_1 <= 1;
if(memory_rvalid && !memory_rready) begin
memory_rvalid <= memory_rvalid;
memory_rdata <= memory_rdata;
memory_rlast <= memory_rlast;
end
if(memory_rvalid && memory_rready && (_read_count == 0)) begin
_memory_fsm <= _memory_fsm_init;
end
end
endcase
end
end
endmodule
module blinkled
(
input CLK,
input RST,
output reg [32-1:0] myaxi_awaddr,
output reg [8-1:0] myaxi_awlen,
output reg myaxi_awvalid,
input myaxi_awready,
output reg [32-1:0] myaxi_wdata,
output reg [4-1:0] myaxi_wstrb,
output reg myaxi_wlast,
output reg myaxi_wvalid,
input myaxi_wready,
output reg [32-1:0] myaxi_araddr,
output reg [8-1:0] myaxi_arlen,
output reg myaxi_arvalid,
input myaxi_arready,
input [32-1:0] myaxi_rdata,
input myaxi_rlast,
input myaxi_rvalid,
output myaxi_rready
);
reg [32-1:0] timer;
reg [10-1:0] ram_a_0_addr;
wire [32-1:0] ram_a_0_rdata;
reg [32-1:0] ram_a_0_wdata;
reg ram_a_0_wenable;
ram_a
inst_ram_a
(
.CLK(CLK),
.ram_a_0_addr(ram_a_0_addr),
.ram_a_0_rdata(ram_a_0_rdata),
.ram_a_0_wdata(ram_a_0_wdata),
.ram_a_0_wenable(ram_a_0_wenable)
);
reg [10-1:0] ram_b_0_addr;
wire [32-1:0] ram_b_0_rdata;
reg [32-1:0] ram_b_0_wdata;
reg ram_b_0_wenable;
ram_b
inst_ram_b
(
.CLK(CLK),
.ram_b_0_addr(ram_b_0_addr),
.ram_b_0_rdata(ram_b_0_rdata),
.ram_b_0_wdata(ram_b_0_wdata),
.ram_b_0_wenable(ram_b_0_wenable)
);
reg [10-1:0] ram_c_0_addr;
wire [32-1:0] ram_c_0_rdata;
reg [32-1:0] ram_c_0_wdata;
reg ram_c_0_wenable;
ram_c
inst_ram_c
(
.CLK(CLK),
.ram_c_0_addr(ram_c_0_addr),
.ram_c_0_rdata(ram_c_0_rdata),
.ram_c_0_wdata(ram_c_0_wdata),
.ram_c_0_wenable(ram_c_0_wenable)
);
reg _myaxi_read_start;
reg [8-1:0] _myaxi_read_op_sel;
reg [32-1:0] _myaxi_read_local_addr;
reg [32-1:0] _myaxi_read_global_addr;
reg [33-1:0] _myaxi_read_size;
reg [32-1:0] _myaxi_read_local_stride;
reg _myaxi_read_idle;
reg _myaxi_write_start;
reg [8-1:0] _myaxi_write_op_sel;
reg [32-1:0] _myaxi_write_local_addr;
reg [32-1:0] _myaxi_write_global_addr;
reg [33-1:0] _myaxi_write_size;
reg [32-1:0] _myaxi_write_local_stride;
reg _myaxi_write_idle;
wire _myaxi_write_data_done;
reg [32-1:0] th_matmul;
localparam th_matmul_init = 0;
reg signed [32-1:0] _th_matmul_matrix_size_0;
reg signed [32-1:0] _th_matmul_a_offset_1;
reg signed [32-1:0] _th_matmul_b_offset_2;
reg signed [32-1:0] _th_matmul_c_offset_3;
reg signed [32-1:0] _th_matmul_start_time_4;
reg signed [32-1:0] _th_matmul_matrix_size_5;
reg signed [32-1:0] _th_matmul_a_offset_6;
reg signed [32-1:0] _th_matmul_b_offset_7;
reg signed [32-1:0] _th_matmul_c_offset_8;
reg signed [32-1:0] _th_matmul_a_addr_9;
reg signed [32-1:0] _th_matmul_c_addr_10;
reg signed [32-1:0] _th_matmul_i_11;
reg axim_flag_0;
reg [32-1:0] _d1_th_matmul;
reg _th_matmul_cond_6_0_1;
reg _myaxi_ram_a_0_read_start;
reg [8-1:0] _myaxi_ram_a_0_read_op_sel;
reg [32-1:0] _myaxi_ram_a_0_read_local_addr;
reg [32-1:0] _myaxi_ram_a_0_read_global_addr;
reg [33-1:0] _myaxi_ram_a_0_read_size;
reg [32-1:0] _myaxi_ram_a_0_read_local_stride;
reg [32-1:0] _myaxi_read_fsm;
localparam _myaxi_read_fsm_init = 0;
reg [32-1:0] _myaxi_read_cur_global_addr;
reg [33-1:0] _myaxi_read_cur_size;
reg [33-1:0] _myaxi_read_rest_size;
reg [32-1:0] _wdata_1;
reg _wvalid_2;
reg [34-1:0] _tmp_3;
reg _tmp_4;
wire [32-1:0] _dataflow__variable_odata_0;
wire _dataflow__variable_ovalid_0;
wire _dataflow__variable_oready_0;
assign _dataflow__variable_oready_0 = (_tmp_3 > 0) && !_tmp_4;
reg _ram_a_cond_0_1;
reg [9-1:0] _tmp_5;
reg _myaxi_cond_0_1;
assign myaxi_rready = _myaxi_read_fsm == 3;
reg [32-1:0] _d1__myaxi_read_fsm;
reg __myaxi_read_fsm_cond_3_0_1;
reg axim_flag_6;
reg __myaxi_read_fsm_cond_4_1_1;
reg signed [32-1:0] _th_matmul_b_addr_12;
reg signed [32-1:0] _th_matmul_j_13;
reg axim_flag_7;
reg _th_matmul_cond_13_1_1;
reg _myaxi_ram_b_0_read_start;
reg [8-1:0] _myaxi_ram_b_0_read_op_sel;
reg [32-1:0] _myaxi_ram_b_0_read_local_addr;
reg [32-1:0] _myaxi_ram_b_0_read_global_addr;
reg [33-1:0] _myaxi_ram_b_0_read_size;
reg [32-1:0] _myaxi_ram_b_0_read_local_stride;
reg [32-1:0] _wdata_8;
reg _wvalid_9;
reg [34-1:0] _tmp_10;
reg _tmp_11;
wire [32-1:0] _dataflow__variable_odata_1;
wire _dataflow__variable_ovalid_1;
wire _dataflow__variable_oready_1;
assign _dataflow__variable_oready_1 = (_tmp_10 > 0) && !_tmp_11;
reg _ram_b_cond_0_1;
reg __myaxi_read_fsm_cond_3_2_1;
reg signed [32-1:0] _th_matmul_sum_14;
reg signed [32-1:0] _th_matmul_k_15;
reg _tmp_12;
reg _ram_a_cond_1_1;
reg _ram_a_cond_2_1;
reg _ram_a_cond_2_2;
reg signed [32-1:0] _tmp_13;
reg signed [32-1:0] _th_matmul_x_16;
reg _tmp_14;
reg _ram_b_cond_1_1;
reg _ram_b_cond_2_1;
reg _ram_b_cond_2_2;
reg signed [32-1:0] _tmp_15;
reg signed [32-1:0] _th_matmul_y_17;
reg _ram_c_cond_0_1;
reg axim_flag_16;
reg _th_matmul_cond_29_2_1;
reg _myaxi_ram_c_0_write_start;
reg [8-1:0] _myaxi_ram_c_0_write_op_sel;
reg [32-1:0] _myaxi_ram_c_0_write_local_addr;
reg [32-1:0] _myaxi_ram_c_0_write_global_addr;
reg [33-1:0] _myaxi_ram_c_0_write_size;
reg [32-1:0] _myaxi_ram_c_0_write_local_stride;
reg [32-1:0] _myaxi_write_fsm;
localparam _myaxi_write_fsm_init = 0;
reg [32-1:0] _myaxi_write_cur_global_addr;
reg [33-1:0] _myaxi_write_cur_size;
reg [33-1:0] _myaxi_write_rest_size;
reg _tmp_17;
reg _tmp_18;
wire _tmp_19;
wire _tmp_20;
assign _tmp_20 = 1;
localparam _tmp_21 = 1;
wire [_tmp_21-1:0] _tmp_22;
assign _tmp_22 = (_tmp_19 || !_tmp_17) && (_tmp_20 || !_tmp_18);
reg [_tmp_21-1:0] __tmp_22_1;
wire signed [32-1:0] _tmp_23;
reg signed [32-1:0] __tmp_23_1;
assign _tmp_23 = (__tmp_22_1)? ram_c_0_rdata : __tmp_23_1;
reg _tmp_24;
reg _tmp_25;
reg _tmp_26;
reg _tmp_27;
reg [34-1:0] _tmp_28;
reg [9-1:0] _tmp_29;
reg _myaxi_cond_1_1;
reg _tmp_30;
wire [32-1:0] _dataflow__variable_odata_2;
wire _dataflow__variable_ovalid_2;
wire _dataflow__variable_oready_2;
assign _dataflow__variable_oready_2 = (_myaxi_write_fsm == 3) && (_myaxi_write_op_sel == 1) && ((_tmp_29 > 0) && (myaxi_wready || !myaxi_wvalid));
reg _myaxi_cond_2_1;
assign _myaxi_write_data_done = (_tmp_30 && myaxi_wvalid && myaxi_wready)? 1 : 0;
reg axim_flag_31;
reg [32-1:0] _d1__myaxi_write_fsm;
reg __myaxi_write_fsm_cond_4_0_1;
reg signed [32-1:0] _th_matmul_end_time_18;
reg signed [32-1:0] _th_matmul_time_19;
reg signed [32-1:0] _th_matmul_matrix_size_20;
reg signed [32-1:0] _th_matmul_a_offset_21;
reg signed [32-1:0] _th_matmul_b_offset_22;
reg signed [32-1:0] _th_matmul_c_offset_23;
reg signed [32-1:0] _th_matmul_all_ok_24;
reg signed [32-1:0] _th_matmul_c_addr_25;
reg signed [32-1:0] _th_matmul_i_26;
reg axim_flag_32;
reg _th_matmul_cond_44_3_1;
reg _myaxi_ram_c_0_read_start;
reg [8-1:0] _myaxi_ram_c_0_read_op_sel;
reg [32-1:0] _myaxi_ram_c_0_read_local_addr;
reg [32-1:0] _myaxi_ram_c_0_read_global_addr;
reg [33-1:0] _myaxi_ram_c_0_read_size;
reg [32-1:0] _myaxi_ram_c_0_read_local_stride;
reg [32-1:0] _wdata_33;
reg _wvalid_34;
reg [34-1:0] _tmp_35;
reg _tmp_36;
wire [32-1:0] _dataflow__variable_odata_4;
wire _dataflow__variable_ovalid_4;
wire _dataflow__variable_oready_4;
assign _dataflow__variable_oready_4 = (_tmp_35 > 0) && !_tmp_36;
reg _ram_c_cond_1_1;
reg __myaxi_read_fsm_cond_3_3_1;
reg signed [32-1:0] _th_matmul_j_27;
reg _tmp_37;
reg _ram_c_cond_2_1;
reg _ram_c_cond_3_1;
reg _ram_c_cond_3_2;
reg signed [32-1:0] _tmp_38;
reg signed [32-1:0] _th_matmul_v_28;
always @(posedge CLK) begin
if(RST) begin
timer <= 0;
end else begin
timer <= timer + 1;
end
end
always @(posedge CLK) begin
if(RST) begin
ram_a_0_addr <= 0;
_tmp_3 <= 0;
ram_a_0_wdata <= 0;
ram_a_0_wenable <= 0;
_tmp_4 <= 0;
_ram_a_cond_0_1 <= 0;
_ram_a_cond_1_1 <= 0;
_tmp_12 <= 0;
_ram_a_cond_2_1 <= 0;
_ram_a_cond_2_2 <= 0;
end else begin
if(_ram_a_cond_2_2) begin
_tmp_12 <= 0;
end
if(_ram_a_cond_0_1) begin
ram_a_0_wenable <= 0;
_tmp_4 <= 0;
end
if(_ram_a_cond_1_1) begin
_tmp_12 <= 1;
end
_ram_a_cond_2_2 <= _ram_a_cond_2_1;
if(_myaxi_read_start && (_myaxi_read_op_sel == 1) && (_tmp_3 == 0)) begin
ram_a_0_addr <= _myaxi_read_local_addr - _myaxi_read_local_stride;
_tmp_3 <= _myaxi_read_size;
end
if(_dataflow__variable_ovalid_0 && ((_tmp_3 > 0) && !_tmp_4) && (_tmp_3 > 0)) begin
ram_a_0_addr <= ram_a_0_addr + _myaxi_read_local_stride;
ram_a_0_wdata <= _dataflow__variable_odata_0;
ram_a_0_wenable <= 1;
_tmp_3 <= _tmp_3 - 1;
end
if(_dataflow__variable_ovalid_0 && ((_tmp_3 > 0) && !_tmp_4) && (_tmp_3 == 1)) begin
_tmp_4 <= 1;
end
_ram_a_cond_0_1 <= 1;
if(th_matmul == 20) begin
ram_a_0_addr <= _th_matmul_k_15;
end
_ram_a_cond_1_1 <= th_matmul == 20;
_ram_a_cond_2_1 <= th_matmul == 20;
end
end
always @(posedge CLK) begin
if(RST) begin
ram_b_0_addr <= 0;
_tmp_10 <= 0;
ram_b_0_wdata <= 0;
ram_b_0_wenable <= 0;
_tmp_11 <= 0;
_ram_b_cond_0_1 <= 0;
_ram_b_cond_1_1 <= 0;
_tmp_14 <= 0;
_ram_b_cond_2_1 <= 0;
_ram_b_cond_2_2 <= 0;
end else begin
if(_ram_b_cond_2_2) begin
_tmp_14 <= 0;
end
if(_ram_b_cond_0_1) begin
ram_b_0_wenable <= 0;
_tmp_11 <= 0;
end
if(_ram_b_cond_1_1) begin
_tmp_14 <= 1;
end
_ram_b_cond_2_2 <= _ram_b_cond_2_1;
if(_myaxi_read_start && (_myaxi_read_op_sel == 2) && (_tmp_10 == 0)) begin
ram_b_0_addr <= _myaxi_read_local_addr - _myaxi_read_local_stride;
_tmp_10 <= _myaxi_read_size;
end
if(_dataflow__variable_ovalid_1 && ((_tmp_10 > 0) && !_tmp_11) && (_tmp_10 > 0)) begin
ram_b_0_addr <= ram_b_0_addr + _myaxi_read_local_stride;
ram_b_0_wdata <= _dataflow__variable_odata_1;
ram_b_0_wenable <= 1;
_tmp_10 <= _tmp_10 - 1;
end
if(_dataflow__variable_ovalid_1 && ((_tmp_10 > 0) && !_tmp_11) && (_tmp_10 == 1)) begin
_tmp_11 <= 1;
end
_ram_b_cond_0_1 <= 1;
if(th_matmul == 22) begin
ram_b_0_addr <= _th_matmul_k_15;
end
_ram_b_cond_1_1 <= th_matmul == 22;
_ram_b_cond_2_1 <= th_matmul == 22;
end
end
always @(posedge CLK) begin
if(RST) begin
ram_c_0_addr <= 0;
ram_c_0_wdata <= 0;
ram_c_0_wenable <= 0;
_ram_c_cond_0_1 <= 0;
__tmp_22_1 <= 0;
__tmp_23_1 <= 0;
_tmp_27 <= 0;
_tmp_17 <= 0;
_tmp_18 <= 0;
_tmp_25 <= 0;
_tmp_26 <= 0;
_tmp_24 <= 0;
_tmp_28 <= 0;
_tmp_35 <= 0;
_tmp_36 <= 0;
_ram_c_cond_1_1 <= 0;
_ram_c_cond_2_1 <= 0;
_tmp_37 <= 0;
_ram_c_cond_3_1 <= 0;
_ram_c_cond_3_2 <= 0;
end else begin
if(_ram_c_cond_3_2) begin
_tmp_37 <= 0;
end
if(_ram_c_cond_0_1) begin
ram_c_0_wenable <= 0;
end
if(_ram_c_cond_1_1) begin
ram_c_0_wenable <= 0;
_tmp_36 <= 0;
end
if(_ram_c_cond_2_1) begin
_tmp_37 <= 1;
end
_ram_c_cond_3_2 <= _ram_c_cond_3_1;
if(th_matmul == 26) begin
ram_c_0_addr <= _th_matmul_j_13;
ram_c_0_wdata <= _th_matmul_sum_14;
ram_c_0_wenable <= 1;
end
_ram_c_cond_0_1 <= th_matmul == 26;
__tmp_22_1 <= _tmp_22;
__tmp_23_1 <= _tmp_23;
if((_tmp_19 || !_tmp_17) && (_tmp_20 || !_tmp_18) && _tmp_25) begin
_tmp_27 <= 0;
_tmp_17 <= 0;
_tmp_18 <= 0;
_tmp_25 <= 0;
end
if((_tmp_19 || !_tmp_17) && (_tmp_20 || !_tmp_18) && _tmp_24) begin
_tmp_17 <= 1;
_tmp_18 <= 1;
_tmp_27 <= _tmp_26;
_tmp_26 <= 0;
_tmp_24 <= 0;
_tmp_25 <= 1;
end
if(_myaxi_write_start && (_myaxi_write_op_sel == 1) && (_tmp_28 == 0) && !_tmp_26 && !_tmp_27) begin
ram_c_0_addr <= _myaxi_write_local_addr;
_tmp_28 <= _myaxi_write_size - 1;
_tmp_24 <= 1;
_tmp_26 <= _myaxi_write_size == 1;
end
if((_tmp_19 || !_tmp_17) && (_tmp_20 || !_tmp_18) && (_tmp_28 > 0)) begin
ram_c_0_addr <= ram_c_0_addr + _myaxi_write_local_stride;
_tmp_28 <= _tmp_28 - 1;
_tmp_24 <= 1;
_tmp_26 <= 0;
end
if((_tmp_19 || !_tmp_17) && (_tmp_20 || !_tmp_18) && (_tmp_28 == 1)) begin
_tmp_26 <= 1;
end
if(_myaxi_read_start && (_myaxi_read_op_sel == 3) && (_tmp_35 == 0)) begin
ram_c_0_addr <= _myaxi_read_local_addr - _myaxi_read_local_stride;
_tmp_35 <= _myaxi_read_size;
end
if(_dataflow__variable_ovalid_4 && ((_tmp_35 > 0) && !_tmp_36) && (_tmp_35 > 0)) begin
ram_c_0_addr <= ram_c_0_addr + _myaxi_read_local_stride;
ram_c_0_wdata <= _dataflow__variable_odata_4;
ram_c_0_wenable <= 1;
_tmp_35 <= _tmp_35 - 1;
end
if(_dataflow__variable_ovalid_4 && ((_tmp_35 > 0) && !_tmp_36) && (_tmp_35 == 1)) begin
_tmp_36 <= 1;
end
_ram_c_cond_1_1 <= 1;
if(th_matmul == 50) begin
ram_c_0_addr <= _th_matmul_j_27;
end
_ram_c_cond_2_1 <= th_matmul == 50;
_ram_c_cond_3_1 <= th_matmul == 50;
end
end
assign _dataflow__variable_odata_2 = _tmp_23;
assign _dataflow__variable_ovalid_2 = _tmp_17;
assign _tmp_19 = 1 && _dataflow__variable_oready_2;
always @(posedge CLK) begin
if(RST) begin
_myaxi_read_start <= 0;
_myaxi_write_start <= 0;
_myaxi_ram_a_0_read_start <= 0;
_myaxi_ram_a_0_read_op_sel <= 0;
_myaxi_ram_a_0_read_local_addr <= 0;
_myaxi_ram_a_0_read_global_addr <= 0;
_myaxi_ram_a_0_read_size <= 0;
_myaxi_ram_a_0_read_local_stride <= 0;
_myaxi_read_idle <= 1;
_myaxi_read_op_sel <= 0;
_myaxi_read_local_addr <= 0;
_myaxi_read_global_addr <= 0;
_myaxi_read_size <= 0;
_myaxi_read_local_stride <= 0;
myaxi_araddr <= 0;
myaxi_arlen <= 0;
myaxi_arvalid <= 0;
_tmp_5 <= 0;
_myaxi_cond_0_1 <= 0;
_myaxi_ram_b_0_read_start <= 0;
_myaxi_ram_b_0_read_op_sel <= 0;
_myaxi_ram_b_0_read_local_addr <= 0;
_myaxi_ram_b_0_read_global_addr <= 0;
_myaxi_ram_b_0_read_size <= 0;
_myaxi_ram_b_0_read_local_stride <= 0;
_myaxi_ram_c_0_write_start <= 0;
_myaxi_ram_c_0_write_op_sel <= 0;
_myaxi_ram_c_0_write_local_addr <= 0;
_myaxi_ram_c_0_write_global_addr <= 0;
_myaxi_ram_c_0_write_size <= 0;
_myaxi_ram_c_0_write_local_stride <= 0;
_myaxi_write_idle <= 1;
_myaxi_write_op_sel <= 0;
_myaxi_write_local_addr <= 0;
_myaxi_write_global_addr <= 0;
_myaxi_write_size <= 0;
_myaxi_write_local_stride <= 0;
myaxi_awaddr <= 0;
myaxi_awlen <= 0;
myaxi_awvalid <= 0;
_tmp_29 <= 0;
_myaxi_cond_1_1 <= 0;
myaxi_wdata <= 0;
myaxi_wvalid <= 0;
myaxi_wlast <= 0;
myaxi_wstrb <= 0;
_tmp_30 <= 0;
_myaxi_cond_2_1 <= 0;
_myaxi_ram_c_0_read_start <= 0;
_myaxi_ram_c_0_read_op_sel <= 0;
_myaxi_ram_c_0_read_local_addr <= 0;
_myaxi_ram_c_0_read_global_addr <= 0;
_myaxi_ram_c_0_read_size <= 0;
_myaxi_ram_c_0_read_local_stride <= 0;
end else begin
if(_myaxi_cond_0_1) begin
myaxi_arvalid <= 0;
end
if(_myaxi_cond_1_1) begin
myaxi_awvalid <= 0;
end
if(_myaxi_cond_2_1) begin
myaxi_wvalid <= 0;
myaxi_wlast <= 0;
_tmp_30 <= 0;
end
_myaxi_read_start <= 0;
_myaxi_write_start <= 0;
_myaxi_ram_a_0_read_start <= 0;
if(axim_flag_0) begin
_myaxi_ram_a_0_read_start <= 1;
_myaxi_ram_a_0_read_op_sel <= 1;
_myaxi_ram_a_0_read_local_addr <= 0;
_myaxi_ram_a_0_read_global_addr <= _th_matmul_a_addr_9;
_myaxi_ram_a_0_read_size <= _th_matmul_matrix_size_5;
_myaxi_ram_a_0_read_local_stride <= 1;
end
if(_myaxi_ram_a_0_read_start) begin
_myaxi_read_idle <= 0;
end
if(_myaxi_ram_a_0_read_start) begin
_myaxi_read_start <= 1;
_myaxi_read_op_sel <= _myaxi_ram_a_0_read_op_sel;
_myaxi_read_local_addr <= _myaxi_ram_a_0_read_local_addr;
_myaxi_read_global_addr <= _myaxi_ram_a_0_read_global_addr;
_myaxi_read_size <= _myaxi_ram_a_0_read_size;
_myaxi_read_local_stride <= _myaxi_ram_a_0_read_local_stride;
end
if((_myaxi_read_fsm == 2) && ((myaxi_arready || !myaxi_arvalid) && (_tmp_5 == 0))) begin
myaxi_araddr <= _myaxi_read_cur_global_addr;
myaxi_arlen <= _myaxi_read_cur_size - 1;
myaxi_arvalid <= 1;
_tmp_5 <= _myaxi_read_cur_size;
end
_myaxi_cond_0_1 <= 1;
if(myaxi_arvalid && !myaxi_arready) begin
myaxi_arvalid <= myaxi_arvalid;
end
if(myaxi_rready && myaxi_rvalid && (_tmp_5 > 0)) begin
_tmp_5 <= _tmp_5 - 1;
end
if(axim_flag_6) begin
_myaxi_read_idle <= 1;
end
_myaxi_ram_b_0_read_start <= 0;
if(axim_flag_7) begin
_myaxi_ram_b_0_read_start <= 1;
_myaxi_ram_b_0_read_op_sel <= 2;
_myaxi_ram_b_0_read_local_addr <= 0;
_myaxi_ram_b_0_read_global_addr <= _th_matmul_b_addr_12;
_myaxi_ram_b_0_read_size <= _th_matmul_matrix_size_5;
_myaxi_ram_b_0_read_local_stride <= 1;
end
if(_myaxi_ram_b_0_read_start) begin
_myaxi_read_idle <= 0;
end
if(_myaxi_ram_b_0_read_start) begin
_myaxi_read_start <= 1;
_myaxi_read_op_sel <= _myaxi_ram_b_0_read_op_sel;
_myaxi_read_local_addr <= _myaxi_ram_b_0_read_local_addr;
_myaxi_read_global_addr <= _myaxi_ram_b_0_read_global_addr;
_myaxi_read_size <= _myaxi_ram_b_0_read_size;
_myaxi_read_local_stride <= _myaxi_ram_b_0_read_local_stride;
end
_myaxi_ram_c_0_write_start <= 0;
if(axim_flag_16) begin
_myaxi_ram_c_0_write_start <= 1;
_myaxi_ram_c_0_write_op_sel <= 1;
_myaxi_ram_c_0_write_local_addr <= 0;
_myaxi_ram_c_0_write_global_addr <= _th_matmul_c_addr_10;
_myaxi_ram_c_0_write_size <= _th_matmul_matrix_size_5;
_myaxi_ram_c_0_write_local_stride <= 1;
end
if(_myaxi_ram_c_0_write_start) begin
_myaxi_write_idle <= 0;
end
if(_myaxi_ram_c_0_write_start) begin
_myaxi_write_start <= 1;
_myaxi_write_op_sel <= _myaxi_ram_c_0_write_op_sel;
_myaxi_write_local_addr <= _myaxi_ram_c_0_write_local_addr;
_myaxi_write_global_addr <= _myaxi_ram_c_0_write_global_addr;
_myaxi_write_size <= _myaxi_ram_c_0_write_size;
_myaxi_write_local_stride <= _myaxi_ram_c_0_write_local_stride;
end
if((_myaxi_write_fsm == 2) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_29 == 0))) begin
myaxi_awaddr <= _myaxi_write_cur_global_addr;
myaxi_awlen <= _myaxi_write_cur_size - 1;
myaxi_awvalid <= 1;
_tmp_29 <= _myaxi_write_cur_size;
end
if((_myaxi_write_fsm == 2) && ((myaxi_awready || !myaxi_awvalid) && (_tmp_29 == 0)) && (_myaxi_write_cur_size == 0)) begin
myaxi_awvalid <= 0;
end
_myaxi_cond_1_1 <= 1;
if(myaxi_awvalid && !myaxi_awready) begin
myaxi_awvalid <= myaxi_awvalid;
end
if(_dataflow__variable_ovalid_2 && ((_myaxi_write_fsm == 3) && (_myaxi_write_op_sel == 1) && ((_tmp_29 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_29 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_29 > 0))) begin
myaxi_wdata <= _dataflow__variable_odata_2;
myaxi_wvalid <= 1;
myaxi_wlast <= 0;
myaxi_wstrb <= { 4{ 1'd1 } };
_tmp_29 <= _tmp_29 - 1;
end
if(_dataflow__variable_ovalid_2 && ((_myaxi_write_fsm == 3) && (_myaxi_write_op_sel == 1) && ((_tmp_29 > 0) && (myaxi_wready || !myaxi_wvalid))) && ((_tmp_29 > 0) && (myaxi_wready || !myaxi_wvalid) && (_tmp_29 > 0)) && (_tmp_29 == 1)) begin
myaxi_wlast <= 1;
_tmp_30 <= 1;
end
_myaxi_cond_2_1 <= 1;
if(myaxi_wvalid && !myaxi_wready) begin
myaxi_wvalid <= myaxi_wvalid;
myaxi_wlast <= myaxi_wlast;
_tmp_30 <= _tmp_30;
end
if(axim_flag_31) begin
_myaxi_write_idle <= 1;
end
_myaxi_ram_c_0_read_start <= 0;
if(axim_flag_32) begin
_myaxi_ram_c_0_read_start <= 1;
_myaxi_ram_c_0_read_op_sel <= 3;
_myaxi_ram_c_0_read_local_addr <= 0;
_myaxi_ram_c_0_read_global_addr <= _th_matmul_c_addr_25;
_myaxi_ram_c_0_read_size <= _th_matmul_matrix_size_20;
_myaxi_ram_c_0_read_local_stride <= 1;
end
if(_myaxi_ram_c_0_read_start) begin
_myaxi_read_idle <= 0;
end
if(_myaxi_ram_c_0_read_start) begin
_myaxi_read_start <= 1;
_myaxi_read_op_sel <= _myaxi_ram_c_0_read_op_sel;
_myaxi_read_local_addr <= _myaxi_ram_c_0_read_local_addr;
_myaxi_read_global_addr <= _myaxi_ram_c_0_read_global_addr;
_myaxi_read_size <= _myaxi_ram_c_0_read_size;
_myaxi_read_local_stride <= _myaxi_ram_c_0_read_local_stride;
end
end
end
assign _dataflow__variable_odata_0 = _wdata_1;
assign _dataflow__variable_ovalid_0 = _wvalid_2;
assign _dataflow__variable_odata_1 = _wdata_8;
assign _dataflow__variable_ovalid_1 = _wvalid_9;
assign _dataflow__variable_odata_4 = _wdata_33;
assign _dataflow__variable_ovalid_4 = _wvalid_34;
localparam th_matmul_1 = 1;
localparam th_matmul_2 = 2;
localparam th_matmul_3 = 3;
localparam th_matmul_4 = 4;
localparam th_matmul_5 = 5;
localparam th_matmul_6 = 6;
localparam th_matmul_7 = 7;
localparam th_matmul_8 = 8;
localparam th_matmul_9 = 9;
localparam th_matmul_10 = 10;
localparam th_matmul_11 = 11;
localparam th_matmul_12 = 12;
localparam th_matmul_13 = 13;
localparam th_matmul_14 = 14;
localparam th_matmul_15 = 15;
localparam th_matmul_16 = 16;
localparam th_matmul_17 = 17;
localparam th_matmul_18 = 18;
localparam th_matmul_19 = 19;
localparam th_matmul_20 = 20;
localparam th_matmul_21 = 21;
localparam th_matmul_22 = 22;
localparam th_matmul_23 = 23;
localparam th_matmul_24 = 24;
localparam th_matmul_25 = 25;
localparam th_matmul_26 = 26;
localparam th_matmul_27 = 27;
localparam th_matmul_28 = 28;
localparam th_matmul_29 = 29;
localparam th_matmul_30 = 30;
localparam th_matmul_31 = 31;
localparam th_matmul_32 = 32;
localparam th_matmul_33 = 33;
localparam th_matmul_34 = 34;
localparam th_matmul_35 = 35;
localparam th_matmul_36 = 36;
localparam th_matmul_37 = 37;
localparam th_matmul_38 = 38;
localparam th_matmul_39 = 39;
localparam th_matmul_40 = 40;
localparam th_matmul_41 = 41;
localparam th_matmul_42 = 42;
localparam th_matmul_43 = 43;
localparam th_matmul_44 = 44;
localparam th_matmul_45 = 45;
localparam th_matmul_46 = 46;
localparam th_matmul_47 = 47;
localparam th_matmul_48 = 48;
localparam th_matmul_49 = 49;
localparam th_matmul_50 = 50;
localparam th_matmul_51 = 51;
localparam th_matmul_52 = 52;
localparam th_matmul_53 = 53;
localparam th_matmul_54 = 54;
localparam th_matmul_55 = 55;
localparam th_matmul_56 = 56;
localparam th_matmul_57 = 57;
localparam th_matmul_58 = 58;
localparam th_matmul_59 = 59;
localparam th_matmul_60 = 60;
localparam th_matmul_61 = 61;
localparam th_matmul_62 = 62;
localparam th_matmul_63 = 63;
localparam th_matmul_64 = 64;
localparam th_matmul_65 = 65;
localparam th_matmul_66 = 66;
always @(posedge CLK) begin
if(RST) begin
th_matmul <= th_matmul_init;
_d1_th_matmul <= th_matmul_init;
_th_matmul_matrix_size_0 <= 0;
_th_matmul_a_offset_1 <= 0;
_th_matmul_b_offset_2 <= 0;
_th_matmul_c_offset_3 <= 0;
_th_matmul_start_time_4 <= 0;
_th_matmul_matrix_size_5 <= 0;
_th_matmul_a_offset_6 <= 0;
_th_matmul_b_offset_7 <= 0;
_th_matmul_c_offset_8 <= 0;
_th_matmul_a_addr_9 <= 0;
_th_matmul_c_addr_10 <= 0;
_th_matmul_i_11 <= 0;
axim_flag_0 <= 0;
_th_matmul_cond_6_0_1 <= 0;
_th_matmul_b_addr_12 <= 0;
_th_matmul_j_13 <= 0;
axim_flag_7 <= 0;
_th_matmul_cond_13_1_1 <= 0;
_th_matmul_sum_14 <= 0;
_th_matmul_k_15 <= 0;
_tmp_13 <= 0;
_th_matmul_x_16 <= 0;
_tmp_15 <= 0;
_th_matmul_y_17 <= 0;
axim_flag_16 <= 0;
_th_matmul_cond_29_2_1 <= 0;
_th_matmul_end_time_18 <= 0;
_th_matmul_time_19 <= 0;
_th_matmul_matrix_size_20 <= 0;
_th_matmul_a_offset_21 <= 0;
_th_matmul_b_offset_22 <= 0;
_th_matmul_c_offset_23 <= 0;
_th_matmul_all_ok_24 <= 0;
_th_matmul_c_addr_25 <= 0;
_th_matmul_i_26 <= 0;
axim_flag_32 <= 0;
_th_matmul_cond_44_3_1 <= 0;
_th_matmul_j_27 <= 0;
_tmp_38 <= 0;
_th_matmul_v_28 <= 0;
end else begin
_d1_th_matmul <= th_matmul;
case(_d1_th_matmul)
th_matmul_6: begin
if(_th_matmul_cond_6_0_1) begin
axim_flag_0 <= 0;
end
end
th_matmul_13: begin
if(_th_matmul_cond_13_1_1) begin
axim_flag_7 <= 0;
end
end
th_matmul_29: begin
if(_th_matmul_cond_29_2_1) begin
axim_flag_16 <= 0;
end
end
th_matmul_44: begin
if(_th_matmul_cond_44_3_1) begin
axim_flag_32 <= 0;
end
end
endcase
case(th_matmul)
th_matmul_init: begin
_th_matmul_matrix_size_0 <= 16;
_th_matmul_a_offset_1 <= 0;
_th_matmul_b_offset_2 <= 1024;
_th_matmul_c_offset_3 <= 2048;
th_matmul <= th_matmul_1;
end
th_matmul_1: begin
_th_matmul_start_time_4 <= timer;
th_matmul <= th_matmul_2;
end
th_matmul_2: begin
_th_matmul_matrix_size_5 <= _th_matmul_matrix_size_0;
_th_matmul_a_offset_6 <= _th_matmul_a_offset_1;
_th_matmul_b_offset_7 <= _th_matmul_b_offset_2;
_th_matmul_c_offset_8 <= _th_matmul_c_offset_3;
th_matmul <= th_matmul_3;
end
th_matmul_3: begin
_th_matmul_a_addr_9 <= _th_matmul_a_offset_6;
_th_matmul_c_addr_10 <= _th_matmul_c_offset_8;
th_matmul <= th_matmul_4;
end
th_matmul_4: begin
_th_matmul_i_11 <= 0;
th_matmul <= th_matmul_5;
end
th_matmul_5: begin
if(_th_matmul_i_11 < _th_matmul_matrix_size_5) begin
th_matmul <= th_matmul_6;
end else begin
th_matmul <= th_matmul_36;
end
end
th_matmul_6: begin
axim_flag_0 <= 1;
_th_matmul_cond_6_0_1 <= 1;
th_matmul <= th_matmul_7;
end
th_matmul_7: begin
th_matmul <= th_matmul_8;
end
th_matmul_8: begin
th_matmul <= th_matmul_9;
end
th_matmul_9: begin
if(_myaxi_read_idle) begin
th_matmul <= th_matmul_10;
end
end
th_matmul_10: begin
_th_matmul_b_addr_12 <= _th_matmul_b_offset_7;
th_matmul <= th_matmul_11;
end
th_matmul_11: begin
_th_matmul_j_13 <= 0;
th_matmul <= th_matmul_12;
end
th_matmul_12: begin
if(_th_matmul_j_13 < _th_matmul_matrix_size_5) begin
th_matmul <= th_matmul_13;
end else begin
th_matmul <= th_matmul_29;
end
end
th_matmul_13: begin
axim_flag_7 <= 1;
_th_matmul_cond_13_1_1 <= 1;
th_matmul <= th_matmul_14;
end
th_matmul_14: begin
th_matmul <= th_matmul_15;
end
th_matmul_15: begin
th_matmul <= th_matmul_16;
end
th_matmul_16: begin
if(_myaxi_read_idle) begin
th_matmul <= th_matmul_17;
end
end
th_matmul_17: begin
_th_matmul_sum_14 <= 0;
th_matmul <= th_matmul_18;
end
th_matmul_18: begin
_th_matmul_k_15 <= 0;
th_matmul <= th_matmul_19;
end
th_matmul_19: begin
if(_th_matmul_k_15 < _th_matmul_matrix_size_5) begin
th_matmul <= th_matmul_20;
end else begin
th_matmul <= th_matmul_26;
end
end
th_matmul_20: begin
if(_tmp_12) begin
_tmp_13 <= ram_a_0_rdata;
end
if(_tmp_12) begin
th_matmul <= th_matmul_21;
end
end
th_matmul_21: begin
_th_matmul_x_16 <= _tmp_13;
th_matmul <= th_matmul_22;
end
th_matmul_22: begin
if(_tmp_14) begin
_tmp_15 <= ram_b_0_rdata;
end
if(_tmp_14) begin
th_matmul <= th_matmul_23;
end
end
th_matmul_23: begin
_th_matmul_y_17 <= _tmp_15;
th_matmul <= th_matmul_24;
end
th_matmul_24: begin
_th_matmul_sum_14 <= _th_matmul_sum_14 + _th_matmul_x_16 * _th_matmul_y_17;
th_matmul <= th_matmul_25;
end
th_matmul_25: begin
_th_matmul_k_15 <= _th_matmul_k_15 + 1;
th_matmul <= th_matmul_19;
end
th_matmul_26: begin
th_matmul <= th_matmul_27;
end
th_matmul_27: begin
_th_matmul_b_addr_12 <= _th_matmul_b_addr_12 + (_th_matmul_matrix_size_5 << 2);
th_matmul <= th_matmul_28;
end
th_matmul_28: begin
_th_matmul_j_13 <= _th_matmul_j_13 + 1;
th_matmul <= th_matmul_12;
end
th_matmul_29: begin
axim_flag_16 <= 1;
_th_matmul_cond_29_2_1 <= 1;
th_matmul <= th_matmul_30;
end
th_matmul_30: begin
th_matmul <= th_matmul_31;
end
th_matmul_31: begin
th_matmul <= th_matmul_32;
end
th_matmul_32: begin
if(_myaxi_write_idle) begin
th_matmul <= th_matmul_33;
end
end
th_matmul_33: begin
_th_matmul_a_addr_9 <= _th_matmul_a_addr_9 + (_th_matmul_matrix_size_5 << 2);
th_matmul <= th_matmul_34;
end
th_matmul_34: begin
_th_matmul_c_addr_10 <= _th_matmul_c_addr_10 + (_th_matmul_matrix_size_5 << 2);
th_matmul <= th_matmul_35;
end
th_matmul_35: begin
_th_matmul_i_11 <= _th_matmul_i_11 + 1;
th_matmul <= th_matmul_5;
end
th_matmul_36: begin
_th_matmul_end_time_18 <= timer;
th_matmul <= th_matmul_37;
end
th_matmul_37: begin
_th_matmul_time_19 <= _th_matmul_end_time_18 - _th_matmul_start_time_4;
th_matmul <= th_matmul_38;
end
th_matmul_38: begin
$display("Time (cycles): %d", _th_matmul_time_19);
th_matmul <= th_matmul_39;
end
th_matmul_39: begin
_th_matmul_matrix_size_20 <= _th_matmul_matrix_size_0;
_th_matmul_a_offset_21 <= _th_matmul_a_offset_1;
_th_matmul_b_offset_22 <= _th_matmul_b_offset_2;
_th_matmul_c_offset_23 <= _th_matmul_c_offset_3;
th_matmul <= th_matmul_40;
end
th_matmul_40: begin
_th_matmul_all_ok_24 <= 1;
th_matmul <= th_matmul_41;
end
th_matmul_41: begin
_th_matmul_c_addr_25 <= _th_matmul_c_offset_23;
th_matmul <= th_matmul_42;
end
th_matmul_42: begin
_th_matmul_i_26 <= 0;
th_matmul <= th_matmul_43;
end
th_matmul_43: begin
if(_th_matmul_i_26 < _th_matmul_matrix_size_20) begin
th_matmul <= th_matmul_44;
end else begin
th_matmul <= th_matmul_61;
end
end
th_matmul_44: begin
axim_flag_32 <= 1;
_th_matmul_cond_44_3_1 <= 1;
th_matmul <= th_matmul_45;
end
th_matmul_45: begin
th_matmul <= th_matmul_46;
end
th_matmul_46: begin
th_matmul <= th_matmul_47;
end
th_matmul_47: begin
if(_myaxi_read_idle) begin
th_matmul <= th_matmul_48;
end
end
th_matmul_48: begin
_th_matmul_j_27 <= 0;
th_matmul <= th_matmul_49;
end
th_matmul_49: begin
if(_th_matmul_j_27 < _th_matmul_matrix_size_20) begin
th_matmul <= th_matmul_50;
end else begin
th_matmul <= th_matmul_59;
end
end
th_matmul_50: begin
if(_tmp_37) begin
_tmp_38 <= ram_c_0_rdata;
end
if(_tmp_37) begin
th_matmul <= th_matmul_51;
end
end
th_matmul_51: begin
_th_matmul_v_28 <= _tmp_38;
th_matmul <= th_matmul_52;
end
th_matmul_52: begin
if((_th_matmul_i_26 == _th_matmul_j_27) && (_th_matmul_v_28 !== (_th_matmul_i_26 + 1 << 1))) begin
th_matmul <= th_matmul_53;
end else begin
th_matmul <= th_matmul_55;
end
end
th_matmul_53: begin
_th_matmul_all_ok_24 <= 0;
th_matmul <= th_matmul_54;
end
th_matmul_54: begin
$display("NG [%d,%d] = %d", _th_matmul_i_26, _th_matmul_j_27, _th_matmul_v_28);
th_matmul <= th_matmul_55;
end
th_matmul_55: begin
if((_th_matmul_i_26 != _th_matmul_j_27) && (_th_matmul_v_28 !== 0)) begin
th_matmul <= th_matmul_56;
end else begin
th_matmul <= th_matmul_58;
end
end
th_matmul_56: begin
_th_matmul_all_ok_24 <= 0;
th_matmul <= th_matmul_57;
end
th_matmul_57: begin
$display("NG [%d,%d] = %d", _th_matmul_i_26, _th_matmul_j_27, _th_matmul_v_28);
th_matmul <= th_matmul_58;
end
th_matmul_58: begin
_th_matmul_j_27 <= _th_matmul_j_27 + 1;
th_matmul <= th_matmul_49;
end
th_matmul_59: begin
_th_matmul_c_addr_25 <= _th_matmul_c_addr_25 + (_th_matmul_matrix_size_20 << 2);
th_matmul <= th_matmul_60;
end
th_matmul_60: begin
_th_matmul_i_26 <= _th_matmul_i_26 + 1;
th_matmul <= th_matmul_43;
end
th_matmul_61: begin
if(_th_matmul_all_ok_24) begin
th_matmul <= th_matmul_62;
end else begin
th_matmul <= th_matmul_64;
end
end
th_matmul_62: begin
$display("# verify: PASSED");
th_matmul <= th_matmul_63;
end
th_matmul_63: begin
th_matmul <= th_matmul_65;
end
th_matmul_64: begin
$display("# verify: FAILED");
th_matmul <= th_matmul_65;
end
th_matmul_65: begin
$finish;
th_matmul <= th_matmul_66;
end
endcase
end
end
localparam _myaxi_read_fsm_1 = 1;
localparam _myaxi_read_fsm_2 = 2;
localparam _myaxi_read_fsm_3 = 3;
localparam _myaxi_read_fsm_4 = 4;
localparam _myaxi_read_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
_myaxi_read_fsm <= _myaxi_read_fsm_init;
_d1__myaxi_read_fsm <= _myaxi_read_fsm_init;
_myaxi_read_cur_global_addr <= 0;
_myaxi_read_rest_size <= 0;
_myaxi_read_cur_size <= 0;
__myaxi_read_fsm_cond_3_0_1 <= 0;
_wvalid_2 <= 0;
_wdata_1 <= 0;
axim_flag_6 <= 0;
__myaxi_read_fsm_cond_4_1_1 <= 0;
__myaxi_read_fsm_cond_3_2_1 <= 0;
_wvalid_9 <= 0;
_wdata_8 <= 0;
__myaxi_read_fsm_cond_3_3_1 <= 0;
_wvalid_34 <= 0;
_wdata_33 <= 0;
end else begin
_d1__myaxi_read_fsm <= _myaxi_read_fsm;
case(_d1__myaxi_read_fsm)
_myaxi_read_fsm_3: begin
if(__myaxi_read_fsm_cond_3_0_1) begin
_wvalid_2 <= 0;
end
if(__myaxi_read_fsm_cond_3_2_1) begin
_wvalid_9 <= 0;
end
if(__myaxi_read_fsm_cond_3_3_1) begin
_wvalid_34 <= 0;
end
end
_myaxi_read_fsm_4: begin
if(__myaxi_read_fsm_cond_4_1_1) begin
axim_flag_6 <= 0;
end
end
endcase
case(_myaxi_read_fsm)
_myaxi_read_fsm_init: begin
if(_myaxi_read_start) begin
_myaxi_read_cur_global_addr <= (_myaxi_read_global_addr >> 2) << 2;
_myaxi_read_rest_size <= _myaxi_read_size;
end
if(_myaxi_read_start && (_myaxi_read_op_sel == 1)) begin
_myaxi_read_fsm <= _myaxi_read_fsm_1;
end
if(_myaxi_read_start && (_myaxi_read_op_sel == 2)) begin
_myaxi_read_fsm <= _myaxi_read_fsm_1;
end
if(_myaxi_read_start && (_myaxi_read_op_sel == 3)) begin
_myaxi_read_fsm <= _myaxi_read_fsm_1;
end
end
_myaxi_read_fsm_1: begin
if((_myaxi_read_rest_size <= 256) && ((_myaxi_read_cur_global_addr & 4095) + (_myaxi_read_rest_size << 2) >= 4096)) begin
_myaxi_read_cur_size <= 4096 - (_myaxi_read_cur_global_addr & 4095) >> 2;
_myaxi_read_rest_size <= _myaxi_read_rest_size - (4096 - (_myaxi_read_cur_global_addr & 4095) >> 2);
end else if(_myaxi_read_rest_size <= 256) begin
_myaxi_read_cur_size <= _myaxi_read_rest_size;
_myaxi_read_rest_size <= 0;
end else if((_myaxi_read_cur_global_addr & 4095) + 1024 >= 4096) begin
_myaxi_read_cur_size <= 4096 - (_myaxi_read_cur_global_addr & 4095) >> 2;
_myaxi_read_rest_size <= _myaxi_read_rest_size - (4096 - (_myaxi_read_cur_global_addr & 4095) >> 2);
end else begin
_myaxi_read_cur_size <= 256;
_myaxi_read_rest_size <= _myaxi_read_rest_size - 256;
end
_myaxi_read_fsm <= _myaxi_read_fsm_2;
end
_myaxi_read_fsm_2: begin
if(myaxi_arready || !myaxi_arvalid) begin
_myaxi_read_fsm <= _myaxi_read_fsm_3;
end
end
_myaxi_read_fsm_3: begin
__myaxi_read_fsm_cond_3_0_1 <= 1;
if(myaxi_rready && myaxi_rvalid && (_myaxi_read_op_sel == 1)) begin
_wdata_1 <= myaxi_rdata;
_wvalid_2 <= 1;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast) begin
_myaxi_read_cur_global_addr <= _myaxi_read_cur_global_addr + (_myaxi_read_cur_size << 2);
end
__myaxi_read_fsm_cond_3_2_1 <= 1;
if(myaxi_rready && myaxi_rvalid && (_myaxi_read_op_sel == 2)) begin
_wdata_8 <= myaxi_rdata;
_wvalid_9 <= 1;
end
__myaxi_read_fsm_cond_3_3_1 <= 1;
if(myaxi_rready && myaxi_rvalid && (_myaxi_read_op_sel == 3)) begin
_wdata_33 <= myaxi_rdata;
_wvalid_34 <= 1;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_myaxi_read_rest_size > 0)) begin
_myaxi_read_fsm <= _myaxi_read_fsm_1;
end
if(myaxi_rready && myaxi_rvalid && myaxi_rlast && (_myaxi_read_rest_size == 0)) begin
_myaxi_read_fsm <= _myaxi_read_fsm_4;
end
end
_myaxi_read_fsm_4: begin
axim_flag_6 <= 1;
__myaxi_read_fsm_cond_4_1_1 <= 1;
_myaxi_read_fsm <= _myaxi_read_fsm_5;
end
_myaxi_read_fsm_5: begin
_myaxi_read_fsm <= _myaxi_read_fsm_init;
end
endcase
end
end
localparam _myaxi_write_fsm_1 = 1;
localparam _myaxi_write_fsm_2 = 2;
localparam _myaxi_write_fsm_3 = 3;
localparam _myaxi_write_fsm_4 = 4;
localparam _myaxi_write_fsm_5 = 5;
always @(posedge CLK) begin
if(RST) begin
_myaxi_write_fsm <= _myaxi_write_fsm_init;
_d1__myaxi_write_fsm <= _myaxi_write_fsm_init;
_myaxi_write_cur_global_addr <= 0;
_myaxi_write_rest_size <= 0;
_myaxi_write_cur_size <= 0;
axim_flag_31 <= 0;
__myaxi_write_fsm_cond_4_0_1 <= 0;
end else begin
_d1__myaxi_write_fsm <= _myaxi_write_fsm;
case(_d1__myaxi_write_fsm)
_myaxi_write_fsm_4: begin
if(__myaxi_write_fsm_cond_4_0_1) begin
axim_flag_31 <= 0;
end
end
endcase
case(_myaxi_write_fsm)
_myaxi_write_fsm_init: begin
if(_myaxi_write_start) begin
_myaxi_write_cur_global_addr <= (_myaxi_write_global_addr >> 2) << 2;
_myaxi_write_rest_size <= _myaxi_write_size;
end
if(_myaxi_write_start && (_myaxi_write_op_sel == 1)) begin
_myaxi_write_fsm <= _myaxi_write_fsm_1;
end
end
_myaxi_write_fsm_1: begin
if((_myaxi_write_rest_size <= 256) && ((_myaxi_write_cur_global_addr & 4095) + (_myaxi_write_rest_size << 2) >= 4096)) begin
_myaxi_write_cur_size <= 4096 - (_myaxi_write_cur_global_addr & 4095) >> 2;
_myaxi_write_rest_size <= _myaxi_write_rest_size - (4096 - (_myaxi_write_cur_global_addr & 4095) >> 2);
end else if(_myaxi_write_rest_size <= 256) begin
_myaxi_write_cur_size <= _myaxi_write_rest_size;
_myaxi_write_rest_size <= 0;
end else if((_myaxi_write_cur_global_addr & 4095) + 1024 >= 4096) begin
_myaxi_write_cur_size <= 4096 - (_myaxi_write_cur_global_addr & 4095) >> 2;
_myaxi_write_rest_size <= _myaxi_write_rest_size - (4096 - (_myaxi_write_cur_global_addr & 4095) >> 2);
end else begin
_myaxi_write_cur_size <= 256;
_myaxi_write_rest_size <= _myaxi_write_rest_size - 256;
end
_myaxi_write_fsm <= _myaxi_write_fsm_2;
end
_myaxi_write_fsm_2: begin
if(myaxi_awready || !myaxi_awvalid) begin
_myaxi_write_fsm <= _myaxi_write_fsm_3;
end
end
_myaxi_write_fsm_3: begin
if(_myaxi_write_data_done) begin
_myaxi_write_cur_global_addr <= _myaxi_write_cur_global_addr + (_myaxi_write_cur_size << 2);
end
if(_myaxi_write_data_done && (_myaxi_write_rest_size > 0)) begin
_myaxi_write_fsm <= _myaxi_write_fsm_1;
end
if(_myaxi_write_data_done && (_myaxi_write_rest_size == 0)) begin
_myaxi_write_fsm <= _myaxi_write_fsm_4;
end
end
_myaxi_write_fsm_4: begin
axim_flag_31 <= 1;
__myaxi_write_fsm_cond_4_0_1 <= 1;
_myaxi_write_fsm <= _myaxi_write_fsm_5;
end
_myaxi_write_fsm_5: begin
_myaxi_write_fsm <= _myaxi_write_fsm_init;
end
endcase
end
end
endmodule
module ram_a
(
input CLK,
input [10-1:0] ram_a_0_addr,
output [32-1:0] ram_a_0_rdata,
input [32-1:0] ram_a_0_wdata,
input ram_a_0_wenable
);
reg [10-1:0] ram_a_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_a_0_wenable) begin
mem[ram_a_0_addr] <= ram_a_0_wdata;
end
ram_a_0_daddr <= ram_a_0_addr;
end
assign ram_a_0_rdata = mem[ram_a_0_daddr];
endmodule
module ram_b
(
input CLK,
input [10-1:0] ram_b_0_addr,
output [32-1:0] ram_b_0_rdata,
input [32-1:0] ram_b_0_wdata,
input ram_b_0_wenable
);
reg [10-1:0] ram_b_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_b_0_wenable) begin
mem[ram_b_0_addr] <= ram_b_0_wdata;
end
ram_b_0_daddr <= ram_b_0_addr;
end
assign ram_b_0_rdata = mem[ram_b_0_daddr];
endmodule
module ram_c
(
input CLK,
input [10-1:0] ram_c_0_addr,
output [32-1:0] ram_c_0_rdata,
input [32-1:0] ram_c_0_wdata,
input ram_c_0_wenable
);
reg [10-1:0] ram_c_0_daddr;
reg [32-1:0] mem [0:1024-1];
always @(posedge CLK) begin
if(ram_c_0_wenable) begin
mem[ram_c_0_addr] <= ram_c_0_wdata;
end
ram_c_0_daddr <= ram_c_0_addr;
end
assign ram_c_0_rdata = mem[ram_c_0_daddr];
endmodule
"""
expected_cpp = """
#include <iostream>
#include <verilated.h>
#include <verilated_vcd_c.h>
#include "Vout.h"
#define Top Vout
#define MAX_SIM_TIME (0)
#define TIME_STEP (5)
#define TRACE
vluint64_t main_time = 0;
double sc_time_stamp(){
return main_time;
}
int main(int argc, char** argv)
{
Verilated::commandArgs(argc, argv);
Top *top = new Top();
#ifdef TRACE
Verilated::traceEverOn(true);
VerilatedVcdC* tfp = new VerilatedVcdC;
top->trace(tfp, 99);
tfp->open("uut.vcd");
#endif
top->io_CLK = 0;
top->io_RST = 0;
// input initialization
while(!Verilated::gotFinish()){
if(main_time % 5 == 0){
top->io_CLK = !top->io_CLK;
}
if(main_time == 100){
top->io_RST = 1;
}
if(main_time == 100 * 2){
top->io_RST = 0;
}
// update input
top->eval();
#ifdef TRACE
tfp->dump(main_time);
#endif
if(MAX_SIM_TIME > 0 && main_time >= MAX_SIM_TIME){
//std::cout << "# simulation time: " << main_time << std::endl;
break;
}
main_time += TIME_STEP;
}
#ifdef TRACE
tfp->close();
#endif
top->final();
return 0;
}
"""
def test():
veriloggen.reset()
memimg_name = 'memimg_' + os.path.splitext(os.path.basename(__file__))[0] + '.out'
test_module = simulation_verilator.mkTest(memimg_name=memimg_name)
verilog = veriloggen.simulation.to_verilator_code(
test_module, [test_module])
cpp = veriloggen.simulation.to_verilator_cpp(test_module, 'out')
assert(expected_verilog == verilog)
assert(expected_cpp == cpp)
| 30.289764 | 246 | 0.630533 |
f210fbaf7589baf6fe5ca331931320fc4be157e7 | 5,541 | py | Python | tests/Particle/plot_q_gnn.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | 1 | 2021-09-28T21:16:54.000Z | 2021-09-28T21:16:54.000Z | tests/Particle/plot_q_gnn.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | tests/Particle/plot_q_gnn.py | maxiaoba/rlkit | 3e23473f6bbc59552b6b2bcd97245e024d7ca95d | [
"MIT"
] | null | null | null | import csv
import os.path
import matplotlib
matplotlib.rcParams.update({'font.size': 10})
from matplotlib import pyplot as plt
import numpy as np
import torch
import argparse
from rlkit.torch import pytorch_util as ptu
# Logger Params
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default='simple_spread')
parser.add_argument('--obsid', action='store_true', default=False) # add id in observation
parser.add_argument('--boundary', action='store_true', default=False)
parser.add_argument('--num_ag', type=int, default=None)
parser.add_argument('--num_adv', type=int, default=None)
parser.add_argument('--num_l', type=int, default=None)
parser.add_argument('--mpl', type=int, default=25) # max path length
parser.add_argument('--log_dir', type=str, default='MASAC')
parser.add_argument('--epoch', type=int, default=None)
parser.add_argument('--seed', type=str, default=0)
args = parser.parse_args()
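# Example invocation (hypothetical arguments; adjust to your own experiment and log directory):
#   python plot_q_gnn.py --exp_name simple_spread --num_ag 3 --mpl 25 --log_dir MASAC --seed 0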
pre_dir = './Data/'+args.exp_name\
+('obsid' if args.obsid else '')\
+('bd' if args.boundary else '')\
+(('ag'+str(args.num_ag)) if args.num_ag else '')\
+(('adv'+str(args.num_adv)) if args.num_adv else '')\
+(('l'+str(args.num_l)) if args.num_l else '')\
+'_mpl'+str(args.mpl)
if args.epoch or (args.epoch == 0):
data_path = '{}/{}/seed{}/itr_{}.pkl'.format(pre_dir,args.log_dir,args.seed,args.epoch)
plot_file = pre_dir+'/'+args.log_dir+'/seed'+str(args.seed)+'/q_ep'+str(args.epoch)+'.png'
else:
data_path = '{}/{}/seed{}/params.pkl'.format(pre_dir,args.log_dir,args.seed)
plot_file = pre_dir+'/'+args.log_dir+'/seed'+str(args.seed)+'/q.png'
data = torch.load(data_path,map_location='cpu')
import sys
sys.path.append("./multiagent-particle-envs")
from make_env import make_env
from particle_env_wrapper import ParticleEnv
world_args=dict(
num_agents=args.num_ag,
num_adversaries=args.num_adv,
num_landmarks=args.num_l,
obsid=args.obsid,
boundary=([[-1.,-1.],[1.,1.]] if args.boundary else None)
)
env = ParticleEnv(make_env(args.exp_name,discrete_action_space=False,world_args=world_args))
o_n = env.reset()
num_agent = env.num_agent
if 'trainer/cg1' in data.keys():
cg1 = data['trainer/cg1']
cg2 = data['trainer/cg2']
else:
cg1, cg2 = None, None
cg1_n = data['trainer/cg1_n']
cg2_n = data['trainer/cg2_n']
if 'trainer/qf1' in data.keys():
q1net = data['trainer/qf1']
q2net = data['trainer/qf2']
else:
q1net, q2net = None, None
q1net_n = data['trainer/qf1_n']
q2net_n = data['trainer/qf2_n']
xs = np.linspace(-1,1,100)
ys = np.linspace(-1,1,100)
q1s = dict()
q2s = dict()
o_n = env.reset()
with torch.no_grad():
for i,x in enumerate(xs):
for j,y in enumerate(ys):
actions = torch.tensor([[x,y]]*num_agent)
q_input = torch.cat([torch.tensor(o_n), actions],dim=-1).float()[None,:]
if cg1:
contexts_1 = cg1(q_input)
q1_input = torch.cat([actions[None,:],contexts_1],dim=-1).float()
contexts_2 = cg2(q_input)
q2_input = torch.cat([actions[None,:],contexts_2],dim=-1).float()
if q1net:
q1_n = q1net(q1_input)
q2_n = q2net(q2_input)
else:
q1_n = [q1net_n[agent](q1_input[:,agent,:]) for agent in range(len(q1net_n))]
q1_n = torch.stack(q1_n).transpose(0,1).contiguous()
q2_n = [q2net_n[agent](q2_input[:,agent,:]) for agent in range(len(q2net_n))]
q2_n = torch.stack(q2_n).transpose(0,1).contiguous()
else:
q1_n, q2_n = [], []
for agent in range(num_agent):
contexts_1 = cg1_n[agent](q_input)
q1_input = torch.cat([actions[None,:],contexts_1],dim=-1).float()
q1 = q1net_n[agent](q1_input[:,agent,:])
q1_n.append(q1)
contexts_2 = cg2_n[agent](q_input)
q2_input = torch.cat([actions[None,:],contexts_2],dim=-1).float()
q2 = q2net_n[agent](q2_input[:,agent,:])
q2_n.append(q2)
q1_n = torch.stack(q1_n).transpose(0,1).contiguous()
q2_n = torch.stack(q2_n).transpose(0,1).contiguous()
for agent in range(num_agent):
if not (agent in q1s.keys()):
q1s[agent] = np.zeros((100,100))
q2s[agent] = np.zeros((100,100))
q1s[agent][j,i] = q1_n[0][agent]
q2s[agent][j,i] = q2_n[0][agent]
plt.figure()
for agent in range(num_agent):
plt.subplot(3,num_agent,agent+1)
cs = plt.contourf(xs,ys,q1s[agent])
cbar = plt.colorbar(cs)
plt.gca().set_aspect('equal', 'box')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel('a1')
plt.ylabel('a2')
for agent in range(num_agent):
plt.subplot(3,num_agent,num_agent+agent+1)
cs = plt.contourf(xs,ys,q2s[agent])
cbar = plt.colorbar(cs)
plt.gca().set_aspect('equal', 'box')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel('a1')
plt.ylabel('a2')
for agent in range(num_agent):
plt.subplot(3,num_agent,2*num_agent+agent+1)
cs = plt.contourf(xs,ys,np.minimum(q1s[agent],q2s[agent]))
cbar = plt.colorbar(cs)
plt.gca().set_aspect('equal', 'box')
plt.xlim(-1,1)
plt.ylim(-1,1)
plt.xlabel('a1')
plt.ylabel('a2')
plt.savefig(plot_file)
plt.close()
| 37.187919 | 97 | 0.600253 |
3a6a3eee0577da06c07e8369470bc6a2a1142ed5 | 20,719 | py | Python | runtime/python/Lib/site-packages/numpy/core/tests/test_overrides.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | 1 | 2022-01-08T12:30:44.000Z | 2022-01-08T12:30:44.000Z | runtime/python/Lib/site-packages/numpy/core/tests/test_overrides.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | runtime/python/Lib/site-packages/numpy/core/tests/test_overrides.py | hwaipy/InteractionFreeNode | 88642b68430f57b028fd0f276a5709f89279e30d | [
"MIT"
] | null | null | null | import inspect
import sys
import os
import tempfile
from io import StringIO
from unittest import mock
import numpy as np
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex)
from numpy.core.overrides import (
_get_implementing_args, array_function_dispatch,
verify_matching_signatures, ARRAY_FUNCTION_ENABLED)
from numpy.compat import pickle
import pytest
requires_array_function = pytest.mark.skipif(
not ARRAY_FUNCTION_ENABLED,
reason="__array_function__ dispatch not enabled.")
def _return_not_implemented(self, *args, **kwargs):
return NotImplemented
# need to define this at the top level to test pickling
@array_function_dispatch(lambda array: (array,))
def dispatched_one_arg(array):
"""Docstring."""
return 'original'
@array_function_dispatch(lambda array1, array2: (array1, array2))
def dispatched_two_arg(array1, array2):
"""Docstring."""
return 'original'
class TestGetImplementingArgs:
def test_ndarray(self):
array = np.array(1)
args = _get_implementing_args([array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, array])
assert_equal(list(args), [array])
args = _get_implementing_args([array, 1])
assert_equal(list(args), [array])
args = _get_implementing_args([1, array])
assert_equal(list(args), [array])
def test_ndarray_subclasses(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
array = np.array(1).view(np.ndarray)
override_sub = np.array(1).view(OverrideSub)
no_override_sub = np.array(1).view(NoOverrideSub)
args = _get_implementing_args([array, override_sub])
assert_equal(list(args), [override_sub, array])
args = _get_implementing_args([array, no_override_sub])
assert_equal(list(args), [no_override_sub, array])
args = _get_implementing_args(
[override_sub, no_override_sub])
assert_equal(list(args), [override_sub, no_override_sub])
def test_ndarray_and_duck_array(self):
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
other = Other()
args = _get_implementing_args([other, array])
assert_equal(list(args), [other, array])
args = _get_implementing_args([array, other])
assert_equal(list(args), [array, other])
def test_ndarray_subclass_and_duck_array(self):
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
class Other:
__array_function__ = _return_not_implemented
array = np.array(1)
subarray = np.array(1).view(OverrideSub)
other = Other()
assert_equal(_get_implementing_args([array, subarray, other]),
[subarray, array, other])
assert_equal(_get_implementing_args([array, other, subarray]),
[subarray, array, other])
def test_many_duck_arrays(self):
class A:
__array_function__ = _return_not_implemented
class B(A):
__array_function__ = _return_not_implemented
class C(A):
__array_function__ = _return_not_implemented
class D:
__array_function__ = _return_not_implemented
a = A()
b = B()
c = C()
d = D()
assert_equal(_get_implementing_args([1]), [])
assert_equal(_get_implementing_args([a]), [a])
assert_equal(_get_implementing_args([a, 1]), [a])
assert_equal(_get_implementing_args([a, a, a]), [a])
assert_equal(_get_implementing_args([a, d, a]), [a, d])
assert_equal(_get_implementing_args([a, b]), [b, a])
assert_equal(_get_implementing_args([b, a]), [b, a])
assert_equal(_get_implementing_args([a, b, c]), [b, c, a])
assert_equal(_get_implementing_args([a, c, b]), [c, b, a])
def test_too_many_duck_arrays(self):
namespace = dict(__array_function__=_return_not_implemented)
types = [type('A' + str(i), (object,), namespace) for i in range(33)]
relevant_args = [t() for t in types]
actual = _get_implementing_args(relevant_args[:32])
assert_equal(actual, relevant_args[:32])
with assert_raises_regex(TypeError, 'distinct argument types'):
_get_implementing_args(relevant_args)
class TestNDArrayArrayFunction:
@requires_array_function
def test_method(self):
class Other:
__array_function__ = _return_not_implemented
class NoOverrideSub(np.ndarray):
pass
class OverrideSub(np.ndarray):
__array_function__ = _return_not_implemented
array = np.array([1])
other = Other()
no_override_sub = array.view(NoOverrideSub)
override_sub = array.view(OverrideSub)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray,),
args=(array, 1.), kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, Other),
args=(array, other), kwargs={})
assert_(result is NotImplemented)
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, NoOverrideSub),
args=(array, no_override_sub),
kwargs={})
assert_equal(result, 'original')
result = array.__array_function__(func=dispatched_two_arg,
types=(np.ndarray, OverrideSub),
args=(array, override_sub),
kwargs={})
assert_equal(result, 'original')
with assert_raises_regex(TypeError, 'no implementation found'):
np.concatenate((array, other))
expected = np.concatenate((array, array))
result = np.concatenate((array, no_override_sub))
assert_equal(result, expected.view(NoOverrideSub))
result = np.concatenate((array, override_sub))
assert_equal(result, expected.view(OverrideSub))
def test_no_wrapper(self):
# This shouldn't happen unless a user intentionally calls
# __array_function__ with invalid arguments, but check that we raise
# an appropriate error all the same.
array = np.array(1)
func = lambda x: x
with assert_raises_regex(AttributeError, '_implementation'):
array.__array_function__(func=func, types=(np.ndarray,),
args=(array,), kwargs={})
@requires_array_function
class TestArrayFunctionDispatch:
def test_pickle(self):
for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
roundtripped = pickle.loads(
pickle.dumps(dispatched_one_arg, protocol=proto))
assert_(roundtripped is dispatched_one_arg)
def test_name_and_docstring(self):
assert_equal(dispatched_one_arg.__name__, 'dispatched_one_arg')
if sys.flags.optimize < 2:
assert_equal(dispatched_one_arg.__doc__, 'Docstring.')
def test_interface(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return (self, func, types, args, kwargs)
original = MyArray()
(obj, func, types, args, kwargs) = dispatched_one_arg(original)
assert_(obj is original)
assert_(func is dispatched_one_arg)
assert_equal(set(types), {MyArray})
# assert_equal uses the overloaded np.iscomplexobj() internally
assert_(args == (original,))
assert_equal(kwargs, {})
def test_not_implemented(self):
class MyArray:
def __array_function__(self, func, types, args, kwargs):
return NotImplemented
array = MyArray()
with assert_raises_regex(TypeError, 'no implementation found'):
dispatched_one_arg(array)
@requires_array_function
class TestVerifyMatchingSignatures:
def test_verify_matching_signatures(self):
verify_matching_signatures(lambda x: 0, lambda x: 0)
verify_matching_signatures(lambda x=None: 0, lambda x=None: 0)
verify_matching_signatures(lambda x=1: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda a: 0, lambda b: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x: 0, lambda x=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=None: 0, lambda y=None: 0)
with assert_raises(RuntimeError):
verify_matching_signatures(lambda x=1: 0, lambda y=1: 0)
def test_array_function_dispatch(self):
with assert_raises(RuntimeError):
@array_function_dispatch(lambda x: (x,))
def f(y):
pass
# should not raise
@array_function_dispatch(lambda x: (x,), verify=False)
def f(y):
pass
def _new_duck_type_and_implements():
"""Create a duck array type and implements functions."""
HANDLED_FUNCTIONS = {}
class MyArray:
def __array_function__(self, func, types, args, kwargs):
if func not in HANDLED_FUNCTIONS:
return NotImplemented
if not all(issubclass(t, MyArray) for t in types):
return NotImplemented
return HANDLED_FUNCTIONS[func](*args, **kwargs)
def implements(numpy_function):
"""Register an __array_function__ implementations."""
def decorator(func):
HANDLED_FUNCTIONS[numpy_function] = func
return func
return decorator
return (MyArray, implements)
@requires_array_function
class TestArrayFunctionImplementation:
def test_one_arg(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(dispatched_one_arg)
def _(array):
return 'myarray'
assert_equal(dispatched_one_arg(1), 'original')
assert_equal(dispatched_one_arg(MyArray()), 'myarray')
def test_optional_args(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array, option=None: (array,))
def func_with_option(array, option='default'):
return option
@implements(func_with_option)
def my_array_func_with_option(array, new_option='myarray'):
return new_option
# we don't need to implement every option on __array_function__
# implementations
assert_equal(func_with_option(1), 'default')
assert_equal(func_with_option(1, option='extra'), 'extra')
assert_equal(func_with_option(MyArray()), 'myarray')
with assert_raises(TypeError):
func_with_option(MyArray(), option='extra')
# but new options on implementations can't be used
result = my_array_func_with_option(MyArray(), new_option='yes')
assert_equal(result, 'yes')
with assert_raises(TypeError):
func_with_option(MyArray(), new_option='no')
def test_not_implemented(self):
MyArray, implements = _new_duck_type_and_implements()
@array_function_dispatch(lambda array: (array,), module='my')
def func(array):
return array
array = np.array(1)
assert_(func(array) is array)
assert_equal(func.__module__, 'my')
with assert_raises_regex(
TypeError, "no implementation found for 'my.func'"):
func(MyArray())
class TestNDArrayMethods:
def test_repr(self):
# gh-12162: should still be defined even if __array_function__ doesn't
# implement np.array_repr()
class MyArray(np.ndarray):
def __array_function__(*args, **kwargs):
return NotImplemented
array = np.array(1).view(MyArray)
assert_equal(repr(array), 'MyArray(1)')
assert_equal(str(array), '1')
class TestNumPyFunctions:
def test_set_module(self):
assert_equal(np.sum.__module__, 'numpy')
assert_equal(np.char.equal.__module__, 'numpy.char')
assert_equal(np.fft.fft.__module__, 'numpy.fft')
assert_equal(np.linalg.solve.__module__, 'numpy.linalg')
def test_inspect_sum(self):
signature = inspect.signature(np.sum)
assert_('axis' in signature.parameters)
@requires_array_function
def test_override_sum(self):
MyArray, implements = _new_duck_type_and_implements()
@implements(np.sum)
def _(array):
return 'yes'
assert_equal(np.sum(MyArray()), 'yes')
@requires_array_function
def test_sum_on_mock_array(self):
# We need a proxy for mocks because __array_function__ is only looked
# up in the class dict
class ArrayProxy:
def __init__(self, value):
self.value = value
def __array_function__(self, *args, **kwargs):
return self.value.__array_function__(*args, **kwargs)
def __array__(self, *args, **kwargs):
return self.value.__array__(*args, **kwargs)
proxy = ArrayProxy(mock.Mock(spec=ArrayProxy))
proxy.value.__array_function__.return_value = 1
result = np.sum(proxy)
assert_equal(result, 1)
proxy.value.__array_function__.assert_called_once_with(
np.sum, (ArrayProxy,), (proxy,), {})
proxy.value.__array__.assert_not_called()
@requires_array_function
def test_sum_forwarding_implementation(self):
class MyArray(np.ndarray):
def sum(self, axis, out):
return 'summed'
def __array_function__(self, func, types, args, kwargs):
return super().__array_function__(func, types, args, kwargs)
# note: the internal implementation of np.sum() calls the .sum() method
array = np.array(1).view(MyArray)
assert_equal(np.sum(array), 'summed')
class TestArrayLike:
def setup(self):
class MyArray():
def __init__(self, function=None):
self.function = function
def __array_function__(self, func, types, args, kwargs):
try:
my_func = getattr(self, func.__name__)
except AttributeError:
return NotImplemented
return my_func(*args, **kwargs)
self.MyArray = MyArray
class MyNoArrayFunctionArray():
def __init__(self, function=None):
self.function = function
self.MyNoArrayFunctionArray = MyNoArrayFunctionArray
def add_method(self, name, arr_class, enable_value_error=False):
def _definition(*args, **kwargs):
# Check that `like=` isn't propagated downstream
assert 'like' not in kwargs
if enable_value_error and 'value_error' in kwargs:
raise ValueError
return arr_class(getattr(arr_class, name))
setattr(arr_class, name, _definition)
def func_args(*args, **kwargs):
return args, kwargs
@requires_array_function
def test_array_like_not_implemented(self):
self.add_method('array', self.MyArray)
ref = self.MyArray.array()
with assert_raises_regex(TypeError, 'no implementation found'):
array_like = np.asarray(1, like=ref)
_array_tests = [
('array', *func_args((1,))),
('asarray', *func_args((1,))),
('asanyarray', *func_args((1,))),
('ascontiguousarray', *func_args((2, 3))),
('asfortranarray', *func_args((2, 3))),
('require', *func_args((np.arange(6).reshape(2, 3),),
requirements=['A', 'F'])),
('empty', *func_args((1,))),
('full', *func_args((1,), 2)),
('ones', *func_args((1,))),
('zeros', *func_args((1,))),
('arange', *func_args(3)),
('frombuffer', *func_args(b'\x00' * 8, dtype=int)),
('fromiter', *func_args(range(3), dtype=int)),
('fromstring', *func_args('1,2', dtype=int, sep=',')),
('loadtxt', *func_args(lambda: StringIO('0 1\n2 3'))),
('genfromtxt', *func_args(lambda: StringIO(u'1,2.1'),
dtype=[('int', 'i8'), ('float', 'f8')],
delimiter=',')),
]
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('numpy_ref', [True, False])
@requires_array_function
def test_array_like(self, function, args, kwargs, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method(function, self.MyArray)
np_func = getattr(np, function)
my_func = getattr(self.MyArray, function)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
array_like = np_func(*like_args, **kwargs, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_args = tuple(a() if callable(a) else a for a in args)
np_arr = np_func(*np_args, **kwargs)
# Special-case np.empty to ensure values match
if function == "empty":
np_arr.fill(1)
array_like.fill(1)
assert_equal(array_like, np_arr)
else:
assert type(array_like) is self.MyArray
assert array_like.function is my_func
@pytest.mark.parametrize('function, args, kwargs', _array_tests)
@pytest.mark.parametrize('ref', [1, [1], "MyNoArrayFunctionArray"])
@requires_array_function
def test_no_array_function_like(self, function, args, kwargs, ref):
self.add_method('array', self.MyNoArrayFunctionArray)
self.add_method(function, self.MyNoArrayFunctionArray)
np_func = getattr(np, function)
# Instantiate ref if it's the MyNoArrayFunctionArray class
if ref == "MyNoArrayFunctionArray":
ref = self.MyNoArrayFunctionArray.array()
like_args = tuple(a() if callable(a) else a for a in args)
with assert_raises_regex(TypeError,
'The `like` argument must be an array-like that implements'):
np_func(*like_args, **kwargs, like=ref)
@pytest.mark.parametrize('numpy_ref', [True, False])
def test_array_like_fromfile(self, numpy_ref):
self.add_method('array', self.MyArray)
self.add_method("fromfile", self.MyArray)
if numpy_ref is True:
ref = np.array(1)
else:
ref = self.MyArray.array()
data = np.random.random(5)
with tempfile.TemporaryDirectory() as tmpdir:
fname = os.path.join(tmpdir, "testfile")
data.tofile(fname)
array_like = np.fromfile(fname, like=ref)
if numpy_ref is True:
assert type(array_like) is np.ndarray
np_res = np.fromfile(fname, like=ref)
assert_equal(np_res, data)
assert_equal(array_like, np_res)
else:
assert type(array_like) is self.MyArray
assert array_like.function is self.MyArray.fromfile
@requires_array_function
def test_exception_handling(self):
self.add_method('array', self.MyArray, enable_value_error=True)
ref = self.MyArray.array()
with assert_raises(TypeError):
# Raises the error about `value_error` being invalid first
np.array(1, value_error=True, like=ref)
| 35.417094 | 80 | 0.602394 |
2e70b04bc759ecf30ec2d91fc8ef954a9354a503 | 617 | py | Python | Others/iroha/iroha2019-day1/e.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | Others/iroha/iroha2019-day1/e.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | Others/iroha/iroha2019-day1/e.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
def main():
n, a, b = map(int, input().split())
if b >= 1:
ds = sorted(list(map(int, input().split())))
min_ds = min(ds)
max_ds = max(ds)
ans = n - b
for i in range(1, len(ds)):
if ds[i] - ds[i - 1] > a:
ans -= 1
if min_ds - 1 >= a:
ans -= 1
if n - max_ds >= a:
ans -= 1
if a == 1:
ans = 0
print(ans)
else:
from math import ceil
print(n - ceil(n / a))
if __name__ == '__main__':
main()
| 19.28125 | 53 | 0.366288 |
f00939c44715cbb46e21a3b0bd4e2b066d1b7f29 | 2,549 | py | Python | extras/pyrepl/console.py | dillionhacker/python222 | 205414c33fba8166167fd8a6a03eda1a68f16316 | [
"Apache-2.0"
] | 1 | 2019-05-27T00:58:46.000Z | 2019-05-27T00:58:46.000Z | extras/pyrepl/console.py | tuankien2601/python222 | 205414c33fba8166167fd8a6a03eda1a68f16316 | [
"Apache-2.0"
] | null | null | null | extras/pyrepl/console.py | tuankien2601/python222 | 205414c33fba8166167fd8a6a03eda1a68f16316 | [
"Apache-2.0"
] | null | null | null | # Copyright 2000-2004 Michael Hudson mwh@python.net
#
# All Rights Reserved
#
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose is hereby granted without fee,
# provided that the above copyright notice appear in all copies and
# that both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THE AUTHOR MICHAEL HUDSON DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
# INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
# RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF
# CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
class Event:
"""An Event. `evt' is 'key' or somesuch."""
def __init__(self, evt, data, raw=''):
self.evt = evt
self.data = data
self.raw = raw
def __repr__(self):
return 'Event(%r, %r)'%(self.evt, self.data)
class Console:
"""Attributes:
screen,
height,
width,
"""
def refresh(self, screen, xy):
pass
def prepare(self):
pass
def restore(self):
pass
def move_cursor(self, x, y):
pass
def set_cursor_vis(self, vis):
pass
def getheightwidth(self):
"""Return (height, width) where height and width are the height
and width of the terminal window in characters."""
pass
def get_event(self, block=1):
"""Return an Event instance. Returns None if |block| is false
and there is no event pending, otherwise waits for the
completion of an event."""
pass
def beep(self):
pass
def clear(self):
"""Wipe the screen"""
pass
def finish(self):
"""Move the cursor to the end of the display and otherwise get
ready for end. XXX could be merged with restore? Hmm."""
pass
def flushoutput(self):
"""Flush all output to the screen (assuming there's some
buffering going on somewhere)."""
pass
def forgetinput(self):
"""Forget all pending, but not yet processed input."""
pass
def getpending(self):
"""Return the characters that have been typed but not yet
processed."""
pass
def wait(self):
"""Wait for an event."""
pass
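# --- Editor's illustration (not part of the original pyrepl module) ---
# A minimal do-nothing Console subclass sketching how the interface above is meant
# to be filled in; the fixed 24x80 geometry and the hard-coded 'q' key event are
# assumptions made only for this sketch.
class DummyConsole(Console):
    height, width = 24, 80
    screen = []
    def getheightwidth(self):
        return self.height, self.width
    def get_event(self, block=1):
        # A real backend would translate pending keystrokes into Event objects.
        return Event('key', 'q', raw='q')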
| 27.117021 | 71 | 0.634759 |
8c977ddfa84897126fd0a57a872a7b5fe3132af4 | 7,505 | py | Python | run.py | encok/Password-locker | 67a1e7409b13cc09f84eaf3c6fb49cf3687484d3 | [
"MIT"
] | null | null | null | run.py | encok/Password-locker | 67a1e7409b13cc09f84eaf3c6fb49cf3687484d3 | [
"MIT"
] | null | null | null | run.py | encok/Password-locker | 67a1e7409b13cc09f84eaf3c6fb49cf3687484d3 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3.8
from user import User,Credentials
def function():
print(" ____ ")
print(" | _ \ ")
print(" | |_) ) ____ ___ ___ ")
print(" | __/ / _ |/ __ / __ ")
print(" | | / (_| |\__ \ \__ \ ")
print(" |_| \_____| ___/ ___/ ")
function()
def create_user(user_name,password):
'''
Function to create a new user
'''
new_user = User(user_name,password)
return new_user
def save_users(user):
'''
Function to save user
'''
user.save_user()
def display_users():
'''
Function that returns all the saved users
'''
return User.display_users()
def login_user(user_name,password):
"""
    function that checks whether a user exists and then logs the user in.
"""
check_user = Credentials.verify_user(user_name,password)
return check_user
def create_new_credential(account,userName,password):
"""
Function that creates new credentials for a given user account
"""
new_credential = Credentials(account,userName,password)
return new_credential
def save_credentials(credentials):
"""
Function to save Credentials to the credentials list
"""
    credentials.save_details()
def display_accounts_details():
"""
Function that returns all the saved credential.
"""
return Credentials.display_credentials()
def delete_credential(credentials):
"""
Function to delete a Credentials from credentials list
"""
credentials.delete_credentials()
def find_credential(account):
"""
Function that finds a Credentials by an account name and returns the Credentials that belong to that account
"""
return Credentials.find_credential(account)
def check_credendtials(account):
"""
    Function that checks whether a credential with that account name exists and returns True or False
"""
return Credentials.if_credential_exist(account)
def generate_Password():
'''
generates a random password for the user.
'''
auto_password=Credentials.generatePassword()
return auto_password
def copy_password(account):
"""
    A function that copies the password for a given account to the clipboard using the pyperclip framework.
"""
return Credentials.copy_password(account)
def main():
print("Hello Welcome to your Passlocker.\n Please enter one of the following to continue.\n CA --- Create New Account \n HA --- Have An Account \n")
short_code=input("").lower().strip()
if short_code == "ca":
print("Register")
print('*' *40)
username = input("user_Name: ")
while True:
print(" TP - To type your own pasword:\n GP - To generate random Password")
chosen_pass = input().lower().strip()
if chosen_pass == 'tp':
password = input("Enter Password\n")
break
elif chosen_pass == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try once more")
save_users(create_user(username,password))
print("*"*70)
print(f"Hello {username}, Your account has been created succesfully! Your password is: {password}")
print("*"*70)
elif short_code == 'ha':
print("*"*50)
print("Enter your User name and your Password to log in:")
print('*' * 50)
username = input("User name: ")
password = input("password: ")
login = login_user(username,password)
        if login:
            print(f"Hello {username}. Welcome to Password Locker Manager")
print('\n')
while True:
print("Use these short codes:\n CC - Create a new credential \n DC - Display Credentials \n FC - Find a credential \n GP - Generate A randomn password \n D - Delete credential \n EX - Exit the application \n")
short_code = input().lower().strip()
if short_code == "cc":
print("Create New Credential")
print("."*20)
print("Account name ....")
account = input().lower()
print("Your Account username")
userName = input()
while True:
print(" TP - To type your own pasword if you already have an account:\n GP - To generate random Password")
password_Choice = input().lower().strip()
if password_Choice == 'tp':
password = input("Enter Your Own Password\n")
break
elif password_Choice == 'gp':
password = generate_Password()
break
else:
print("Invalid password please try once more")
save_credentials(create_new_credential(account,userName,password))
print('\n')
print(f"Account Credential for: {account} - UserName: {userName} - Password:{password} created succesfully")
print('\n')
elif short_code == "dc":
if display_accounts_details():
print("Here's your list of acoounts: ")
print('*' * 30)
print('_'* 30)
for account in display_accounts_details():
print(f" Account:{account.account} \n User Name:{username}\n Password:{password}")
print('_'* 30)
print('*' * 30)
else:
print("You don't have any credentials saved yet...")
elif short_code == "fc":
print("Enter the Account Name you want to search for")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print(f"Account Name : {search_credential.account}")
print('-' * 50)
print(f"User Name: {search_credential.user_name} Password :{search_credential.password}")
print('-' * 50)
else:
print("That Credential does not exist")
print('\n')
elif short_code == "d":
print("Enter the account name of the Credentials you want to delete")
search_name = input().lower()
if find_credential(search_name):
search_credential = find_credential(search_name)
print("_"*50)
search_credential.delete_credentials()
print('\n')
print(f"Your stored credentials for : {search_credential.account} successfully deleted!!!")
print('\n')
else:
print("That Credential you want to delete does not exist in your store yet")
elif short_code == 'gp':
password = generate_Password()
print(f" {password} Has been generated succesfull. You can proceed to use it to your account")
elif short_code == 'ex':
print("Thanks for using passwords store manager.. See you next time!")
break
else:
print("Wrong entry... Check your entry again and let it match those in the menu")
else:
print("Please enter a valid input to continue")
if __name__ == '__main__':
main() | 36.609756 | 218 | 0.571486 |
595425875891a0cee8de74935edb4e92d463e248 | 428 | py | Python | 6 kyu/Simple Fun 258 Is Divisible By 6.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 6 | 2020-09-03T09:32:25.000Z | 2020-12-07T04:10:01.000Z | 6 kyu/Simple Fun 258 Is Divisible By 6.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | 1 | 2021-12-13T15:30:21.000Z | 2021-12-13T15:30:21.000Z | 6 kyu/Simple Fun 258 Is Divisible By 6.py | mwk0408/codewars_solutions | 9b4f502b5f159e68024d494e19a96a226acad5e5 | [
"MIT"
] | null | null | null | def is_divisible_by_6(s):
if s=="*":
return ["0", "6"]
elif s[-1].isdigit() and int(s[-1])%2:
return []
check=s.index("*")
res=[]
digit_sum=sum(int(i) for i in s[:check]+s[check+1:])
for i in range(10):
if (check==len(s)-1 and i%2==0 and (i+digit_sum)%3==0) or (check!=len(s)-1 and (i+digit_sum)%3==0):
res.append(str(int(s[:check]+str(i)+s[check+1:])))
return res | 35.666667 | 107 | 0.521028 |
d10c234887958033ad358b93f51befff96988cf9 | 3,662 | py | Python | Lab/adventofcode2020/day19.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | 10 | 2020-07-06T11:00:58.000Z | 2022-01-29T09:25:24.000Z | Lab/adventofcode2020/day19.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | null | null | null | Lab/adventofcode2020/day19.py | hscspring/TheAlgorithms-Python | 5c2faea1d2d25a9a81a4786e053b0cc58ab46c6f | [
"MIT"
] | 3 | 2020-07-13T06:39:23.000Z | 2020-08-15T16:29:48.000Z | import pnlp
import copy
file = "data/day19Test.txt"
file = "data/day19.txt"
# file = "data/day19Test2.txt"
def get_rule_msg(file):
text = pnlp.read_file(file)
tmp_rule, tmp_msg = text.split("\n\n")
rule = tmp_rule.split("\n")
msg = tmp_msg.split("\n")
msg = [m for m in msg if m]
return rule, msg
def build_dict(rule):
dct = {}
base = {}
for line in rule:
key, val = line.split(": ")
if "a" in val:
base[key] = "a"
elif "b" in val:
base[key] = "b"
else:
strs = val.split(" | ")
dct[key] = strs
return dct, base
def get_next(init, dct, base):
res = []
for item in init:
curr_parts = item.split()
for i, nxt_key in enumerate(curr_parts):
if nxt_key in base:
continue
nxt_parts = dct[nxt_key]
for part in nxt_parts:
cp = copy.deepcopy(curr_parts)
cp[i] = part
new = " ".join(cp)
res.append(new)
return res
def satisfy(init, base):
for part in init:
if not all_base(part, base):
return False
return True
def all_base(item, base):
for v in item.split():
if v not in base:
return False
return True
def item2str(item, base):
res = ""
for key in item.split():
val = base[key]
res += val
return res
def get_matched_messages(dct, base, start="0"):
res = set()
init = dct[start]
while not satisfy(init, base):
        # Save items that are already fully expanded up front so they do not go through further expansion
filted = []
for item in init:
if all_base(item, base):
s = item2str(item, base)
res.add(s)
else:
filted.append(item)
init = get_next(filted, dct, base)
init = list(set(init))
for item in init:
s = item2str(item, base)
res.add(s)
return res
rule, msg = get_rule_msg(file)
dct, base = build_dict(rule)
# matched = get_matched_messages(dct, base, "0")
# res = 0
# for line in msg:
# if line in matched:
# res += 1
# print(res)
################# BETTER SOLUTION #################
"""
Rule 8: repeats rule 42.
Rule 11: nests rule 42 and rule 31 (n 42-blocks followed by n 31-blocks).
raw:  42 42 31    ==> 42*(1+1) + 31*1
var1: 42 n42 n31  ==> 42*(1+n) + 31*n
var2: 42m 42 31   ==> 42*(m+1) + 31*1
var3: 42m 42n 31n ==> 42*(m+n) + 31*n
Can also be used to solve part 1.
"""
def can_combine(prefix, set42, set31):
n = len(list(set42)[0])
part = len(prefix) // n
assert len(prefix)/ n == part
if part < 2:
return prefix in set42
# flag = True
# for i in range(part):
# p = prefix[i*n: (i+1)*n]
# if p not in set42:
# flag = False
# break
# if flag:
# return True
first = prefix[:n]
if first not in set42:
return False
last = prefix[-n:]
if last not in set31:
return False
num42, num31 = 0, 0
idx42, idx31 = [], []
for i in range(part):
p = prefix[i*n: (i+1)*n]
if p in set42:
num42 += 1
idx42.append(i)
elif p in set31:
num31 += 1
idx31.append(i)
else:
return False
    # VERY IMPORTANT: every 31-block must come after all 42-blocks
if not min(idx31) > max(idx42):
return False
if num42 - num31 < 1:
return False
else:
return True
m42 = get_matched_messages(dct, base, "42")
m31 = get_matched_messages(dct, base, "31")
min_len = min(len(i) for i in msg)
res = 0
for line in msg:
# if len(line) != min_len:
# continue
if can_combine(line, m42, m31):
res += 1
print(res)
| 20.806818 | 51 | 0.510104 |
061e61638d075af41efd8296c474d2d74449815b | 3,381 | py | Python | episcanpy/tools/_clustering.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 96 | 2019-05-25T17:41:13.000Z | 2022-02-28T10:29:23.000Z | episcanpy/tools/_clustering.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 43 | 2019-07-12T03:12:51.000Z | 2022-03-30T13:07:19.000Z | episcanpy/tools/_clustering.py | mruffalo/epiScanpy | bcb86347d2b8451c384f97162625c8d5efb27ffc | [
"BSD-3-Clause"
] | 28 | 2019-03-28T16:40:52.000Z | 2022-03-16T16:12:40.000Z | import anndata as ad
import pandas as pd
import numpy as np
import scanpy as sc
import os
from sklearn.cluster import KMeans
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.cluster import adjusted_mutual_info_score
from sklearn.metrics.cluster import homogeneity_score
import seaborn as sns
def getNClusters(adata,n_cluster,range_min=0,range_max=3,max_steps=20, method='louvain', key_added=None):
"""
    Function that tests different louvain/leiden resolution settings to obtain the target number of clusters.
    Adapted from the Pinello lab function. See: https://github.com/pinellolab/scATAC-benchmarking
    It works for both louvain and leiden clustering.
You can specify the obs variable name as key_added.
"""
this_step = 0
this_min = float(range_min)
this_max = float(range_max)
while this_step < max_steps:
print('step ' + str(this_step))
this_resolution = this_min + ((this_max-this_min)/2)
if (method == 'louvain') and (key_added==None):
sc.tl.louvain(adata, resolution=this_resolution)
        elif method == 'louvain' and isinstance(key_added, str):
sc.tl.louvain(adata, resolution=this_resolution, key_added=key_added)
elif( method == 'leiden') and (key_added==None):
sc.tl.leiden(adata,resolution=this_resolution)
else:
sc.tl.leiden(adata,resolution=this_resolution, key_added=key_added)
if key_added==None:
this_clusters = adata.obs[method].nunique()
else:
this_clusters = adata.obs[key_added].nunique()
print('got ' + str(this_clusters) + ' at resolution ' + str(this_resolution))
if this_clusters > n_cluster:
this_max = this_resolution
elif this_clusters < n_cluster:
this_min = this_resolution
elif this_clusters == n_cluster:
break
#return(this_resolution, adata)
else:
print('Cannot find the number of clusters')
print('Clustering solution from last iteration is used:' + str(this_clusters) + ' at resolution ' + str(this_resolution))
this_step += 1
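# Illustrative call (editor's sketch, not executed): assumes `adata` already has a
# neighbour graph computed (sc.pp.neighbors) and that ten clusters are wanted;
# 'leiden_10' is just an example key name for adata.obs.
# getNClusters(adata, n_cluster=10, method='leiden', key_added='leiden_10')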
def kmeans(adata, num_clusters):
"""
Compute kmeans clustering using X_pca fits.
random_state = 2019
"""
kmeans = KMeans(n_clusters=num_clusters, random_state=2019).fit(adata.obsm['X_pca'])
adata.obs['kmeans'] = pd.Series(kmeans.labels_,index=adata.obs.index).astype('category')
def hc(adata, num_clusters):
"""
Compute hierarchical clustering using X_pca fits.
random_state = 2019
"""
hc = AgglomerativeClustering(n_clusters=num_clusters).fit(adata.obsm['X_pca'])
adata.obs['hc'] = pd.Series(hc.labels_,index=adata.obs.index).astype('category')
#### Metrics for clustering
def ARI(adata, label_1, label_2):
"""
Compute Adjusted Rand Index.
"""
return(adjusted_rand_score(adata.obs[label_1], adata.obs[label_2]))
def AMI(adata, label_1, label_2):
"""
Compute adjusted Mutual Info.
"""
return(adjusted_mutual_info_score(adata.obs[label_1], adata.obs[label_2]))
def homogeneity(adata, label_1, label_2):
"""
Compute homogeneity score.
"""
return(homogeneity_score(adata.obs[label_1], adata.obs[label_2]))
| 34.151515 | 133 | 0.67761 |
f86b7da8f3fbb9efbdcd905d99c8700dd6fd1138 | 10,086 | py | Python | old experiments/UI_cars 2.py | industrial-optimization-group/researchers-night | 68f2fcb8530032e157badda772a795e1f3bb2c4b | [
"MIT"
] | null | null | null | old experiments/UI_cars 2.py | industrial-optimization-group/researchers-night | 68f2fcb8530032e157badda772a795e1f3bb2c4b | [
"MIT"
] | null | null | null | old experiments/UI_cars 2.py | industrial-optimization-group/researchers-night | 68f2fcb8530032e157badda772a795e1f3bb2c4b | [
"MIT"
] | null | null | null | import dash
from dash.exceptions import PreventUpdate
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
import dash_bootstrap_components as dbc
import utils.dash_reusable_components as drc
import dash_table
import plotly.express as ex
import plotly.graph_objects as go
import pandas as pd
import numpy as np
from pygmo import fast_non_dominated_sorting as nds
data = pd.read_csv("./data/car_data_v2_processed.csv", header=0)
details = pd.read_csv("./data/car_details_v2_processed.csv", header=0)
names = details.loc[0]
maxi = details.loc[1].astype(int)
details_on_card = details.loc[2].astype(int)
details_on_card = details.columns[details_on_card == 1]
sort_columns = details.columns[maxi != 0]
sort_data = data[sort_columns].values * maxi[sort_columns].values
front = data.loc[nds(sort_data)[0][0]].reset_index(drop=True)
numeric_cols = [
attr
for attr in data
if data.dtypes[attr] == "int64" or data.dtypes[attr] == "float64"
]
other_cols = [attr for attr in data if data.dtypes[attr] == "object"]
app = dash.Dash(__name__, external_stylesheets=[dbc.themes.FLATLY])
app.layout = html.Div(
children=[
# .container class is fixed, .container.scalable is scalable
dbc.Row([dbc.Col(html.H1(children="Optimization Group"))]),
dbc.Row(
[
dbc.Col(
children=html.Div(
[
# Top card with details(?)
dbc.Card(
[
html.H4(
"Researcher's Night Event",
className="card-title",
),
html.P(
(
"Dummy text Lorem ipsum dolor sit amet,"
" consectetur adipiscing elit. Donec ante odio,"
" ultricies cursus pulvinar nec, pretium vitae"
" quam. Sed eget placerat leo, feugiat"
" efficitur felis. Nullam consequat dui a"
" dictum ultrices. Quisque ultricies convallis"
" tristique. Mauris aliquet orci at sapien"
" fringilla ultricies. Quisque egestas in"
" libero at porta. Praesent eget magna dapibus."
),
className="card-text",
),
]
),
# Attributes drowdown
dbc.Card(
children=[
html.H4(
"Choose attributes to be plotted",
className="card-title",
),
dcc.Dropdown(
id="attributes-dropdown",
options=[
{
"label": f"{attr}: {'non_numeric' if front.dtypes[attr]=='object' else 'numeric'}",
"value": attr,
}
for attr in front.columns
],
clearable=True,
searchable=True,
multi=True,
value=numeric_cols,
),
dbc.Button(
id="clear-brush", children="Clear brushing"
),
],
),
# Selection of which solutions to plot
*[
dbc.Card(
children=[
html.H4(
f"Choose range of {attr}",
className="card-title",
),
dcc.RangeSlider(
id=f"slider-{attr}",
min=0,
max=1,
step=1 / (len(front[attr].unique()) * 10),
marks={
0: f"{1 * front[attr].min()}",
0.2: f"{0.8 * front[attr].min() + 0.2 * front[attr].max()}",
0.4: f"{0.6 * front[attr].min() + 0.4 * front[attr].max()}",
0.6: f"{0.4 * front[attr].min() + 0.6 * front[attr].max()}",
0.8: f"{0.2 * front[attr].min() + 0.8 * front[attr].max()}",
1: f"{front[attr].max()}",
},
value=[0, 1],
allowCross=False,
),
]
)
for attr in numeric_cols
],
*[
dbc.Card(
children=[
html.H4(
f"Choose types of {attr}",
className="card-title",
),
dcc.Checklist(
id=f"checklist-{attr}",
options=[
{"label": val, "value": val}
for val in front[attr].unique()
],
value=front[attr].unique(),
),
]
)
for attr in other_cols
],
dbc.Card(
children=[
dcc.Graph(
id="bar", config={"displayModeBar": False}
)
]
),
],
style={"maxHeight": "810px", "overflow": "scroll"},
),
width={"size": 3, "offset": 1},
className="ml-4 mr-4",
),
dbc.Col(
children=dcc.Graph(id="graph", style={"height": "810px"}),
width={"size": 7, "offset": 1},
),
]
),
dbc.Row([html.Div(id="callback-dump")]),
],
)
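# Rebuild the scatter matrix whenever the attribute dropdown, range sliders or
# category checklists change: only Pareto-front rows inside the selected numeric
# ranges and categories are plotted.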
@app.callback(
Output("graph", "figure"),
[
Input("attributes-dropdown", "value"),
Input("clear-brush", "n_clicks"),
*[Input(f"slider-{attr}", "value") for attr in numeric_cols],
*[Input(f"checklist-{attr}", "value") for attr in other_cols],
],
)
def create_figure(chosen_attrs, clear_brush, *userchoice):
numeric_choices = userchoice[0 : len(numeric_cols)]
non_numeric_choices = userchoice[len(numeric_choices) :]
data_to_plot = front
for ranges, attr in zip(numeric_choices, numeric_cols):
attrmin = (1 - ranges[0]) * front[attr].min() + ranges[0] * front[attr].max()
attrmax = (1 - ranges[1]) * front[attr].min() + ranges[1] * front[attr].max()
data_to_plot = data_to_plot[data_to_plot[attr] >= attrmin]
data_to_plot = data_to_plot[data_to_plot[attr] <= attrmax]
for (classes, attr) in zip(non_numeric_choices, other_cols):
data_to_plot = data_to_plot[data_to_plot[attr].isin(classes)]
fig = ex.scatter_matrix(
data_to_plot, dimensions=chosen_attrs, hover_data=details_on_card
)
return fig
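# Clicking a point in the scatter matrix updates the stacked bar chart: the bars
# show the previously selected solution plus a green/red delta towards the newly
# clicked one.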
@app.callback(
Output("bar", "figure"), [Input("graph", "clickData")], [State("bar", "figure")]
)
def bar(selectedData, fig):
if selectedData is None:
raise PreventUpdate
point_id = selectedData["points"][0]["pointIndex"]
point = front.loc[point_id]
y_new = point.values
if fig is not None:
if len(fig["data"]) == 1:
y = np.asarray(fig["data"][0]["y"])
else:
y = np.asarray(fig["data"][0]["y"]) + np.asarray(fig["data"][1]["y"])
else:
y = np.zeros_like(y_new)
y_delta = y_new - y
color = np.sign(y_delta)
color = ["green" if x > 0 else "red" for x in color]
fig = go.Figure(
data=[
go.Bar(x=data.columns, y=y, name="Old choice"),
go.Bar(
x=data.columns, y=y_delta, marker_color=color, name="Delta from old"
),
]
)
fig.update_layout(margin=dict(t=0, l=0, r=0, b=0), barmode="stack")
fig.update_yaxes(range=[0, front.max().max()])
return fig
if __name__ == "__main__":
app.run_server(debug=True)
| 43.474138 | 131 | 0.377553 |
fc84b1c04a3f5e88a7f470b52bc0d429a6403a10 | 4,940 | py | Python | redismutex/mutex.py | abeerupadhyay/py-redismutex | c78c3fd4ce2b820de2e359cb293f73c220a669a5 | [
"MIT"
] | 3 | 2018-05-01T08:02:43.000Z | 2018-06-07T15:48:27.000Z | redismutex/mutex.py | esquarer/py-redismutex | c78c3fd4ce2b820de2e359cb293f73c220a669a5 | [
"MIT"
] | 1 | 2021-06-16T17:52:57.000Z | 2021-06-16T17:52:57.000Z | redismutex/mutex.py | esquarer/py-redismutex | c78c3fd4ce2b820de2e359cb293f73c220a669a5 | [
"MIT"
] | 1 | 2021-05-12T15:46:35.000Z | 2021-05-12T15:46:35.000Z | import time
import uuid
import redis
from .errors import (
BlockTimeExceedError, MutexLockError, MutexUnlockError
)
DEFAULT_BLOCK_TIME = 5
DEFAULT_DELAY = 0.5
DEFAULT_EXPIRY = 7
class RedisMutex(object):
"""Implements mutex using redis
"""
def __init__(self, redis_conn, blocking=True,
block_time=DEFAULT_BLOCK_TIME, delay=DEFAULT_DELAY,
expiry=DEFAULT_EXPIRY):
if not isinstance(redis_conn, redis.StrictRedis):
raise TypeError(
"Connection object must be of type 'redis.StrictRedis', "
"got '{}' instead".format(type(redis_conn))
)
if delay > block_time:
raise ValueError(
"Delay for a mutex should always be less than the block time."
)
if expiry < block_time:
raise ValueError(
"Expiry of a mutex should always be more than the block time."
)
self.redis = redis_conn
self.blocking = blocking
self.expiry = expiry
# block_time and delay are not relevant if the
# mutex is non-blocking, i.e., if blocking=False.
self.block_time = block_time if self.blocking else None
self.delay = delay if self.blocking else None
# Mutex key and mutex value are set to None by default. These
# are automatically set when a lock is acquired and can be
# accessed via self.key and self.value
self.reset()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.release_lock()
return self
@property
def key(self):
return self.__mkey
@property
def value(self):
return self.__mvalue
def generate_unique_id(self):
return uuid.uuid4().__str__()
def reset(self):
self.__mkey = None
self.__mvalue = None
def lock(self):
"""Adds the mutex key to redis with the given value and expiry.
"""
# nx=True ensures that the value must be set only when the
# provided key does not exists in redis.
result = self.redis.set(
self.__mkey, self.__mvalue, nx=True, ex=self.expiry
)
if not result:
raise MutexLockError(
"Unable to acquire lock using key '{}'".format(self.__mkey)
)
return self
def unlock(self):
"""Deletes the mutex key from redis after validating the unique
value for mutex.
"""
stored_value = self.redis.get(self.__mkey)
        # The given key does not exist in redis
if not stored_value:
raise MutexUnlockError(
"Unable to unlock. Key '{}' does not exists in redis."
.format(self.__mkey)
)
# The given value is not equal to the stored value. This is
# done to remove the lock in a safe way - A lock can only be
# removed by the process which created it. Read more at
# https://redis.io/topics/distlock#why-failover-based-implementations-are-not-enough
elif not stored_value.decode("utf-8") == self.__mvalue:
raise MutexUnlockError(
"Unable to unlock. Value for key '{}' was reset."
.format(self.__mkey)
)
self.redis.delete(self.__mkey)
self.reset()
return self
def acquire_lock(self, mutex_key):
"""Handle locking of a mutex.
"""
self.__mkey = mutex_key
self.__mvalue = self.generate_unique_id()
if not self.blocking:
try:
return self.lock()
except MutexLockError as e:
self.reset()
raise MutexLockError(
"Unable to acquire lock using key '{}'".format(self.__mkey)
)
start = int(time.time())
elapsed_time = 0
# Poll redis to acquire lock on the given key for the allowed
# blocking time
while elapsed_time < self.block_time:
try:
return self.lock()
except MutexLockError as e:
# Add a delay before next poll
time.sleep(self.delay)
elapsed_time = int(time.time()) - start
# Exceeded the allowed waiting time for the mutex and failed
# to acquire lock in this duration. Hence raise TimeOutError
self.reset()
raise BlockTimeExceedError(
"Exceeded max allowed block time while acquiring lock."
)
def release_lock(self):
"""Handles unlocking of a mutex.
"""
if not self.__mkey or not self.__mvalue:
raise MutexUnlockError(
"Unable to perform operation. Found null values for mutex "
"key and(or) value."
)
return self.unlock()
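# --- Illustrative usage sketch (editor's addition, not part of the package) ---
# Shows how acquire_lock()/release_lock() combine with the context-manager protocol
# above.  The localhost connection details and the key name are assumptions made
# only for this example; the helper is never called on import.
def _example_usage():
    conn = redis.StrictRedis(host="localhost", port=6379, db=0)
    mutex = RedisMutex(conn, blocking=True, block_time=5, delay=0.5, expiry=7)
    # acquire_lock() returns the mutex itself, so it can drive a `with` block;
    # __exit__() then calls release_lock() even if the body raises.
    with mutex.acquire_lock("example:resource"):
        pass  # critical section goes here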
| 30.493827 | 92 | 0.579757 |
5f81ea5aa21400d309dc90cb90458eecca18246f | 175 | py | Python | typeit/schema/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 8 | 2018-06-17T16:01:12.000Z | 2021-11-05T23:34:55.000Z | typeit/schema/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 71 | 2018-06-23T15:31:56.000Z | 2021-03-09T16:56:50.000Z | typeit/schema/__init__.py | avanov/type | dbf2a94de13b592987695b7346f10cbf53acf3af | [
"MIT"
] | 1 | 2021-11-05T23:34:57.000Z | 2021-11-05T23:34:57.000Z | from . import meta
from . import primitives
from . import types
from . import nodes
from .errors import Invalid
__all__ = ['meta', 'primitives', 'types', 'nodes', 'Invalid']
| 21.875 | 61 | 0.714286 |
22f7a0ebe6f0f561979d87c5cbd7df71ed794e15 | 4,475 | py | Python | mmdet/models/detectors/base.py | DouCir/mmdetection | 44613202c379d85315ed47ca670fd9853f90c3a5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/base.py | DouCir/mmdetection | 44613202c379d85315ed47ca670fd9853f90c3a5 | [
"Apache-2.0"
] | null | null | null | mmdet/models/detectors/base.py | DouCir/mmdetection | 44613202c379d85315ed47ca670fd9853f90c3a5 | [
"Apache-2.0"
] | null | null | null | import logging
from abc import ABCMeta, abstractmethod
import mmcv
import numpy as np
import torch.nn as nn
import pycocotools.mask as maskUtils
from mmdet.core import tensor2imgs, get_classes
class BaseDetector(nn.Module):
"""Base class for detectors"""
__metaclass__ = ABCMeta
def __init__(self):
super(BaseDetector, self).__init__()
@property
def with_neck(self):
return hasattr(self, 'neck') and self.neck is not None
@property
def with_upper_neck(self):
return hasattr(self, 'upper_neck') and self.upper_neck is not None
@property
def with_bbox(self):
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
return hasattr(self, 'mask_head') and self.mask_head is not None
@abstractmethod
def extract_feat(self, imgs):
pass
def extract_feats(self, imgs):
assert isinstance(imgs, list)
for img in imgs:
yield self.extract_feat(img)
@abstractmethod
def forward_train(self, imgs, img_metas, **kwargs):
pass
@abstractmethod
def simple_test(self, img, img_meta, **kwargs):
pass
@abstractmethod
def aug_test(self, imgs, img_metas, **kwargs):
pass
def init_weights(self, pretrained=None):
if pretrained is not None:
logger = logging.getLogger()
logger.info('load model from: {}'.format(pretrained))
def forward_test(self, imgs, img_metas, **kwargs):
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError('{} must be a list, but got {}'.format(
name, type(var)))
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(
'num of augmentations ({}) != num of image meta ({})'.format(
len(imgs), len(img_metas)))
# TODO: remove the restriction of imgs_per_gpu == 1 when prepared
imgs_per_gpu = imgs[0].size(0)
assert imgs_per_gpu == 1
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], **kwargs)
else:
return self.aug_test(imgs, img_metas, **kwargs)
def forward(self, img, img_meta, return_loss=True, **kwargs):
if return_loss:
return self.forward_train(img, img_meta, **kwargs)
else:
return self.forward_test(img, img_meta, **kwargs)
def show_result(self,
data,
result,
img_norm_cfg,
dataset='coco',
score_thr=0.3):
if isinstance(result, tuple):
bbox_result, segm_result = result
else:
bbox_result, segm_result = result, None
img_tensor = data['img'][0]
img_metas = data['img_meta'][0].data[0]
imgs = tensor2imgs(img_tensor, **img_norm_cfg)
assert len(imgs) == len(img_metas)
if isinstance(dataset, str):
class_names = get_classes(dataset)
elif isinstance(dataset, (list, tuple)) or dataset is None:
class_names = dataset
else:
raise TypeError(
'dataset must be a valid dataset name or a sequence'
' of class names, not {}'.format(type(dataset)))
for img, img_meta in zip(imgs, img_metas):
h, w, _ = img_meta['img_shape']
img_show = img[:h, :w, :]
bboxes = np.vstack(bbox_result)
# draw segmentation masks
if segm_result is not None:
segms = mmcv.concat_list(segm_result)
inds = np.where(bboxes[:, -1] > score_thr)[0]
for i in inds:
color_mask = np.random.randint(
0, 256, (1, 3), dtype=np.uint8)
mask = maskUtils.decode(segms[i]).astype(np.bool)
img_show[mask] = img_show[mask] * 0.5 + color_mask * 0.5
# draw bounding boxes
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
mmcv.imshow_det_bboxes(
img_show,
bboxes,
labels,
class_names=class_names,
score_thr=score_thr)
| 32.194245 | 77 | 0.564022 |
b4daa8f025570de0584c631c629656d3b33b1f36 | 924 | py | Python | Teste_telas/hot.py | gfcastellano/Agenda-Leonel | 543dd46e1836410ec1ad7093ef4a07c2f84678a6 | [
"MIT"
] | 1 | 2020-06-16T03:54:09.000Z | 2020-06-16T03:54:09.000Z | Teste_telas/hot.py | gfcastellano/Agenda_Leonel | 543dd46e1836410ec1ad7093ef4a07c2f84678a6 | [
"MIT"
] | 1 | 2020-06-16T03:50:16.000Z | 2020-06-16T03:50:16.000Z | Teste_telas/hot.py | gfcastellano/Agenda_Leonel | 543dd46e1836410ec1ad7093ef4a07c2f84678a6 | [
"MIT"
] | null | null | null | from kivy.lang import Builder
from kivymd.app import MDApp
KV = '''
#:import KivyLexer kivy.extras.highlight.KivyLexer
#:import HotReloadViewer kivymd.utils.hot_reload_viewer.HotReloadViewer
BoxLayout:
CodeInput:
lexer: KivyLexer()
style_name: "native"
#on_text: app.update_kv_file(self.text)
size_hint_x: .7
HotReloadViewer:
size_hint_x: .3
path: app.path_to_kv_file
errors: True
errors_text_color: 1, 1, 0, 1
errors_background_color: app.theme_cls.bg_dark
'''
class Example(MDApp):
path_to_kv_file = "D:\\Users\\Gabriel\\Documents\\GitHub\\Agenda_Leonel\\Teste_telas\\kv_file.kv"
def build(self):
#self.theme_cls.theme_style = "Dark"
return Builder.load_string(KV)
def update_kv_file(self, text):
with open(self.path_to_kv_file, "w") as kv_file:
kv_file.write(text)
Example().run()
| 23.1 | 101 | 0.669913 |
83dc903f9b88d82f85fef2d37674999a645ab009 | 2,122 | py | Python | Python3Code/KalmanFilter.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
] | null | null | null | Python3Code/KalmanFilter.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
] | null | null | null | Python3Code/KalmanFilter.py | kim66003/ML4QS_group25 | cd7f838e95f1583701892175670d7d0c8da0e1be | [
"MIT"
] | null | null | null | import sys
import copy
from pathlib import Path
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from util.VisualizeDataset import VisualizeDataset
from Chapter3.DataTransformation import LowPassFilter
from Chapter3.DataTransformation import PrincipalComponentAnalysis
from Chapter3.ImputationMissingValues import ImputationMissingValues
from Chapter3.KalmanFilters import KalmanFilters
from Load import *
attributes_to_impute = [
"Acceleration x (m/s^2)","Acceleration y (m/s^2)","Acceleration z (m/s^2)",
"Gyroscope x (rad/s)","Gyroscope y (rad/s)","Gyroscope z (rad/s)",
]
save_names = {
"Acceleration x (m/s^2)": 'acc_x',
"Acceleration y (m/s^2)": 'acc_y',
"Acceleration z (m/s^2)": 'acc_z',
"Gyroscope x (rad/s)": 'gyr_x',
"Gyroscope y (rad/s)": 'gyr_y',
"Gyroscope z (rad/s)": 'gyr_z',
}
DataViz = VisualizeDataset(__file__, show=False)
KalFilter = KalmanFilters()
dataset = pd.read_csv(preprocessed_phone_data)
dataset.index = pd.to_datetime(dataset[time_col])
for col in attributes_to_impute:
print('Applying kalman filter for ', col)
dataset = KalFilter.apply_kalman_filter(dataset, col)
DataViz.save_path = save_names[col] + '_phone_imputed_values'
DataViz.plot_imputed_values(dataset, ['original', 'kalman'], col, dataset[col])
DataViz.save_path = save_names[col] + '_phone_all_data'
DataViz.plot_dataset(dataset, [col, col + '_kalman'], ['exact','exact'], ['line', 'line'])
print(dataset.columns)
dataset.to_csv(outlier_phone_data)
dataset = pd.read_csv(preprocessed_watch_data)
dataset.index = pd.to_datetime(dataset[time_col])
for col in attributes_to_impute:
print('Applying kalman filter for ', col)
dataset = KalFilter.apply_kalman_filter(dataset, col)
DataViz.save_path = save_names[col] + '_watch_imputed_values'
DataViz.plot_imputed_values(dataset, ['original', 'kalman'], col, dataset[col])
DataViz.save_path = save_names[col] + '_watch_all_data'
DataViz.plot_dataset(dataset, [col, col + '_kalman'], ['exact','exact'], ['line', 'line'])
print(dataset.columns)
dataset.to_csv(outlier_watch_data)
| 35.366667 | 94 | 0.739397 |
b0975e73f9a822ec28f8c100fa58980dc8e20606 | 1,814 | py | Python | migrations/versions/9458ea5b7af1_.py | Dev-Elie/Flask-Blog | 86ee901d1969a76fe298b9bb31502f160730e6b0 | [
"MIT"
] | 2 | 2021-12-09T12:22:54.000Z | 2022-02-13T07:36:22.000Z | migrations/versions/9458ea5b7af1_.py | Dev-Elie/Teksade-Blog | 86ee901d1969a76fe298b9bb31502f160730e6b0 | [
"MIT"
] | null | null | null | migrations/versions/9458ea5b7af1_.py | Dev-Elie/Teksade-Blog | 86ee901d1969a76fe298b9bb31502f160730e6b0 | [
"MIT"
] | 1 | 2021-11-24T15:17:49.000Z | 2021-11-24T15:17:49.000Z | """empty message
Revision ID: 9458ea5b7af1
Revises: 31e6a281df69
Create Date: 2021-02-22 22:06:04.215526
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '9458ea5b7af1'
down_revision = '31e6a281df69'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('messages',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.String(length=100), nullable=False),
sa.Column('email', sa.String(length=64), nullable=False),
sa.Column('message', sa.Text(), nullable=False),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_messages_email'), 'messages', ['email'], unique=False)
op.create_index(op.f('ix_messages_name'), 'messages', ['name'], unique=False)
op.create_table('replies',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('message', sa.String(length=2000), nullable=False),
sa.Column('date_posted', sa.DateTime(), nullable=True),
sa.Column('author', sa.String(length=100), nullable=False),
sa.Column('comment_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['comment_id'], ['comments.id'], ondelete='SET NULL'),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_replies_date_posted'), 'replies', ['date_posted'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index(op.f('ix_replies_date_posted'), table_name='replies')
op.drop_table('replies')
op.drop_index(op.f('ix_messages_name'), table_name='messages')
op.drop_index(op.f('ix_messages_email'), table_name='messages')
op.drop_table('messages')
# ### end Alembic commands ###
| 35.568627 | 93 | 0.68688 |
c699bd7a173f83500dcb970c429722c199d6b1b6 | 2,441 | py | Python | base16/base16-gruvbox-dark-soft.py | memeplex/base16-prompt-toolkit | 8a52f886005eb1fa005fd65a2c4b1a680a1f1d91 | [
"MIT"
] | 12 | 2017-02-27T02:03:20.000Z | 2021-04-12T08:56:46.000Z | base16/base16-gruvbox-dark-soft.py | memeplex/base16-prompt-toolkit | 8a52f886005eb1fa005fd65a2c4b1a680a1f1d91 | [
"MIT"
] | 6 | 2017-02-27T19:09:13.000Z | 2020-12-18T10:47:30.000Z | base16/base16-gruvbox-dark-soft.py | memeplex/base16-prompt-toolkit | 8a52f886005eb1fa005fd65a2c4b1a680a1f1d91 | [
"MIT"
] | 8 | 2018-02-08T12:49:27.000Z | 2021-12-21T12:58:27.000Z | # -*- coding: utf-8 -*-
# base16-prompt-toolkit (https://github.com/memeplex/base16-prompt-toolkit)
# Base16 Prompt Toolkit template by Carlos Pita (carlosjosepita@gmail.com)
# Gruvbox dark, soft scheme by Dawid Kurek (dawikur@gmail.com), morhetz (https://github.com/morhetz/gruvbox)
from prompt_toolkit.terminal.vt100_output import _256_colors
from pygments.style import Style
from pygments.token import (Keyword, Name, Comment, String, Error, Text,
Number, Operator, Literal, Token)
# See http://chriskempson.com/projects/base16/ for a description of the role
# of the different colors in the base16 palette.
base00 = '#32302f'
base01 = '#3c3836'
base02 = '#504945'
base03 = '#665c54'
base04 = '#bdae93'
base05 = '#d5c4a1'
base06 = '#ebdbb2'
base07 = '#fbf1c7'
base08 = '#fb4934'
base09 = '#fe8019'
base0A = '#fabd2f'
base0B = '#b8bb26'
base0C = '#8ec07c'
base0D = '#83a598'
base0E = '#d3869b'
base0F = '#d65d0e'
# See https://github.com/jonathanslenders/python-prompt-toolkit/issues/355
colors = (globals()['base0' + d] for d in '08BADEC5379F1246')
for i, color in enumerate(colors):
r, g, b = int(color[1:3], 16), int(color[3:5], 16), int(color[5:], 16)
_256_colors[r, g, b] = i + 6 if i > 8 else i
# See http://pygments.org/docs/tokens/ for a description of the different
# pygments tokens.
class Base16Style(Style):
background_color = base00
highlight_color = base02
default_style = base05
styles = {
Text: base05,
Error: '%s bold' % base08,
Comment: base03,
Keyword: base0E,
Keyword.Constant: base09,
Keyword.Namespace: base0D,
Name.Builtin: base0D,
Name.Function: base0D,
Name.Class: base0D,
Name.Decorator: base0E,
Name.Exception: base08,
Number: base09,
Operator: base0E,
Literal: base0B,
String: base0B
}
# See https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/prompt_toolkit/styles/defaults.py
# for a description of prompt_toolkit related pseudo-tokens.
overrides = {
Token.Prompt: base0B,
Token.PromptNum: '%s bold' % base0B,
Token.OutPrompt: base08,
Token.OutPromptNum: '%s bold' % base08,
Token.Menu.Completions.Completion: 'bg:%s %s' % (base01, base04),
Token.Menu.Completions.Completion.Current: 'bg:%s %s' % (base04, base01),
Token.MatchingBracket.Other: 'bg:%s %s' % (base03, base00)
}
| 31.294872 | 109 | 0.669807 |
be8929bdc5943b5f3240b9e699568b682de9b933 | 619 | py | Python | Python/Rotate-array-by-D-elements.py | Nikhil-Sharma-1/DS-Algo-Point | b21f598397c9bd4ddf806c0ef62ea8c53d8bda2c | [
"MIT"
] | 1,148 | 2020-09-28T15:06:16.000Z | 2022-03-17T16:30:08.000Z | Python/Rotate-array-by-D-elements.py | Nikhil-Sharma-1/DS-Algo-Point | b21f598397c9bd4ddf806c0ef62ea8c53d8bda2c | [
"MIT"
] | 520 | 2020-09-28T18:34:26.000Z | 2021-10-30T17:06:43.000Z | Python/Rotate-array-by-D-elements.py | Nikhil-Sharma-1/DS-Algo-Point | b21f598397c9bd4ddf806c0ef62ea8c53d8bda2c | [
"MIT"
] | 491 | 2020-09-28T18:40:14.000Z | 2022-03-20T13:41:44.000Z | def rotatethearray(array, d):
array[0:d] = reversed(array[0:d])
array[d: ] = reversed(array[d: ])
array.reverse()
print(array)
print("enter the array of numbers:", end=" ")
array = [int(i) for i in input().split()]
print()
print("enter the value of d:", end=" ")
d = int(input())
rotatethearray(array, d)
#In this algo we first reversed the elements up to index D (excluded)
#then we reversed the elements from the Dth index to the end
#finally we reversed the entire array
#doing so, our array gets rotated left by D elements
# Input: 1 2 3 4 5
# D = 3
# Output: 4 5 1 2 3
# T.C: O(n)
# S.C: O(n)
| 24.76 | 68 | 0.659128 |
42565ab34378c7772ae9b4cae068ea584657b225 | 18,105 | py | Python | main.py | peternara/conditional-similarity-networks-fashion | 1a75e3d9eccebd7570f0183ef195dad075fd85c6 | [
"BSD-3-Clause"
] | null | null | null | main.py | peternara/conditional-similarity-networks-fashion | 1a75e3d9eccebd7570f0183ef195dad075fd85c6 | [
"BSD-3-Clause"
] | null | null | null | main.py | peternara/conditional-similarity-networks-fashion | 1a75e3d9eccebd7570f0183ef195dad075fd85c6 | [
"BSD-3-Clause"
] | null | null | null | from __future__ import print_function
import argparse
import os
import sys
import shutil
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import transforms
from torch.autograd import Variable
import torch.backends.cudnn as cudnn
from triplet_image_loader import TripletImageLoader
from tripletnet import CS_Tripletnet
from visdom import Visdom
import numpy as np
import Resnet_18
from csn import ConditionalSimNet
import tensorboardcolab as tb
from torch.utils.tensorboard import SummaryWriter
from google.colab import drive
drive.mount('/content/gdrive')
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=32, metavar='N',
help='input batch size for training (default: 64) 256 ')
parser.add_argument('--epochs', type=int, default=200, metavar='N',
help='number of epochs to train (default: 200)')
parser.add_argument('--start_epoch', type=int, default=1, metavar='N',
help='number of start epoch (default: 1)')
parser.add_argument('--lr', type=float, default=5e-5, metavar='LR',
help='learning rate (default: 5e-5)')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--log-interval', type=int, default=20, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--margin', type=float, default=0.2, metavar='M',
help='margin for triplet loss (default: 0.2)')
parser.add_argument('--resume', default='', type=str,
help='path to latest checkpoint (default: none)')
parser.add_argument('--name', default='Conditional_Similarity_Network', type=str,
help='name of experiment')
parser.add_argument('--embed_loss', type=float, default=5e-3, metavar='M',
help='parameter for loss for embedding norm')
parser.add_argument('--mask_loss', type=float, default=5e-4, metavar='M',
help='parameter for loss for mask norm')
parser.add_argument('--num_traintriplets', type=int, default=100000, metavar='N',
help='how many unique training triplets (default: 100000)')
parser.add_argument('--dim_embed', type=int, default=64, metavar='N',
help='how many dimensions in embedding (default: 64)')
parser.add_argument('--test', dest='test', action='store_true',
help='To only run inference on test set')
parser.add_argument('--learned', dest='learned', action='store_true',
help='To learn masks from random initialization')
parser.add_argument('--prein', dest='prein', action='store_true',
help='To initialize masks to be disjoint')
parser.add_argument('--visdom', dest='visdom', action='store_true',
help='Use visdom to track and plot')
parser.add_argument('--conditions', nargs='*', type=int,
help='Set of similarity notions')
parser.set_defaults(test=False)
parser.set_defaults(learned=True) # False
parser.set_defaults(prein=False)
parser.set_defaults(visdom=False)
best_acc = 0
step = 0
def main():
global args, best_acc
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
#torch.manual_seed(args.seed)
    if args.cuda:
        #torch.cuda.manual_seed(args.seed)
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")
if args.visdom:
global plotter
plotter = VisdomLinePlotter(env_name=args.name)
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
global conditions
if args.conditions is not None:
conditions = args.conditions
else:
conditions = [0,1,2,3]
kwargs = {'num_workers': 4, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json',
conditions, 'train', n_triplets=args.num_traintriplets,
transform=transforms.Compose([
transforms.Resize(112),
transforms.CenterCrop(112),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json',
conditions, 'test', n_triplets=160000,
transform=transforms.Compose([
transforms.Resize(112),
transforms.CenterCrop(112),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
val_loader = torch.utils.data.DataLoader(
TripletImageLoader('data', 'ut-zap50k-images', 'filenames.json',
conditions, 'val', n_triplets=80000,
transform=transforms.Compose([
transforms.Resize(112),
transforms.CenterCrop(112),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
model = Resnet_18.resnet18(pretrained=True, embedding_size=args.dim_embed)
model.to(device)
csn_model = ConditionalSimNet(model, n_conditions=len(conditions),
embedding_size=args.dim_embed, learnedmask=args.learned, prein=args.prein)
global mask_var
mask_var = csn_model.masks.weight
tnet = CS_Tripletnet(csn_model)
if args.cuda:
tnet.cuda()
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print("=> loading checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
tnet.load_state_dict(checkpoint['state_dict'])
print("=> loaded checkpoint '{}' (epoch {})"
.format(args.resume, checkpoint['epoch']))
else:
print("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
criterion = torch.nn.MarginRankingLoss(margin = args.margin)
parameters = filter(lambda p: p.requires_grad, tnet.parameters())
optimizer = optim.Adam(parameters, lr=args.lr)
print('=================================================================')
print('learned : ', args.learned)
print('batch-size : ', args.batch_size)
print('conditions : ', args.conditions)
print('margin : ', args.margin)
print('resume : ', args.resume)
print('train_loader : ', len(train_loader))
print('test_loader : ', len(test_loader))
print('val_loader : ', len(val_loader))
n_parameters = sum([p.data.nelement() for p in tnet.parameters()])
print(' + Number of params: {}'.format(n_parameters))
#tb = tb.TensorBoardColab()
tb = SummaryWriter()
if args.test:
test_acc = test(test_loader, tnet, criterion, 1, tb)
sys.exit()
for epoch in range(args.start_epoch, args.epochs + 1):
# update learning rate
adjust_learning_rate(optimizer, epoch)
# train for one epoch
print('train for one epoch')
train(train_loader, tnet, criterion, optimizer, epoch, tb)
# evaluate on validation set
print('evaluate on validation set')
acc = test(val_loader, tnet, criterion, epoch, tb)
# remember best acc and save checkpoint
is_best = acc > best_acc
best_acc = max(acc, best_acc)
save_checkpoint({
'epoch': epoch + 1,
'state_dict': tnet.state_dict(),
'best_prec1': best_acc,
}, is_best)
tb.close()
def train(train_loader, tnet, criterion, optimizer, epoch, tb):
losses = AverageMeter()
accs = AverageMeter()
emb_norms = AverageMeter()
mask_norms = AverageMeter()
global step
batch_iterator = iter(train_loader)
# switch to train mode
tnet.train()
#for batch_idx, (data1, data2, data3, c) in enumerate(train_loader):
for batch_idx in range(len(train_loader.dataset)):
try:
data = next(batch_iterator)
except StopIteration:
batch_iterator = iter(train_loader)
data = next(batch_iterator)
except Exception as e:
print('Loading data exception:', e)
continue
data1, data2, data3, c = data
if args.cuda:
data1, data2, data3, c = data1.cuda(), data2.cuda(), data3.cuda(), c.cuda()
data1, data2, data3, c = Variable(data1), Variable(data2), Variable(data3), Variable(c)
# torch.Size([128, 3, 112, 112]) torch.Size([128, 3, 112, 112]) torch.Size([128, 3, 112, 112]) torch.Size([128])
# print(data1.shape, data2.shape, data3.shape, c.shape)
# compute output
dista, distb, mask_norm, embed_norm, mask_embed_norm = tnet(data1, data2, data3, c)
# 1 means, dista should be larger than distb
#print('dista : ', dista.size()) # [32] > batch size
#print(dista) # [0.4902, 0.3963, 0.4873, 0.5724, 0.4054, 0.3121, 0.3828, 0.4504, 0.5122,
# 0.5277, 0.4292, 0.4265, 0.3459, 0.2941, 0.5692, 0.5078, 0.3934, 0.4100,
# 0.5169, 0.3513, 0.4341, 0.3800, 0.4838, 0.4658, 0.3866, 0.4130, 0.4975,
# 0.4475, 0.4680, 0.4000, 0.4422, 0.4319]
#print('distb : ', distb.size()) # [32] > batch size
#print(distb) # [0.4969, 0.2613, 0.3864, 0.2541, 0.3750, 0.3438, 0.4856, 0.3948, 0.4113,
# 0.3821, 0.4903, 0.3686, 0.5303, 0.4397, 0.4437, 0.3278, 0.3889, 0.3586,
# 0.3326, 0.3111, 0.4075, 0.4273, 0.4425, 0.3362, 0.2934, 0.3503, 0.4526,
# 0.5191, 0.3576, 0.4157, 0.2716, 0.5081]
# MarginRankingLoss
        # since dista is expected to be larger than distb, every target here is the y = 1 case;
        # if the ranking were the other way around, the target would be -1 instead
        # If y = 1 then it is assumed the first input should be ranked higher
        # (have a larger value) than the second input, and vice-versa for y = -1.
        # loss(x, y) = max(0, -y * (x1 - x2) + margin)
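        # worked example (illustrative): with margin = 0.2, dista = 0.50, distb = 0.38 and y = 1,
        # loss = max(0, -(0.50 - 0.38) + 0.2) = 0.08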
target = torch.FloatTensor(dista.size()).fill_(1)
#print('target : ', target) # tensor([1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.,
# 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.])
#print('\t', target.shape) # [32] > batch size
if args.cuda:
target = target.cuda()
target = Variable(target)
loss_triplet = criterion(dista, distb, target)
loss_embedd = embed_norm / np.sqrt(data1.size(0))
loss_mask = mask_norm / data1.size(0)
loss = loss_triplet + args.embed_loss * loss_embedd + args.mask_loss * loss_mask
# measure accuracy and record loss
acc = accuracy(dista, distb)
losses.update(loss_triplet.data.item(), data1.size(0))
accs.update(acc, data1.size(0))
emb_norms.update(loss_embedd.data.item())
mask_norms.update(loss_mask.data.item())
# compute gradient and do optimizer step
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{}]\t'
'Loss: {:.4f} ({:.4f}) \t'
'Acc: {:.2f}% ({:.2f}%) \t'
'Emb_Norm: {:.2f} ({:.2f})'.format(
epoch, batch_idx * len(data1), len(train_loader.dataset),
losses.val, losses.avg,
100. * accs.val, 100. * accs.avg, emb_norms.val, emb_norms.avg))
#tb.add_scalar("accs.val", accs.val, step)
tb.add_scalar("loss", losses.avg, step)
tb.add_scalar("accs.avg", accs.avg, step)
step+=1
# log avg values to visdom
if args.visdom:
plotter.plot('acc', 'train', epoch, accs.avg)
plotter.plot('loss', 'train', epoch, losses.avg)
plotter.plot('emb_norms', 'train', epoch, emb_norms.avg)
plotter.plot('mask_norms', 'train', epoch, mask_norms.avg)
if epoch % 10 == 0:
plotter.plot_mask(torch.nn.functional.relu(mask_var).data.cpu().numpy().T, epoch)
def test(test_loader, tnet, criterion, epoch, tb):
losses = AverageMeter()
accs = AverageMeter()
accs_cs = {}
for condition in conditions:
accs_cs[condition] = AverageMeter()
batch_iterator = iter(test_loader)
# switch to evaluation mode
tnet.eval()
#for batch_idx, (data1, data2, data3, c) in enumerate(test_loader):
for batch_idx in range(len(test_loader.dataset)):
try:
data = next(batch_iterator)
except StopIteration:
batch_iterator = iter(test_loader)
data = next(batch_iterator)
except Exception as e:
print('Loading data exception:', e)
continue
data1, data2, data3, c = data
if args.cuda:
data1, data2, data3, c = data1.cuda(), data2.cuda(), data3.cuda(), c.cuda()
data1, data2, data3, c = Variable(data1), Variable(data2), Variable(data3), Variable(c)
c_test = c
# compute output
dista, distb, _, _, _ = tnet(data1, data2, data3, c)
target = torch.FloatTensor(dista.size()).fill_(1)
if args.cuda:
target = target.cuda()
target = Variable(target)
test_loss = criterion(dista, distb, target).data.item()
# measure accuracy and record loss
acc = accuracy(dista, distb)
accs.update(acc, data1.size(0))
for condition in conditions:
accs_cs[condition].update(accuracy_id(dista, distb, c_test, condition), data1.size(0))
losses.update(test_loss, data1.size(0))
print('Test Epoch: {} [{}/{}]\t'
'Loss: {:.4f} ({:.4f}) \t'
'Acc: {:.2f}% ({:.2f}%) '.format(
epoch, batch_idx * len(data1), len(test_loader.dataset),
losses.val, losses.avg,
100. * accs.val, 100. * accs.avg))
print('\nTest set: Average loss: {:.4f}, Accuracy: {:.2f}%\n'.format(losses.avg, 100. * accs.avg))
    tb.add_scalar("test_acc", accs.avg, epoch)
if args.visdom:
for condition in conditions:
plotter.plot('accs', 'acc_{}'.format(condition), epoch, accs_cs[condition].avg)
plotter.plot(args.name, args.name, epoch, accs.avg, env='overview')
plotter.plot('acc', 'test', epoch, accs.avg)
plotter.plot('loss', 'test', epoch, losses.avg)
return accs.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
"""Saves checkpoint to disk"""
directory = "/content/gdrive/My Drive/colab_mdoel/%s/"%(args.name)
if not os.path.exists(directory):
os.makedirs(directory)
filename = directory + filename
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, '/content/gdrive/My Drive/colab_mdoel/%s/'%(args.name) + 'model_best.pth.tar')
class VisdomLinePlotter(object):
"""Plots to Visdom"""
def __init__(self, env_name='main'):
self.viz = Visdom()
self.env = env_name
self.plots = {}
def plot(self, var_name, split_name, x, y, env=None):
if env is not None:
print_env = env
else:
print_env = self.env
if var_name not in self.plots:
self.plots[var_name] = self.viz.line(X=np.array([x,x]), Y=np.array([y,y]), env=print_env, opts=dict(
legend=[split_name],
title=var_name,
xlabel='Epochs',
ylabel=var_name
))
else:
self.viz.updateTrace(X=np.array([x]), Y=np.array([y]), env=print_env, win=self.plots[var_name], name=split_name)
def plot_mask(self, masks, epoch):
self.viz.bar(
X=masks,
env=self.env,
opts=dict(
stacked=True,
title=epoch,
)
)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def adjust_learning_rate(optimizer, epoch):
    """Decays the learning rate exponentially by 1.5% per epoch from the initial LR"""
lr = args.lr * ((1 - 0.015) ** epoch)
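    # e.g. with the default lr = 5e-5, epoch 10 gives 5e-5 * 0.985**10 ~ 4.3e-5 (illustrative)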
if args.visdom:
plotter.plot('lr', 'learning rate', epoch, lr)
for param_group in optimizer.param_groups:
param_group['lr'] = lr
def accuracy(dista, distb):
margin = 0
pred = (dista - distb - margin).cpu().data
return (pred > 0).sum()*1.0/dista.size()[0]
def accuracy_id(dista, distb, c, c_id):
margin = 0
pred = (dista - distb - margin).cpu().data
return ((pred > 0)*(c.cpu().data == c_id)).sum()*1.0/(c.cpu().data == c_id).sum()
if __name__ == '__main__':
main()
| 39.791209 | 124 | 0.577078 |
71a31c58c298414077bbbce45043d2db7e766d79 | 3,550 | py | Python | oauthenticator/auth0.py | NYU-CI/oauthenticator | ae6df8af00a47c533ae06aeaf8de17b787c480b5 | [
"CC0-1.0"
] | null | null | null | oauthenticator/auth0.py | NYU-CI/oauthenticator | ae6df8af00a47c533ae06aeaf8de17b787c480b5 | [
"CC0-1.0"
] | null | null | null | oauthenticator/auth0.py | NYU-CI/oauthenticator | ae6df8af00a47c533ae06aeaf8de17b787c480b5 | [
"CC0-1.0"
] | null | null | null | """
Custom Authenticator to use Auth0 OAuth with JupyterHub
Derived using the Github and Google OAuthenticator implementations as examples.
The following environment variables may be used for configuration:
AUTH0_SUBDOMAIN - The subdomain for your Auth0 account
OAUTH_CLIENT_ID - Your client id
OAUTH_CLIENT_SECRET - Your client secret
OAUTH_CALLBACK_URL - Your callback handler URL
Additionally, if you are concerned about your secrets being exposed by
an env dump(I know I am!) you can set the client_secret, client_id and
oauth_callback_url directly on the config for Auth0OAuthenticator.
One instance of this could be adding the following to your jupyterhub_config.py :
c.Auth0OAuthenticator.client_id = 'YOUR_CLIENT_ID'
c.Auth0OAuthenticator.client_secret = 'YOUR_CLIENT_SECRET'
c.Auth0OAuthenticator.oauth_callback_url = 'YOUR_CALLBACK_URL'
If you are using the environment variable config, all you should need to
do is define them in the environment then add the following line to
jupyterhub_config.py :
c.JupyterHub.authenticator_class = 'oauthenticator.auth0.Auth0OAuthenticator'
"""
import json
import os
from tornado.auth import OAuth2Mixin
from tornado import gen, web
from tornado.httpclient import HTTPRequest, AsyncHTTPClient
from jupyterhub.auth import LocalAuthenticator
from .oauth2 import OAuthLoginHandler, OAuthenticator
AUTH0_SUBDOMAIN = os.getenv('AUTH0_SUBDOMAIN')
class Auth0Mixin(OAuth2Mixin):
_OAUTH_AUTHORIZE_URL = "https://%s.auth0.com/authorize" % AUTH0_SUBDOMAIN
_OAUTH_ACCESS_TOKEN_URL = "https://%s.auth0.com/oauth/token" % AUTH0_SUBDOMAIN
class Auth0LoginHandler(OAuthLoginHandler, Auth0Mixin):
pass
class Auth0OAuthenticator(OAuthenticator):
login_service = "Auth0"
login_handler = Auth0LoginHandler
@gen.coroutine
def authenticate(self, handler, data=None):
code = handler.get_argument("code", False)
if not code:
raise web.HTTPError(400, "oauth callback made without a token")
# TODO: Configure the curl_httpclient for tornado
http_client = AsyncHTTPClient()
params = {
'grant_type': 'authorization_code',
'client_id': self.client_id,
'client_secret': self.client_secret,
'code':code,
'redirect_uri': self.get_callback_url(handler)
}
url = "https://%s.auth0.com/oauth/token" % AUTH0_SUBDOMAIN
req = HTTPRequest(url,
method="POST",
headers={"Content-Type": "application/json"},
body=json.dumps(params)
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
access_token = resp_json['access_token']
# Determine who the logged in user is
headers={"Accept": "application/json",
"User-Agent": "JupyterHub",
"Authorization": "Bearer {}".format(access_token)
}
req = HTTPRequest("https://%s.auth0.com/userinfo" % AUTH0_SUBDOMAIN,
method="GET",
headers=headers
)
resp = yield http_client.fetch(req)
resp_json = json.loads(resp.body.decode('utf8', 'replace'))
return resp_json["email"]
class LocalAuth0OAuthenticator(LocalAuthenticator, Auth0OAuthenticator):
"""A version that mixes in local system user creation"""
pass
| 32.87037 | 82 | 0.676338 |
20cb53fede182ff5c2cbfb0a499683c54323faae | 537 | py | Python | source/openwarpgui/openwarp/__init__.py | rhydar/Test | 32dd54af2c3657d0c49177395d5b1c1f7bd8e127 | [
"Apache-2.0"
] | 1 | 2016-07-27T12:32:57.000Z | 2016-07-27T12:32:57.000Z | source/openwarpgui/openwarp/__init__.py | rhydar/Test | 32dd54af2c3657d0c49177395d5b1c1f7bd8e127 | [
"Apache-2.0"
] | null | null | null | source/openwarpgui/openwarp/__init__.py | rhydar/Test | 32dd54af2c3657d0c49177395d5b1c1f7bd8e127 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
This is the initialization module of openwarp.
"""
__author__ = "caoweiquan322"
__copyright__ = "Copyright (C) 2014 TopCoder Inc. All rights reserved."
__version__ = "1.0"
import logging
# Create a base logger for the whole module.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
| 26.85 | 86 | 0.709497 |
247ddfbfdf32f192a5a43a6da9dbf11093c4977d | 1,588 | py | Python | F_Machine_learning/1_Unsupervised-Learning/solutions/ex2_extra.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 7 | 2019-07-03T07:41:55.000Z | 2022-02-06T20:25:37.000Z | F_Machine_learning/1_Unsupervised-Learning/solutions/ex2_extra.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 9 | 2019-03-14T15:15:09.000Z | 2019-08-01T14:18:21.000Z | F_Machine_learning/1_Unsupervised-Learning/solutions/ex2_extra.py | oercompbiomed/CBM101 | 20010dcb99fbf218c4789eb5918dcff8ceb94898 | [
"MIT"
] | 11 | 2019-03-12T10:43:11.000Z | 2021-10-05T12:15:00.000Z |
# one possible solution
# if it yields a bad result, rerun code
# imports added below so the snippet is self-contained; X is assumed to be the
# 2-D data array already defined earlier in the accompanying notebook
import numpy as np
import matplotlib.pyplot as plt
class KM():
def __init__(self, k):
self.k = k
def distances(self, X):
"""Makes a distance matrix to the centroids of shape (n_samples x n_centroids)"""
return np.vstack([np.sum((X-self.centroids[i,:])**2, axis=1) for i in range(self.k)]).T
def assign(self, X):
"""Selects the index of the distance matrix which has the smallest entry"""
dist = self.distances(X)
return np.argmin(dist, axis=1)
def update(self, X):
"""Updates centroid to mean of its constituents. If it has no constituents, respawn randomly"""
for j in range(self.k):
new_c = X[self.y==j,:].mean(axis=0)
if np.any(np.isnan(new_c)):
self.centroids[j,:] = np.random.uniform(X.min(0), X.max(0), X.shape[1])
else: self.centroids[j, :] = new_c
def __call__(self, X):
n, d = X.shape
self._converged = False
self.centroids = X[np.random.randint(0,n, self.k), :] # initialize by random selection of samples
self.y = np.zeros(shape=n) # empty initialize
while True: #repeat until convergence
#old_centroids = self.centroids
old_y = self.y
self.y = self.assign(X)
if np.all(self.y == old_y): return self.y
self.update(X)
return self.y
km = KM(5)
y = km(X)
plt.scatter(X[:,0], X[:,1], c=y)
plt.scatter(km.centroids[:,0], km.centroids[:,1], marker='x', s=150, c='red') | 34.521739 | 105 | 0.562972 |
11bb553392246490ee8606bda50366645982ed4f | 109 | py | Python | setup.py | bklebel/gym-sailingroute | f1b9d2a9aebdda3575da085b68ce3bc6fa7ff906 | [
"MIT"
] | null | null | null | setup.py | bklebel/gym-sailingroute | f1b9d2a9aebdda3575da085b68ce3bc6fa7ff906 | [
"MIT"
] | null | null | null | setup.py | bklebel/gym-sailingroute | f1b9d2a9aebdda3575da085b68ce3bc6fa7ff906 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(name='gym_sailingroute',
      version='0.0.1',
install_requires=['gym']
)
| 13.625 | 31 | 0.724771 |
503b24969992ec3f09e090804b2e06e624389ee8 | 738 | py | Python | examples/standalone/json_parser_main.py | ThatXliner/lark | 88a64c990d0986d77ec1710ee8025e9b3b341afb | [
"MIT"
] | 1 | 2021-07-22T14:08:10.000Z | 2021-07-22T14:08:10.000Z | examples/standalone/json_parser_main.py | dbl007/lark | 61973a70f5786216e28f882048c5d11755631578 | [
"MIT"
] | 3 | 2020-11-21T22:22:12.000Z | 2020-11-21T22:29:08.000Z | examples/standalone/json_parser_main.py | ThatXliner/lark | 88a64c990d0986d77ec1710ee8025e9b3b341afb | [
"MIT"
] | null | null | null | """
Standalone Parser
===================================
This example demonstrates how to generate and use the standalone parser,
using the JSON example.
See README.md for more details.
"""
import sys
from json_parser import Lark_StandAlone, Transformer, inline_args
class TreeToJson(Transformer):
@inline_args
def string(self, s):
return s[1:-1].replace('\\"', '"')
array = list
pair = tuple
object = dict
number = inline_args(float)
null = lambda self, _: None
true = lambda self, _: True
false = lambda self, _: False
parser = Lark_StandAlone(transformer=TreeToJson())
if __name__ == '__main__':
with open(sys.argv[1]) as f:
print(parser.parse(f.read()))
| 20.5 | 76 | 0.628726 |
d0de41635888d5031b70831935dd7aa9ffee2895 | 2,814 | py | Python | obstools/tests/test_2_a_scripts_noH.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 1 | 2019-12-05T04:32:38.000Z | 2019-12-05T04:32:38.000Z | obstools/tests/test_2_a_scripts_noH.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 2 | 2019-12-04T02:06:45.000Z | 2019-12-06T22:20:19.000Z | obstools/tests/test_2_a_scripts_noH.py | paudetseis/OBStools | c6c02d8864c25a14f22d1fae17ff5ad911b9ff00 | [
"MIT"
] | 1 | 2020-02-25T16:51:35.000Z | 2020-02-25T16:51:35.000Z | import os
import matplotlib as mpl
if os.environ.get('DISPLAY', '') == '':
mpl.use('Agg')
import stdb
import numpy as np
import shutil
from pathlib import Path
from pkg_resources import resource_filename
from obspy.clients.fdsn import Client
from obstools.atacr import DayNoise, StaNoise, TFNoise
from obstools.atacr import EventStream, Power, Cross, Rotation
from obstools.atacr import utils, plotting
dbfile = resource_filename('obstools',
'examples/meta/M08A.pkl')
curdir = Path.cwd()
datadir = curdir / 'DATA'
avgdir = curdir / 'AVG_STA'
cmpdir = curdir / 'COMPL_STA'
evdir = curdir / 'EVENTS'
specdir = curdir / 'SPECTRA'
tfdir = curdir / 'TF_STA'
# Test with pressure only (no H)
def test_01_data_noH():
from obstools.scripts import atacr_download_data as atacr
args0 = atacr.get_daylong_arguments([
dbfile, '--keys', '7D.M08A', '-O',
'--start', '2012-03-08', '--end', '2012-03-10',
'--sampling-rate', '1.0', '--channels', 'P'])
atacr.main(args=args0)
def test_02_daily_noH():
from obstools.scripts import atacr_daily_spectra as atacr
args0 = atacr.get_dailyspec_arguments([
dbfile, '--keys', '7D.M08A', '-O', '--figQC',
'--figAverage', '--save-fig'])
atacr.main(args=args0)
def test_03_clean_noH():
from obstools.scripts import atacr_clean_spectra as atacr
args0 = atacr.get_cleanspec_arguments([
dbfile, '--keys', '7D.M08A', '-O', '--save-fig',
'--figCross', '--figCoh'])
atacr.main(args=args0)
def test_04_trans_noH():
from obstools.scripts import atacr_transfer_functions as atacr
args0 = atacr.get_transfer_arguments([
dbfile, '--keys', '7D.M08A', '-O', '--save-fig', '--figTF'])
atacr.main(args=args0)
def test_05_event_noH():
from obstools.scripts import atacr_download_event as atacr
args0 = atacr.get_event_arguments([
dbfile, '--keys', '7D.M08A', '-O',
'--start', '2012-03-08', '--end', '2012-03-10',
'--min-mag', '6.3', '--max-mag', '6.7', '--window', '7200.',
'--sampling-rate', '1.0', '--channels', 'P'])
atacr.main(args=args0)
def test_06_correct_noH():
from obstools.scripts import atacr_correct_event as atacr
args0 = atacr.get_correct_arguments([
dbfile, '--keys', '7D.M08A', '-O', '--figRaw',
'--figClean', '--save-fig', '--save'])
atacr.main(args=args0)
def test_07_comply_noH():
from obstools.scripts import comply_calculate as comply
args0 = comply.get_comply_arguments([
dbfile, '--keys', '7D.M08A', '-O', '--save-fig', '--fig'])
comply.main(args=args0)
def test_08_rmtree():
shutil.rmtree(datadir)
shutil.rmtree(avgdir)
shutil.rmtree(evdir)
shutil.rmtree(specdir)
shutil.rmtree(tfdir)
shutil.rmtree(cmpdir)
| 30.258065 | 68 | 0.648188 |
2e14b5fc4812f84f6c2f16e8334f12aba6b1ccf7 | 6,545 | py | Python | forge/models/__init__.py | Pandinosaurus/3d-forge | d631e14a9351911c3e5612c73c1608d97ed547d2 | [
"BSD-3-Clause"
] | 31 | 2015-07-13T15:36:50.000Z | 2022-02-07T21:37:51.000Z | forge/models/__init__.py | Pandinosaurus/3d-forge | d631e14a9351911c3e5612c73c1608d97ed547d2 | [
"BSD-3-Clause"
] | 109 | 2015-04-24T10:03:24.000Z | 2019-04-12T13:34:01.000Z | forge/models/__init__.py | Pandinosaurus/3d-forge | d631e14a9351911c3e5612c73c1608d97ed547d2 | [
"BSD-3-Clause"
] | 16 | 2015-10-03T06:03:22.000Z | 2022-03-31T08:24:37.000Z | # -*- coding: utf-8 -*-
from sqlalchemy.sql import func, and_
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import FunctionElement, text
from geoalchemy2.elements import WKBElement
from shapely.geometry import box, Point
class _interpolate_height_on_plane(FunctionElement):
name = "_interpolate_height_on_plane"
class bgdi_watermask_rasterize(FunctionElement):
name = "bgdi_watermask_rasterize"
class create_simplified_geom_table(FunctionElement):
name = "create_simplified_geom_table"
@compiles(_interpolate_height_on_plane)
def _compile_interpolate_height(element, compiler, **kw):
return "_interpolate_height_on_plane(%s)" % compiler.process(element.clauses)
@compiles(bgdi_watermask_rasterize)
def _compile_watermask(element, compiler, **kw):
return "bgdi_watermask_rasterize(%s)" % compiler.process(element.clauses)
@compiles(create_simplified_geom_table)
def _compile_create_simplified_geom_table(element, compiler, **kw):
return "create_simplified_geom_table(%s)" % compiler.process(element.clauses)
class Vector(object):
@classmethod
def primaryKeyColumn(cls):
return cls.__mapper__.primary_key[0]
@classmethod
def geometryColumn(cls):
return cls.__mapper__.columns['the_geom']
"""
Returns a sqlalchemy.sql.functions.Function clipping function
:param bbox: A list of 4 coordinates [minX, minY, maxX, maxY]
:params srid: Spatial reference system numerical ID
"""
@classmethod
def bboxClippedGeom(cls, bbox, srid=4326):
bboxGeom = shapelyBBox(bbox)
wkbGeometry = WKBElement(buffer(bboxGeom.wkb), srid)
geomColumn = cls.geometryColumn()
return func.ST_Intersection(geomColumn, wkbGeometry)
"""
Returns a slqalchemy.sql.functions.Function (interesects function)
Use it as a filter to determine if a geometry should be returned (True or False)
:params bbox: A list of 4 coordinates [minX, minX, maxX, maxY]
:params fromSrid: Spatial reference system numerical ID of the bbox
:params toSrid: Spatial reference system numerical ID of the table
"""
@classmethod
def bboxIntersects(cls, bbox, fromSrid=4326, toSrid=4326):
bboxGeom = shapelyBBox(bbox)
wkbGeometry = WKBElement(buffer(bboxGeom.wkb), fromSrid)
if fromSrid != toSrid:
wkbGeometry = func.ST_Transform(wkbGeometry, toSrid)
geomColumn = cls.geometryColumn()
return and_(
geomColumn.intersects(wkbGeometry),
func.ST_Intersects(geomColumn, wkbGeometry)
)
"""
Returns a slqalchemy.sql.functions.Function (interesects function)
Use it as a filter to determine if a geometry should be returned (True or False)
using a tolerance (in table unit). This function only works in 2 dimensions.
:params bbox: A list of 4 coordinates [minX, minX, maxX, maxY]
:params fromSrid: Spatial reference system numerical ID of the bbox
:params toSrid: Spatial reference system numerical ID of the table
:params tolerance: Tolerance in table unit
"""
@classmethod
def withinDistance2D(cls, bbox, fromSrid=4326, toSrid=4326, tolerance=0.):
bboxGeom = shapelyBBox(bbox)
wkbGeometry = WKBElement(buffer(bboxGeom.wkb), fromSrid)
if fromSrid != toSrid:
wkbGeometry = func.ST_Transform(wkbGeometry, toSrid)
geomColumn = cls.geometryColumn()
return func.ST_DWithin(geomColumn, wkbGeometry, tolerance)
"""
Returns a slqalchemy.sql.functions.Function (interesects function)
Use it as a point filter to determine if a geometry should be returned (True or False)
:params point: A list of dim 3 representing one point [X, Y, Z]
:params geomColumn: A sqlAlchemy Column representing a postgis geometry (Optional)
:params srid: Spatial reference system numerical ID
"""
@classmethod
def pointIntersects(cls, point, geomColumn=None, srid=4326):
pointGeom = Point(point)
wkbGeometry = WKBElement(buffer(pointGeom.wkb), srid)
geomColumn = cls.geometryColumn() if geomColumn is None else geomColumn
return func.ST_Intersects(geomColumn, wkbGeometry)
"""
Returns a slqalchemy.sql.functions.Function
Use it as a point filter to determine if a geometry should be returned (True or False)
:params point: A list of dim 3 representing one point [X, Y, Z]
:params geomColumn: A sqlAlchemy Column representing a postgis geometry
:params srid: Spatial reference system numerical ID
"""
@classmethod
def interpolateHeightOnPlane(cls, point, geomColumn=None, srid=4326):
pointGeom = Point(point)
wkbGeometry = WKBElement(buffer(pointGeom.wkb), srid)
geomColumn = cls.geometryColumn() if geomColumn is None else geomColumn
return func.ST_AsEWKB(_interpolate_height_on_plane(geomColumn, wkbGeometry))
"""
Return a sqlalchemy.sql.functions.Function
Use it to create watermasks using a bounding box and a tile width and height in px
:params bbox: A list of 4 coordinates [minX, minX, maxX, maxY]
:params width: The width of the image in px
:params height: The height of the image in px
:params srid: Spatial reference system numerical ID
"""
@classmethod
def watermaskRasterize(cls, bbox, width=256, height=256, srid=4326):
geomColumn = cls.geometryColumn()
bboxGeom = shapelyBBox(bbox)
wkbGeometry = WKBElement(buffer(bboxGeom.wkb), srid)
# ST_DumpValues(Raster, Band Number, True -> returns None
# and False -> returns numerical vals)
return func.ST_DumpValues(
bgdi_watermask_rasterize(
wkbGeometry, width, height,
'.'.join((cls.__table_args__['schema'], cls.__tablename__)),
geomColumn.name
), 1, False
)
"""
Returns a shapely.geometry.polygon.Polygon
:param bbox: A list of 4 cooridinates [minX, minY, maxX, maxY]
"""
def shapelyBBox(bbox):
return box(*bbox)
"""
Returns a sqlalchemy.sql.expression.text
:params schemaname: the schema name
:params tablename: the table name
:params srid: Spatial reference system numerical ID
"""
def tableExtentLiteral(schemaname, tablename, srid):
return text("SELECT ST_XMin(r), ST_YMin(r), "
"ST_XMax(r), ST_YMax(r) "
"FROM (SELECT ST_Collect(ST_Transform(the_geom, %d)) AS r "
"FROM %s.%s) AS foo" % (srid, schemaname, tablename)
)
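# --- illustrative usage sketch (added for clarity, not part of the original module) ---
# A declarative SQLAlchemy model would mix Vector in and use the helpers as query
# filters; the model and session names below are hypothetical:
#
#   class Lake(Base, Vector):
#       __tablename__ = 'lakes'
#       __table_args__ = {'schema': 'public'}
#       id = Column(Integer, primary_key=True)
#       the_geom = Column(Geometry('POLYGON', srid=4326))
#
#   lakes = session.query(Lake).filter(
#       Lake.bboxIntersects([7.0, 46.0, 8.0, 47.0], fromSrid=4326, toSrid=4326)).all()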
| 38.052326 | 90 | 0.703896 |
f45424b8fecf9812273d3e0f26fae9b86de5b210 | 716 | py | Python | html_decorators.py | EstebanMongui/challenge-python-04 | db027e4eaa47bc66afa0e8aeabbfc25609bb98f8 | [
"MIT"
] | null | null | null | html_decorators.py | EstebanMongui/challenge-python-04 | db027e4eaa47bc66afa0e8aeabbfc25609bb98f8 | [
"MIT"
] | null | null | null | html_decorators.py | EstebanMongui/challenge-python-04 | db027e4eaa47bc66afa0e8aeabbfc25609bb98f8 | [
"MIT"
] | null | null | null | def div(func):
def wrapper(*args):
return f'<div>{func(*args)}</div>'
return wrapper
def article(func):
def wrapper(*args):
return f'<article>{func(*args)}</article>'
return wrapper
def p(func):
def wrapper(*args):
return f'<p>{func(*args)}</p>'
return wrapper
# Here you must apply the decorators, uncomment this later
# @div
# @article
# @p
def saludo(nombre):
return f'¡Hola {nombre}, ¿Cómo estás?'
def run():
print(saludo('Jorge'))
if __name__ == '__main__':
run()
# We want to have three different outputs 👇🏼
# <div>¡Hola Jorge, ¿Cómo estás?</div>
# <article>¡Hola Jorge, ¿Cómo estás?</article>
# <p>¡Hola Jorge, ¿Cómo estás?</p>
| 15.911111 | 58 | 0.601955 |
acd4ae28c8df2770697a9b19a1a3fa6e25f1262c | 4,270 | py | Python | test/test_files/pylops/pylops/signalprocessing/ConvolveND.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 2 | 2022-02-15T20:30:57.000Z | 2022-03-17T00:50:37.000Z | test/test_files/pylops/pylops/signalprocessing/ConvolveND.py | SoftwareUnderstanding/code_inspector | a820b5a7bb18f5df9c3e79346108d8280b20c39a | [
"BSD-3-Clause"
] | 101 | 2021-06-09T14:19:59.000Z | 2022-01-24T13:24:39.000Z | test/test_files/pylops/pylops/signalprocessing/ConvolveND.py | SoftwareUnderstanding/inspect4py | 9c4d7252535082ad938b26baf281d93f3a27285e | [
"BSD-3-Clause"
] | 1 | 2021-09-22T06:59:32.000Z | 2021-09-22T06:59:32.000Z | import numpy as np
from pylops import LinearOperator
from pylops.utils.backend import get_array_module, get_convolve, \
get_correlate, to_cupy_conditional
class ConvolveND(LinearOperator):
r"""ND convolution operator.
Apply n-dimensional convolution with a compact filter to model
(and data) along a set of directions ``dirs`` of a n-dimensional
array.
Parameters
----------
N : :obj:`int`
Number of samples in model
h : :obj:`numpy.ndarray`
nd compact filter to be convolved to input signal
dims : :obj:`list`
Number of samples for each dimension
offset : :obj:`tuple`, optional
Indices of the center of the compact filter
dirs : :obj:`tuple`, optional
Directions along which convolution is applied
(set to ``None`` for filter of same dimension as input vector)
method : :obj:`str`, optional
Method used to calculate the convolution (``direct`` or ``fft``).
dtype : :obj:`str`, optional
Type of elements in input array.
Attributes
----------
shape : :obj:`tuple`
Operator shape
explicit : :obj:`bool`
Operator contains a matrix that can be solved
explicitly (``True``) or not (``False``)
Notes
-----
The ConvolveND operator applies n-dimensional convolution
between the input signal :math:`d(x_1, x_2, ..., x_N)` and a compact
filter kernel :math:`h(x_1, x_2, ..., x_N)` in forward model. This is a
straighforward extension to multiple dimensions of
:obj:`pylops.signalprocessing.Convolve2D` operator.
"""
def __init__(self, N, h, dims, offset=None, dirs=None,
method='fft', dtype='float64'):
ncp = get_array_module(h)
self.h = h
self.nh = np.array(self.h.shape)
self.dirs = np.arange(len(dims)) if dirs is None else np.array(dirs)
# padding
if offset is None:
offset = np.zeros(self.h.ndim, dtype=np.int)
else:
offset = np.array(offset, dtype=np.int)
self.offset = 2 * (self.nh // 2 - offset)
pad = [(0, 0) for _ in range(self.h.ndim)]
dopad = False
for inh, nh in enumerate(self.nh):
if nh % 2 == 0:
self.offset[inh] -= 1
if self.offset[inh] != 0:
pad[inh] = [self.offset[inh] if self.offset[inh] > 0 else 0,
-self.offset[inh] if self.offset[inh] < 0 else 0]
dopad = True
if dopad:
self.h = ncp.pad(self.h, pad, mode='constant')
self.nh = self.h.shape
# find out which directions are used for convolution and define offsets
if len(dims) != len(self.nh):
dimsh = np.ones(len(dims), dtype=np.int)
for idir, dir in enumerate(self.dirs):
dimsh[dir] = self.nh[idir]
self.h = self.h.reshape(dimsh)
if np.prod(dims) != N:
raise ValueError('product of dims must equal N!')
else:
self.dims = np.array(dims)
self.reshape = True
# convolve and correate functions
self.convolve = get_convolve(h)
self.correlate = get_correlate(h)
self.method = method
self.shape = (np.prod(self.dims), np.prod(self.dims))
self.dtype = np.dtype(dtype)
self.explicit = False
def _matvec(self, x):
# correct type of h if different from x and choose methods accordingly
if type(self.h) != type(x):
self.h = to_cupy_conditional(x, self.h)
self.convolve = get_convolve(self.h)
self.correlate = get_correlate(self.h)
x = np.reshape(x, self.dims)
y = self.convolve(x, self.h, mode='same', method=self.method)
y = y.ravel()
return y
def _rmatvec(self, x):
# correct type of h if different from x and choose methods accordingly
if type(self.h) != type(x):
self.h = to_cupy_conditional(x, self.h)
self.convolve = get_convolve(self.h)
self.correlate = get_correlate(self.h)
x = np.reshape(x, self.dims)
y = self.correlate(x, self.h, mode='same', method=self.method)
y = y.ravel()
return y
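# --- illustrative usage sketch (added for clarity, not part of the pylops source) ---
if __name__ == '__main__':
    # small 2D smoothing kernel applied over a flattened (nx, ny) model;
    # `*` applies the forward operator and `.H *` the adjoint, as with other
    # pylops LinearOperators
    nx, ny = 20, 30
    hfilt = np.ones((3, 5)) / 15.
    Cop = ConvolveND(N=nx * ny, h=hfilt, dims=(nx, ny), method='fft')
    x = np.random.normal(0., 1., nx * ny)
    y = Cop * x          # forward: n-dimensional convolution
    xadj = Cop.H * y     # adjoint: n-dimensional correlation
    print(y.shape, xadj.shape)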
| 36.186441 | 79 | 0.585012 |
aa486b4e806bfbccbbd3cde4ef76538eff1713ac | 10,811 | py | Python | diyzhl.py | dmaziuk/diy-zhl | 79bc42ab184d65972baccba4274e0f06f3ec34d9 | [
"Unlicense"
] | 6 | 2019-12-11T00:39:39.000Z | 2021-04-17T05:32:31.000Z | diyzhl.py | dmaziuk/diy-zhl | 79bc42ab184d65972baccba4274e0f06f3ec34d9 | [
"Unlicense"
] | null | null | null | diyzhl.py | dmaziuk/diy-zhl | 79bc42ab184d65972baccba4274e0f06f3ec34d9 | [
"Unlicense"
] | 2 | 2019-12-11T17:05:58.000Z | 2021-06-01T18:27:27.000Z | #!/usr/bin/python -u
#
# (K) Copy Rites Reversed: reuse what you like (but give credit)
#
# Credits:
#
# Mark Powell's "Deco for Divers"
# Erik Baker's papers: "Decolessons", "Understanding M-values", and "Clearing up the confusion about Deep Stops" in particular
# Buhlmann's "Decompression - Decompression Sickness", English edition
# Several open-source implementations, most notably Subsurface software (and people, Robert in particular)
# Plenty of other on-line sources, e.g. Stuart Morrison's "DIY Decompresion"
#
# The goal here is "by the book" implementation to use for learning this stuff.
#
import sys
import math
# ZH-L12 from "Decompression"
#
ZHL12N = {
1 : { "t" : 2.65, "a" : 2.2, "b" : 0.82 },
2 : { "t" : 7.94, "a" : 1.5, "b" : 0.82 },
3 : { "t" : 12.2, "a" : 1.08, "b" : 0.825 },
4 : { "t" : 18.5, "a" : 0.9, "b" : 0.835 },
5 : { "t" : 26.5, "a" : 0.75, "b" : 0.845 },
6 : { "t" : 37.0, "a" : 0.58, "b" : 0.86 },
7 : { "t" : 53.0, "a" : 0.47, "b" : 0.87 },
8 : { "t" : 79.0, "a" : 0.45, "b" : 0.89 },
9 : { "t" : 114.0, "a" : 0.45, "b" : 0.89 },
10 : { "t" : 146.0, "a" : 0.455, "b" : 0.934 },
11 : { "t" : 185.0, "a" : 0.455, "b" : 0.934 },
12 : { "t" : 238.0, "a" : 0.38, "b" : 0.944 },
13 : { "t" : 304.0, "a" : 0.255, "b" : 0.962 },
14 : { "t" : 397.0, "a" : 0.255, "b" : 0.962 },
15 : { "t" : 503.0, "a" : 0.255, "b" : 0.962 },
16 : { "t" : 635.0, "a" : 0.255, "b" : 0.962 }
}
ZHL12He = {
1 : { "t" : 1.0, "a" : 2.2, "b" : 0.82 },
2 : { "t" : 3.0, "a" : 1.5, "b" : 0.82 },
3 : { "t" : 4.6, "a" : 1.08, "b" : 0.825 },
4 : { "t" : 7.0, "a" : 0.9, "b" : 0.835 },
5 : { "t" : 10.0, "a" : 0.75, "b" : 0.845 },
6 : { "t" : 14.0, "a" : 0.58, "b" : 0.86 },
7 : { "t" : 20.0, "a" : 0.47, "b" : 0.87 },
8 : { "t" : 30.0, "a" : 0.45, "b" : 0.89 },
9 : { "t" : 43.0, "a" : 0.45, "b" : 0.89 },
10 : { "t" : 55.0, "a" : 0.515, "b" : 0.926 },
11 : { "t" : 70.0, "a" : 0.515, "b" : 0.926 },
12 : { "t" : 90.0, "a" : 0.515, "b" : 0.926 },
13 : { "t" : 115.0, "a" : 0.515, "b" : 0.926 },
14 : { "t" : 150.0, "a" : 0.515, "b" : 0.926 },
15 : { "t" : 190.0, "a" : 0.515, "b" : 0.926 },
16 : { "t" : 240.0, "a" : 0.515, "b" : 0.926 },
}
# ZH_L16: several sources incl. a photo of a page from Tauchmedizin @
# http://www.nigelhewitt.co.uk/stuff/aab.jpg
# It appears nobody has Helium numbers for "-A" and "-C", nor for the 4-minute TC
# 5-minute TC is keyed as 1.1.
#
ZHL16N = {
1 : { "t" : 4.0, "b" : 0.505, "a" : { "A" : 1.2599, "B" : 1.2599, "C" : 1.2599 } },
1.1 : { "t" : 5.0, "b" : 0.5578, "a" : { "A" : 1.1696, "B" : 1.1696, "C" : 1.1696 } },
2 : { "t" : 8.0, "b" : 0.6514, "a" : { "A" : 1.0, "B" : 1.0, "C" : 1.0 } },
3 : { "t" : 12.5, "b" : 0.7222, "a" : { "A" : 0.8618, "B" : 0.8618, "C" : 0.8618 } },
4 : { "t" : 18.5, "b" : 0.7825, "a" : { "A" : 0.7562, "B" : 0.7562, "C" : 0.7562 } },
5 : { "t" : 27.0, "b" : 0.8126, "a" : { "A" : 0.6667, "B" : 0.6667, "C" : 0.62 } },
6 : { "t" : 38.3, "b" : 0.8434, "a" : { "A" : 0.5933, "B" : 0.56, "C" : 0.5043 } },
7 : { "t" : 54.3, "b" : 0.8693, "a" : { "A" : 0.5282, "B" : 0.4947, "C" : 0.441 } },
8 : { "t" : 77.0, "b" : 0.891, "a" : { "A" : 0.4701, "B" : 0.45, "C" : 0.4 } },
9 : { "t" : 109.0, "b" : 0.9092, "a" : { "A" : 0.4187, "B" : 0.4187, "C" : 0.375 } },
10 : { "t" : 146.0, "b" : 0.9222, "a" : { "A" : 0.3798, "B" : 0.3798, "C" : 0.35 } },
11 : { "t" : 187.0, "b" : 0.9319, "a" : { "A" : 0.3497, "B" : 0.3497, "C" : 0.3295 } },
12 : { "t" : 239.0, "b" : 0.9403, "a" : { "A" : 0.3223, "B" : 0.3223, "C" : 0.3065 } },
13 : { "t" : 305.0, "b" : 0.9477, "a" : { "A" : 0.2971, "B" : 0.285, "C" : 0.2835 } },
14 : { "t" : 390.0, "b" : 0.9544, "a" : { "A" : 0.2737, "B" : 0.2737, "C" : 0.261 } },
15 : { "t" : 498.0, "b" : 0.9602, "a" : { "A" : 0.2523, "B" : 0.2523, "C" : 0.248 } },
16 : { "t" : 635.0, "b" : 0.9653, "a" : { "A" : 0.2327, "B" : 0.2327, "C" : 0.2327 } }
}
# Note that "B" is a misnomer as some implementations call it "C",
# some call it "B", and nobody can read German and/or drop a hundred
# bucks on Tauchmedizin to see what Herr Buhlmann actually said.
#
ZHL16He = {
1 : { "t" : 1.51, "a" : { "B" : 1.7424 }, "b" : 0.4245 },
1.1 : { "t" : 1.88, "a" : { "B" : 1.6189 }, "b" : 0.477 },
2 : { "t" : 3.02, "a" : { "B" : 1.383 }, "b" : 0.5747 },
3 : { "t" : 4.72, "a" : { "B" : 1.1919 }, "b" : 0.6527 },
4 : { "t" : 6.99, "a" : { "B" : 1.0458 }, "b" : 0.7223 },
5 : { "t" : 10.21, "a" : { "B" : 0.922 }, "b" : 0.7582 },
6 : { "t" : 14.48, "a" : { "B" : 0.8205 }, "b" : 0.7957 },
7 : { "t" : 20.53, "a" : { "B" : 0.7305 }, "b" : 0.8279 },
8 : { "t" : 29.11, "a" : { "B" : 0.6502 }, "b" : 0.8553 },
9 : { "t" : 41.20, "a" : { "B" : 0.595 }, "b" : 0.8757 },
10 : { "t" : 55.19, "a" : { "B" : 0.5545 }, "b" : 0.8903 },
11 : { "t" : 70.69, "a" : { "B" : 0.5333 }, "b" : 0.8997 },
12 : { "t" : 90.34, "a" : { "B" : 0.5189 }, "b" : 0.9073 },
13 : { "t" : 115.29, "a" : { "B" : 0.5181 }, "b" : 0.9122 },
14 : { "t" : 147.42, "a" : { "B" : 0.5176 }, "b" : 0.9171 },
15 : { "t" : 188.24, "a" : { "B" : 0.5172 }, "b" : 0.9217 },
16 : { "t" : 240.03, "a" : { "B" : 0.5119 }, "b" : 0.9267 }
}
# From Deco for Divers
# Do not use:
# need to convert to bar and delta-M to use,
# M0 is in msw and M = Delta M * Depth + M0,
#
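# e.g. converting the 5-minute row below (illustrative): 31.5 msw ~ 3.15 bar, so
# m_w2b( M0 = 3.15, dM = 1.8, P = 1 ) -> (a, b) = (1.35, 0.5556)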
WORKMAN = {
1 : { "t" : 5.0, "M0" : 31.5, "M" : 1.8 },
2 : { "t" : 10.0, "M0" : 26.8, "M" : 1.6 },
3 : { "t" : 20.0, "M0" : 21.9, "M" : 1.5 },
4 : { "t" : 40.0, "M0" : 17.0, "M" : 1.4 },
5 : { "t" : 80.0, "M0" : 16.4, "M" : 1.3 },
6 : { "t" : 120.0, "M0" : 15.8, "M" : 1.2 },
7 : { "t" : 160.0, "M0" : 15.5, "M" : 1.15 },
8 : { "t" : 200.0, "M0" : 15.5, "M" : 1.1 },
9 : { "t" : 240.0, "M0" : 15.2, "M" : 1.1 }
}
# Also from Deco for Divers
#
# Since DSAT's primary concern is no-stop diving, it only uses M0
# -- there is no Delta M i.e. Delta M = 1
# values are in msw
#
# Convert to Buhlmann with
# m_w2b( M0 = NNN / 10, dM = 1, P = 1 )
# and run ZHL with DSAT compartments and M-values
#
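# e.g. for the 5-minute DSAT compartment below (illustrative): M0 = 30.42 msw ~ 3.042 bar,
# so m_w2b( M0 = 3.042, dM = 1, P = 1 ) -> (a, b) = (2.042, 1.0)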
DSAT = {
1 : { "t" : 5.0, "M0" : 30.42 },
2 : { "t" : 10.0, "M0" : 25.37 },
3 : { "t" : 20.0, "M0" : 20.54 },
4 : { "t" : 30.0, "M0" : 18.34 },
5 : { "t" : 40.0, "M0" : 17.11 },
6 : { "t" : 60.0, "M0" : 15.79 },
7 : { "t" : 80.0, "M0" : 15.11 },
8 : { "t" : 100.0, "M0" : 14.69 },
9 : { "t" : 120.0, "M0" : 14.41 },
10 : { "t" : 160.0, "M0" : 14.06 },
11 : { "t" : 200.0, "M0" : 13.84 },
12 : { "t" : 240.0, "M0" : 13.69 },
13 : { "t" : 360.0, "M0" : 13.45 },
14 : { "t" : 480.0, "M0" : 13.33 }
}
# return alveolar inert gas pressure
# with P amb = 1 bar, fraction of inert gas = 0.79, and RQ = 0.9
# with the defaults this should return (1 - 0.0567) * 0.79 = 0.7451 or 0.7452 dep. on where you round it
#
def palv( Pamb = 1, Q = 0.79, RQ = 0.9 ) :
assert float( RQ ) != 0.0
vw = float( Pamb ) - 0.0627 + (1.0 - float( RQ )) / float( RQ ) * 0.0534
return round( vw * float( Q ), 4 )
# return k: constant for tissue compartment (min^-1)
# Th : tissue compartment half-time in minutes
# for 5-minute compartment it's 0.8452
#
def kay( Th = 5 ) :
assert float( Th ) > 0.0
return round( math.log( 2 ) / float( Th ), 4 )
# return rate of pressure change in bar/min
# d0 : start pressure, bar
# dt : end pressure, bar
# t : time, min
# Q : fraction of inert gas (same Q as in palv()
#
def arr( d0 = 1.0, dt = 1.0, t = 1, Q = 0.79 ) :
assert float( t ) > 0.0
dP = (float( dt ) - float( d0 )) / float( t )
rc = dP * float( Q )
return round( rc, 4 )
# Schreiner equation
# Palv + R * (t - 1/k) - (Palv - Pi - R/k) * e^(-k * t)
#
# returns pressure in tissue compartment after time t at depth Pa & dP
#
# Pi: initial pressure of inert gas in tissue (bar)
# Palv: initial pressure of inert gas in the lungs (bar, output of palv())
# t: time (minutes)\n",
# R: rate of pressure change (output of arr()),
# k: gas decay constant (output of kay()
#
# (Intermediate variables b/c I was playing with rounding)
#
def schreiner( Pi = 0.7451, Palv = 0.7451, t = 1, R = 0, k = 0.1386, verbose = False ) :
assert float( k ) != 0.0
x1 = float( R ) * (float( t ) - 1.0 / float( k ))
x2 = float( Palv ) - float( Pi ) - float( R ) / float( k )
x3 = math.e ** (float( -k ) * float( t ))
rc = round( float( Palv ) + x1 - x2 * x3, 4 )
if verbose : sys.stdout.write( "x1: %f, x2: %f, x3: %f, rc: %f\n" % (x1, x2, x3, rc,) )
return round( rc, 4 )
# M-value: workman to buhlmann
# P is ambient pressure in bar
# returns pair ( a, b )
#
# TODO: add GF?
#
def m_w2b( M0 = 2.9624, dM = 1.7928, P = 1 ) :
assert float( dM ) >= 1.0
a = float( M0 ) - float( dM ) * float( P )
b = 1.0 / float( dM )
return (round( a, 4 ), round( b, 4 ))
# M-value: buhlmann to workman
# returns pair ( M0, dM )
#
def m_b2w( a = 1.1696, b = 0.5578, P = 1 ) :
assert float( b ) > 0.0
M0 = float( a ) + float( P ) / float( b )
dM = 1.0 / float( b )
return (round( M0, 4 ), round( dM, 4 ))
# no-stop time by Schreiner
#
# Palv: initial pressure of inert gas in the lungs (bar, output of palv())
# t: time (minutes)
# R: rate of pressure change (output of arr()),
# k: gas decay constant (output of kay()
# -- same as schreiner()
# M0: surfacing M-value as per Workman
#
def ndl( Palv = 0.7451, M0 = 2.9624, t = 0, R = 0, k = 0.1386, verbose = False ) :
# (M0 - Palv - R * (t - 1/k)) * math.e ** (k * t) + Palv - R / k
assert float( k ) != 0.0
x1 = float( M0 ) - float( Palv ) - float( R ) * (float( t ) - 1.0 / float( k ))
x2 = math.e ** (float( k ) * float( t ))
rc = x1 * x2 + float( Palv ) - float( R ) / float( k )
if verbose : sys.stdout.write( "x1: %f, x2: %f, rc: %f\n" % (x1, x2, rc,) )
return round( rc, 4 )
# Buhlman formula with GF and Helium
# returns safe ascent ceiling
#
# Pn is N2 pressure in tissue compartment
# Phe is He pressure in tissue compartment
# an is N2 a coefficient
# bn is N2 b coefficient
# ahe is He a coefficient
# bhe is He b coefficient
# gf is current gradient factor
#
def buhlmann( Pn, an, bn, Phe = 0, ahe = 0, bhe = 0, gf = 1 ) :
P = float( Pn ) + float( Phe )
assert float( P ) != 0.0
a = (float( an ) * float( Pn ) + float( ahe ) * float( Phe )) / P
b = (float( bn ) * float( Pn ) + float( bhe ) * float( Phe )) / P
num = float( P ) - float( a ) * float( gf )
den = float( gf ) / float( b ) + 1.0 - float( gf )
assert den != 0.0
rc = num / den
return round( rc, 4 )
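# illustrative usage sketch (added for clarity, not part of the original module):
# N2 loading of the 5-minute compartment for a made-up dive -- 2 min descent to
# 4 bar (~30 msw) on air, then 20 min at the bottom -- and its ZH-L16B ceiling.
# Numbers are examples only, not dive planning advice.
def _demo() :
    Pi = palv()                                # surface-saturated tissue N2 tension
    k = kay( Th = ZHL16N[1.1]["t"] )           # 5-minute compartment
    R = arr( d0 = 1.0, dt = 4.0, t = 2 )       # pressure change rate during descent
    Pt = schreiner( Pi = Pi, Palv = palv(), t = 2, R = R, k = k )
    Pt = schreiner( Pi = Pt, Palv = palv( Pamb = 4.0 ), t = 20, R = 0, k = k )
    ceil = buhlmann( Pn = Pt, an = ZHL16N[1.1]["a"]["B"], bn = ZHL16N[1.1]["b"] )
    sys.stdout.write( "N2 in 5 min TC: %.4f bar, ceiling: %.4f bar\n" % (Pt, ceil,) )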
# eof
#
| 40.040741 | 126 | 0.451947 |
b91e423748da495ef84033d3b2cf10c562bebea6 | 4,855 | py | Python | multiagent/scenarios/simple_spread.py | jiayu-ch15/MPE-for-curriculum-learning | c71bdfa6a406f5d6a2dcfcd4c17ae879c4fd27d0 | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_spread.py | jiayu-ch15/MPE-for-curriculum-learning | c71bdfa6a406f5d6a2dcfcd4c17ae879c4fd27d0 | [
"MIT"
] | null | null | null | multiagent/scenarios/simple_spread.py | jiayu-ch15/MPE-for-curriculum-learning | c71bdfa6a406f5d6a2dcfcd4c17ae879c4fd27d0 | [
"MIT"
] | null | null | null | import numpy as np
from multiagent.core import World, Agent, Landmark
from multiagent.scenario import BaseScenario
class Scenario(BaseScenario):
def make_world(self):
world = World()
# set any world properties first
world.dim_c = 2
num_agents = 16
num_landmarks = 16
# add agents
world.agents = [Agent() for i in range(num_agents)]
for i, agent in enumerate(world.agents):
agent.name = 'agent %d' % i
agent.collide = True
agent.silent = True
agent.size = 0.08 #0.15
# add landmarks
world.landmarks = [Landmark() for i in range(num_landmarks)]
for i, landmark in enumerate(world.landmarks):
landmark.name = 'landmark %d' % i
landmark.collide = False
landmark.movable = False
#landmark.size = 0.005
# make initial conditions
self.reset_world(world)
return world
def reset_world(self, world):
# random properties for agents
for i, agent in enumerate(world.agents):
agent.color = np.array([0.3, 0.2, 0.7]) #0.35 0.35 0.85
# random properties for landmarks
for i, landmark in enumerate(world.landmarks):
landmark.color = np.array([0.5, 0.5, 0.5]) #0.25 0.25 0.25
# set random initial states
for agent in world.agents:
agent.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
agent.state.p_vel = np.zeros(world.dim_p)
agent.state.c = np.zeros(world.dim_c)
for i, landmark in enumerate(world.landmarks):
landmark.state.p_pos = np.random.uniform(-1, +1, world.dim_p)
landmark.state.p_vel = np.zeros(world.dim_p)
def benchmark_data(self, agent, world):
rew = 0
collisions = 0
occupied_landmarks = 0
min_dists = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
min_dists += min(dists)
rew -= min(dists)
if min(dists) < 0.1:
occupied_landmarks += 1
if agent.collide:
for a in world.agents:
if self.is_collision(a, agent):
rew -= 1
collisions += 1
return (rew, collisions, min_dists, occupied_landmarks)
def is_collision(self, agent1, agent2):
delta_pos = agent1.state.p_pos - agent2.state.p_pos
dist = np.sqrt(np.sum(np.square(delta_pos)))
dist_min = agent1.size + agent2.size
return True if dist < dist_min else False
def num_reach(self, world):
num = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
if min(dists) <= world.agents[0].size + world.landmarks[0].size:
num = num + 1
return num
def reward(self, agent, world):
# Agents are rewarded based on minimum agent distance to each landmark, penalized for collisions
rew = 0
for l in world.landmarks:
dists = [np.sqrt(np.sum(np.square(a.state.p_pos - l.state.p_pos))) for a in world.agents]
rew -= min(dists)
# for a in world.agents:
# dists = [np.sqrt(np.sum(np.square(l.state.p_pos - a.state.p_pos))) for l in world.landmarks]
# rew -= min(dists)
if min(dists) < agent.size + world.landmarks[0].size:
rew += 8/len(world.agents)
if agent.collide:
for a in world.agents:
'''
for b in world.agents:
if (a != b):
if self.is_collision(a, b):
rew -= 6/len(world.agents)
'''
if self.is_collision(a, agent) and a != agent:
rew -= 3/len(world.agents)
return 0.1*rew
def observation(self, agent, world):
# get positions of all entities in this agent's reference frame
entity_pos = []
for entity in world.landmarks: # world.entities:
entity_pos.append(entity.state.p_pos - agent.state.p_pos)
# entity colors
entity_color = []
for entity in world.landmarks: # world.entities:
entity_color.append(entity.color)
# communication of all other agents
comm = []
other_pos = []
for other in world.agents:
if other is agent: continue
comm.append(other.state.c)
other_pos.append(other.state.p_pos - agent.state.p_pos)
# import pdb;pdb.set_trace()
return np.concatenate([agent.state.p_vel] + [agent.state.p_pos] + entity_pos + other_pos + comm)
| 39.471545 | 106 | 0.562101 |
4f65e5b4a487e02e1e182c7932c508323598a2d0 | 3,625 | py | Python | modules/tts/flowtron/text/text_en/__init__.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/tts/flowtron/text/text_en/__init__.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | modules/tts/flowtron/text/text_en/__init__.py | serkhanekarim/AI | 0a13880ae8e608cd00fa819dc590097abdb7ae6e | [
"Apache-2.0"
] | null | null | null | """ from https://github.com/keithito/tacotron """
import re
from text.text_en import cleaners
from text.text_en.symbols import symbols
from text.text_en.symbols import _punctuation as punctuation_symbols
# Mappings from symbol to numeric ID and vice versa:
_symbol_to_id = {s: i for i, s in enumerate(symbols)}
_id_to_symbol = {i: s for i, s in enumerate(symbols)}
# Regular expression matching text enclosed in curly braces:
_curly_re = re.compile(r'(.*?)\{(.+?)\}(.*)')
# for arpabet with apostrophe
_apostrophe = re.compile(r"(?=\S*['])([a-zA-Z'-]+)")
def text_to_sequence(text):
'''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
The text can optionally have ARPAbet sequences enclosed in curly braces embedded
in it. For example, "Turn left on {HH AW1 S S T AH0 N} Street."
Args:
text: string to convert to a sequence
Returns:
List of integers corresponding to the symbols in the text
'''
sequence = []
# Check for curly braces and treat their contents as ARPAbet:
while len(text):
m = _curly_re.match(text)
if not m:
sequence += _symbols_to_sequence(text)
break
sequence += _symbols_to_sequence(m.group(1))
sequence += _arpabet_to_sequence(m.group(2))
text = m.group(3)
return sequence
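# Example (illustrative, assuming the default symbol set from symbols.py):
#   seq = text_to_sequence('Turn left on {HH AW1 S S T AH0 N} Street.')
#   sequence_to_text(seq)  # should give back 'Turn left on {HH AW1 S S T AH0 N} Street.'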
def sequence_to_text(sequence):
'''Converts a sequence of IDs back to a string'''
result = ''
for symbol_id in sequence:
if symbol_id in _id_to_symbol:
s = _id_to_symbol[symbol_id]
# Enclose ARPAbet back in curly braces:
if len(s) > 1 and s[0] == '@':
s = '{%s}' % s[1:]
result += s
return result.replace('}{', ' ')
def _clean_text(text, cleaner_names):
for name in cleaner_names:
cleaner = getattr(cleaners, name)
if not cleaner:
raise Exception('Unknown cleaner: %s' % name)
text = cleaner(text)
return text
def _symbols_to_sequence(symbols):
return [_symbol_to_id[s] for s in symbols if _should_keep_symbol(s)]
def _arpabet_to_sequence(text):
return _symbols_to_sequence(['@' + s for s in text.split()])
def _should_keep_symbol(s):
return s in _symbol_to_id and s != '_' and s != '~'
def get_arpabet(word, cmudict, index=0):
re_start_punc = r"\A\W+"
re_end_punc = r"\W+\Z"
start_symbols = re.findall(re_start_punc, word)
if len(start_symbols):
start_symbols = start_symbols[0]
word = word[len(start_symbols):]
else:
start_symbols = ''
end_symbols = re.findall(re_end_punc, word)
if len(end_symbols):
end_symbols = end_symbols[0]
word = word[:-len(end_symbols)]
else:
end_symbols = ''
arpabet_suffix = ''
if _apostrophe.match(word) is not None and word.lower() != "it's" and word.lower()[-1] == 's':
word = word[:-2]
arpabet_suffix = ' Z'
arpabet = None if word.lower() in HETERONYMS else cmudict.lookup(word)
if arpabet is not None:
return start_symbols + '{%s}' % (arpabet[index] + arpabet_suffix) + end_symbols
else:
return start_symbols + word + end_symbols
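# Illustrative examples of the behaviour above (hypothetical; they assume the words
# exist in the supplied cmudict):
#   get_arpabet('Hello,', cmudict)  -> '{HH AH0 L OW1},'  (trailing punctuation kept outside the braces)
#   get_arpabet("dog's", cmudict)   -> '{D AO1 G Z}'      (possessive stripped, ' Z' appended)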
def files_to_list(filename):
"""
Takes a text file of filenames and makes a list of filenames
"""
with open(filename, encoding='utf-8') as f:
files = f.readlines()
files = [f.rstrip() for f in files]
return files
HETERONYMS = set(files_to_list('data/heteronyms'))
| 29.958678 | 98 | 0.636138 |
cd0be28569d644b45029e560da2e917a9ab86dd9 | 1,379 | py | Python | src/ai/backend/client/exceptions.py | dexterastin/backend.ai-client-py | 91a80cb5b1ebec52016db7e976571949386e6bda | [
"MIT"
] | null | null | null | src/ai/backend/client/exceptions.py | dexterastin/backend.ai-client-py | 91a80cb5b1ebec52016db7e976571949386e6bda | [
"MIT"
] | null | null | null | src/ai/backend/client/exceptions.py | dexterastin/backend.ai-client-py | 91a80cb5b1ebec52016db7e976571949386e6bda | [
"MIT"
] | null | null | null | from typing import Any
import json
__all__ = (
'BackendError',
'BackendAPIError',
'BackendClientError',
'APIVersionWarning',
)
class BackendError(Exception):
'''Exception type to catch all ai.backend-related errors.'''
def __str__(self):
return repr(self)
class BackendAPIError(BackendError):
'''Exceptions returned by the API gateway.'''
def __init__(self, status: int, reason: str, data: Any):
if isinstance(data, (str, bytes)):
try:
data = json.loads(data)
except json.JSONDecodeError:
data = {
'type': 'https://api.backend.ai/probs/generic-error',
'title': 'Generic Error (could not parse error string)',
'content': data,
}
super().__init__(status, reason, data)
@property
def status(self) -> int:
return self.args[0]
@property
def reason(self) -> str:
return self.args[1]
@property
def data(self) -> Any:
return self.args[2]
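# Illustrative handling sketch (not part of the library itself):
#   try:
#       ...  # some client call that fails on the gateway side
#   except BackendAPIError as e:
#       print(e.status, e.reason, e.data.get('title'))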
class BackendClientError(BackendError):
"""
Exceptions from the client library, such as argument validation
errors and connection failures.
"""
pass
class APIVersionWarning(UserWarning):
"""
The warning generated if the server's API version is higher.
"""
pass
| 22.241935 | 76 | 0.591008 |
961b50676cbcaf66c41d50e3212dad12d647b9a3 | 1,070 | py | Python | joommf/cube/hysteresis.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 7 | 2016-01-25T09:36:46.000Z | 2021-09-03T01:42:19.000Z | joommf/cube/hysteresis.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 1 | 2016-03-07T17:11:44.000Z | 2016-03-07T17:11:44.000Z | joommf/cube/hysteresis.py | fangohr/oommf-python | 9c9f617c4efe4b488f01703186c1126070ea5d3f | [
"BSD-2-Clause"
] | 9 | 2015-09-30T10:53:06.000Z | 2021-05-12T20:21:52.000Z | import joommf
from joommf.energies import Exchange
from joommf.energies import Demag
from joommf.energies import FixedZeeman, UniformZeeman
lx = ly = lz = 50e-9
dx = dy = dz = 5e-9
Ms, A = 8e5, 1.3e-11
mT_conv = 795.77472
H = 200*mT_conv
m_init = (0, 0, 0) # initial magnetisation
mesh = joommf.Mesh((lx, ly, lz), (dx, dy, dz))
sim = joommf.Sim(mesh, Ms, name='cube_example')
sim.add_energy(Exchange(A))
sim.add_energy(UniformZeeman([0, 0, 0], [0, 0, H], 30))
sim.add_energy(Demag())
sim.set_evolver(joommf.Minimiser(m_init, Ms, name='cube'))
sim.add_output('Magnetization')
# Run simulation.
sim.minimise()
sim2 = joommf.Sim(mesh, Ms, name='cube_example')
sim2.add_energy(Exchange(A))
sim2.add_energy(UniformZeeman([0, 0, 0], [0, 0, -H], 30))
sim2.add_energy(Demag())
sim2.set_evolver(joommf.Minimiser(m_init, Ms, name='cube'))
sim2.add_output('Magnetization')
# Run simulation.
sim2.minimise()
plt.plot(sim.df.UZeeman_Bz.values, sim.df.MinDriver_mz)
plt.plot(sim2.df.UZeeman_Bz.values, sim2.df.MinDriver_mz)
plt.ylim(-1.1, 1.1)
plt.xlabel('B (mT)')
plt.ylabel('Mz')
| 31.470588 | 59 | 0.720561 |
fd33434dab7d57c76eaea169b92c5416f991138a | 1,273 | py | Python | web_flask/100-bootcampbnb.py | johncoleman83/AirBnB | 0be3aa5e2598a4950920a689196e518e8c94a463 | [
"MIT"
] | 2 | 2018-02-03T16:20:39.000Z | 2020-02-28T03:37:02.000Z | web_flask/100-bootcampbnb.py | johncoleman83/AirBnB | 0be3aa5e2598a4950920a689196e518e8c94a463 | [
"MIT"
] | 1 | 2017-11-15T18:05:03.000Z | 2017-11-15T18:05:03.000Z | web_flask/100-bootcampbnb.py | johncoleman83/AirBnB | 0be3aa5e2598a4950920a689196e518e8c94a463 | [
"MIT"
] | 4 | 2018-01-09T14:29:36.000Z | 2020-09-29T13:24:35.000Z | #!/usr/bin/python3
"""
Flask App that integrates with AirBnB static HTML Template
"""
from flask import Flask, render_template, url_for
from models import storage
# flask setup
app = Flask(__name__)
app.url_map.strict_slashes = False
port = 5000
host = '0.0.0.0'
# begin flask page rendering
@app.teardown_appcontext
def teardown_db(exception):
"""
after each request, this method calls .close() (i.e. .remove()) on
the current SQLAlchemy Session
"""
storage.close()
@app.route('/btcpbnb')
def btcpbnb_filters(the_id=None):
"""
handles request to custom template with states, cities & amentities
"""
state_objs = storage.all('State').values()
states = dict([state.name, state] for state in state_objs)
amens = storage.all('Amenity').values()
places = storage.all('Place').values()
users = dict([user.id, "{} {}".format(user.first_name, user.last_name)]
for user in storage.all('User').values())
return render_template('100-btcpbnb.html',
states=states,
amens=amens,
places=places,
users=users)
if __name__ == "__main__":
"""
MAIN Flask App"""
app.run(host=host, port=port)
| 27.085106 | 75 | 0.619796 |
ec053c9b6d7955a7ab024704c7afd206206c1dcb | 327 | py | Python | zaifapi/__init__.py | fcce-proj/zaifapi | 6299a744363ee84765839dbf997a049d8b90e0cc | [
"MIT"
] | null | null | null | zaifapi/__init__.py | fcce-proj/zaifapi | 6299a744363ee84765839dbf997a049d8b90e0cc | [
"MIT"
] | null | null | null | zaifapi/__init__.py | fcce-proj/zaifapi | 6299a744363ee84765839dbf997a049d8b90e0cc | [
"MIT"
] | null | null | null | from .exchange_api import *
from .oauth import ZaifTokenApi
_MAX_COUNT = 1000
_MIN_WAIT_TIME_SEC = 1
__version__ = '1.6.3'
__all__ = [
'__version__',
'ZaifTradeApi',
'ZaifPublicApi',
'ZaifTokenTradeApi',
'ZaifTokenApi',
'ZaifPublicStreamApi',
'ZaifLeverageTradeApi',
'ZaifFuturesPublicApi'
]
| 17.210526 | 31 | 0.69419 |
a7a0bff388608948faf30905a4af80357719a1b0 | 1,114 | py | Python | SyncIn/SyncIn/urls.py | LesGrailleurs/H22-GR1-SyncIn | 05315bbc996e2f056895f6373f2f463981a0dea5 | [
"MIT"
] | 1 | 2022-01-28T22:08:57.000Z | 2022-01-28T22:08:57.000Z | SyncIn/SyncIn/urls.py | LesGrailleurs/H22-GR1-SyncIn | 05315bbc996e2f056895f6373f2f463981a0dea5 | [
"MIT"
] | null | null | null | SyncIn/SyncIn/urls.py | LesGrailleurs/H22-GR1-SyncIn | 05315bbc996e2f056895f6373f2f463981a0dea5 | [
"MIT"
] | null | null | null | """SyncIn URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/4.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
# Links all of the apps' urls.py files to the frontend via URL path() entries
urlpatterns = [
path('admin/', admin.site.urls),
path('', include("main.urls")),
path('search/', include("search.urls")),
path('music/', include("music.urls")),
path('login/', include("login.urls")),
path('favorites/', include("favorites.urls")),
path("accounts/", include("django.contrib.auth.urls"))
]
| 38.413793 | 79 | 0.68851 |
63818b5fe7ddad48ba9863c1fe50975853f4ee0f | 1,231 | py | Python | covid/impl/Categorical2.py | tgrrr/covid19uk | afde150b841d7a0773b0fe2b918aa1773f791bfa | [
"MIT"
] | null | null | null | covid/impl/Categorical2.py | tgrrr/covid19uk | afde150b841d7a0773b0fe2b918aa1773f791bfa | [
"MIT"
] | null | null | null | covid/impl/Categorical2.py | tgrrr/covid19uk | afde150b841d7a0773b0fe2b918aa1773f791bfa | [
"MIT"
] | null | null | null | """Categorical2 corrects a bug in the tfd.Categorical.log_prob"""
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow_probability.python.internal import distribution_util
from tensorflow_probability.python.internal import dtype_util
from tensorflow_probability.python.distributions.categorical import (
_broadcast_cat_event_and_params,
)
tfd = tfp.distributions
# Todo remove this class when https://github.com/tensorflow/tensorflow/issues/40606
# is fixed
class Categorical2(tfd.Categorical):
"""Done to override the faulty log_prob in tfd.Categorical due to
https://github.com/tensorflow/tensorflow/issues/40606"""
def _log_prob(self, k):
with tf.name_scope("Cat2log_prob"):
logits = self.logits_parameter()
if self.validate_args:
k = distribution_util.embed_check_integer_casting_closed(
k, target_dtype=self.dtype
)
k, logits = _broadcast_cat_event_and_params(
k, logits, base_dtype=dtype_util.base_dtype(self.dtype)
)
logits_normalised = tf.math.log(tf.math.softmax(logits))
return tf.gather(logits_normalised, k, batch_dims=1)
| 39.709677 | 83 | 0.709992 |
805f286c874ccdd28439f22ca0aa125f9a157433 | 683 | py | Python | getpinnedmessages.py | conradwee/telegram-analysis | 354dadc2123d3f4747dd97ce9b24e1fd94ebc76c | [
"MIT"
] | 104 | 2017-04-08T21:47:35.000Z | 2022-01-28T02:11:19.000Z | getpinnedmessages.py | conradwee/telegram-analysis | 354dadc2123d3f4747dd97ce9b24e1fd94ebc76c | [
"MIT"
] | 10 | 2016-11-22T20:07:28.000Z | 2017-04-07T23:23:21.000Z | getpinnedmessages.py | conradwee/telegram-analysis | 354dadc2123d3f4747dd97ce9b24e1fd94ebc76c | [
"MIT"
] | 15 | 2018-03-22T21:13:36.000Z | 2021-07-13T04:09:55.000Z | #!/usr/bin/env python3
from json import loads
import argparse
parser = argparse.ArgumentParser(
"Print all the pinned text messages from a Telegram chat log")
parser.add_argument(
'file',
help='path to the json file (chat log) to analyse')
args = parser.parse_args()
with open(args.file,'r') as f:
jsn = [loads(line) for line in f.readlines()]
pins = [x['reply_id'] for x in jsn if
'text' in x and x['text'] == 'pinned the message']
#reply_id is the ID of the message that has been pinned.
pin_msgs = [x for x in jsn if x['id'] in pins if 'text' in x]
#ignore pins with no text
_ = [print(x['text'],'\n------------------') for x in pin_msgs]
| 29.695652 | 70 | 0.642753 |
ddcd3d7078a71e9d576abb986942471b247cc420 | 14,974 | py | Python | examples/data_augmentation/renderer/kittiLib.py | BehaviorPredictionTestingPlatform/VerifAI | db05f3573c2e7d98c03029c1b4efca93e6b08edb | [
"BSD-3-Clause"
] | 109 | 2019-04-29T03:30:42.000Z | 2022-03-31T03:06:26.000Z | examples/data_augmentation/renderer/kittiLib.py | BehaviorPredictionTestingPlatform/VerifAI | db05f3573c2e7d98c03029c1b4efca93e6b08edb | [
"BSD-3-Clause"
] | 25 | 2019-03-25T00:27:39.000Z | 2022-03-27T20:29:23.000Z | examples/data_augmentation/renderer/kittiLib.py | BehaviorPredictionTestingPlatform/VerifAI | db05f3573c2e7d98c03029c1b4efca93e6b08edb | [
"BSD-3-Clause"
] | 35 | 2019-02-12T20:50:32.000Z | 2022-01-05T11:25:06.000Z | """Generates a library with images in Kitti forma"""
import numpy as np
from renderer.library import Library
# Helper functions that load the current library objects
BACK_ORIENT = -np.pi / 2
FRONT_ORIENT = np.pi / 2
#Paths to car and road image directories
FORE_SPACES_FILE = './renderer/imgSampSpaces.pickle' # File with foreground spaces info
LIBRARY_PATH = 'renderer/library/'
BACK_PATH = LIBRARY_PATH + 'roads/'
FORE_PATH = LIBRARY_PATH + 'cars/'
def loadImages():
"""Load car and road images."""
roadImages = []
roadImages.append({'roadPath':BACK_PATH + 'desert_kitti.png', \
'roadType':'Desert Road', 'roadId':0, 'backgroundColor': 'brown light, blue light', 'environment': 'desert'})
roadImages.append({'roadPath':BACK_PATH + 'city_kitti.png',\
'roadType':'City Road', 'roadId':1, 'backgroundColor': 'brown light, gray', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'forest_kitti.png',\
'roadType':'Forest Road', 'roadId':2, 'backgroundColor': 'green light, green dark', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'big_sur_kitti.png',\
'roadType':'Big Sur Road', 'roadId':3, 'backgroundColor': 'brown, blue', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'mountain_kitti.jpg',\
'roadType':'Mountain Road', 'roadId':4, 'backgroundColor': 'green', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'bridge_kitti.jpg',\
'roadType':'Bridge Road', 'roadId':5, 'backgroundColor': 'green, red', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'tunnel_kitti.jpg',\
'roadType':'Tunnel Road', 'roadId':6, 'backgroundColor': 'gray', 'environment': 'mountain'})
roadImages.append({'roadPath':BACK_PATH + 'island_kitti.jpg',\
'roadType':'Island Road', 'roadId':7, 'backgroundColor': 'blue light, green, brown light', 'environment': 'field'})
roadImages.append({'roadPath':BACK_PATH + 'countryside_kitti.jpg',\
'roadType':'Countryside Road', 'roadId':8, 'backgroundColor': 'green', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'hill_kitti.jpg',\
'roadType':'Hill Road', 'roadId':9, 'backgroundColor': 'green, white', 'environment': 'field'})
roadImages.append({'roadPath':BACK_PATH + 'alps_kitti.png',\
'roadType':'Alps Road', 'roadId':10, 'backgroundColor': 'brown light, gray', 'environment': 'mountain'})
roadImages.append({'roadPath':BACK_PATH + 'bridge_1_kitti.png',\
'roadType':'Bridge 1 Road', 'roadId':11, 'backgroundColor': 'gray light, blue light', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'building_kitti.png',\
'roadType':'Building Road', 'roadId':12, 'backgroundColor': 'gray, brown light', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'cloud_kitti.png',\
'roadType':'Cloud Road', 'roadId':13, 'backgroundColor': 'green, brown, black', 'environment': 'field'})
roadImages.append({'roadPath':BACK_PATH + 'downtown_kitti.png',\
'roadType':'Downtown Road', 'roadId':14, 'backgroundColor': 'brown light, yellow, gray', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'freeway_kitti.png',\
'roadType':'Freeway Road', 'roadId':15, 'backgroundColor': 'gray', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'track_kitti.jpg',\
'roadType':'Track Road', 'roadId':16, 'backgroundColor': 'blue, blue light', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'rainforest_kitti.png',\
'roadType':'Rainforest Road', 'roadId':17, 'backgroundColor': 'green, brown light', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'tree_kitti.png',\
'roadType':'Tree Road', 'roadId':18, 'backgroundColor': 'green, yellow', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'trees_kitti.png',\
'roadType':'Trees Road', 'roadId':19, 'backgroundColor': 'green', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'fields_kitti.png',\
'roadType':'Fields Road', 'roadId':20, 'backgroundColor': 'green, brown', 'environment': 'forest, fields'})
roadImages.append({'roadPath':BACK_PATH + 'construction_kitti.png',\
'roadType':'Construction Road', 'roadId':21, 'backgroundColor': 'gray, brown', 'environment': 'city'})
roadImages.append({'roadPath':BACK_PATH + 'little_bridge_kitti.jpg',\
'roadType':'Little Bridge', 'roadId':22, 'backgroundColor': 'green, gray', 'environment': 'forest'})
roadImages.append({'roadPath':BACK_PATH + 'parking_lot_kitti.png',\
'roadType':'Parking Lot', 'roadId':23, 'backgroundColor': 'gray', 'environment': 'city, parking'})
roadImages.append({'roadPath':BACK_PATH + 'indoor_parking_kitti.png',\
'roadType':'Indoor Parking Road', 'roadId':24, 'backgroundColor': 'gray', 'environment': 'city, parking'})
roadImages.append({'roadPath':BACK_PATH + 'freeway_moto_kitti.jpg',\
        'roadType':'Freeway Moto Road', 'roadId':25, 'backgroundColor': 'black, brown', 'environment': 'desert, freeway'})
roadImages.append({'roadPath':BACK_PATH + 'freeway_kitti.jpg',\
'roadType':'Freeway Road', 'roadId':26, 'backgroundColor': 'black, blue, green', 'environment': 'freeway'})
roadImages.append({'roadPath':BACK_PATH + 'snow_kitti.jpg',\
'roadType':'Snow Road', 'roadId':27, 'backgroundColor': 'white', 'environment': 'snow, forest'})
roadImages.append({'roadPath':BACK_PATH + 'icy_kitti.jpg',\
'roadType':'Icy Road', 'roadId':28, 'backgroundColor': 'white', 'environment': 'snow, forest'})
roadImages.append({'roadPath':BACK_PATH + 'night_road_kitti.jpg',\
'roadType':'Night Road', 'roadId':29, 'backgroundColor': 'black', 'environment': 'fields'})
roadImages.append({'roadPath':BACK_PATH + 'night_bridge_kitti.jpg',\
'roadType':'Night Bridge Road', 'roadId':30, 'backgroundColor': 'black', 'environment': 'bridge'})
roadImages.append({'roadPath':BACK_PATH + 'in_tunnel_kitti.jpg',\
'roadType':'In Tunnel Road', 'roadId':31, 'backgroundColor': 'gray, blue, red', 'environment': 'tunnel'})
roadImages.append({'roadPath':BACK_PATH + 'rainy_bridge_kitti.jpg',\
'roadType':'Rainy Bridge Road', 'roadId':32, 'backgroundColor': 'gray, blue', 'environment': 'bridge'})
roadImages.append({'roadPath':BACK_PATH + 'joshua_tree_kitti.jpg',\
'roadType':'Joshua Tree Road', 'roadId':33, 'backgroundColor': 'brown, green, blue', 'environment': 'desert'})
roadImages.append({'roadPath':BACK_PATH + 'yosemite_kitti.png',\
'roadType':'Yosemite Road', 'roadId':34, 'backgroundColor': 'gray, green, blue', 'environment': 'forest'})
carImages = [{'carPath':FORE_PATH + 'bmw_gray_front_kitti.png', 'type':'BMW Kitti', \
'carId':0, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'suzuki_rear_kitti.png','type':'Suzuki Kitti',\
'carId':1, 'carCategory': 'jeep', 'carColor': 'red dark', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'tesla_rear_kitti.png', 'type':'Tesla Kitti', \
'carId':2, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'fiat_front_kitti.png', 'type':'Fiat Kitti',\
'carId':3, 'carCategory': 'car', 'carColor': 'green', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'honda_kitti.png', 'type':'Honda Kitti',\
'carId':4, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'toyota_kitti.png', 'type':'Toyota Kitti',\
'carId':5, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'peugeot_kitti.png', 'type':'Peugeot Kitti',\
'carId':6, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'chrysler_kitti.png', 'type':'Chrysler Kitti', \
'carId':7, 'carCategory': 'van', 'carColor': 'gray', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'bmw_blue_kitti.png', 'type': 'BMW Blue Kitti', \
'carId':8, 'carCategory': 'car', 'carColor': 'blue', 'carOrientation': BACK_ORIENT},
{'carPath':FORE_PATH + 'honda_civic_front_kitti.png', 'type':'Honda Civic Front Kitti', \
'carId':9, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'toyota_camry_front_kitti.png', 'type': 'Toyota Camry Front Kitti', \
'carId':10, 'carCategory': 'car', 'carColor': 'cream', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'toyota_prius_front_kitti.png', 'type': 'Toyota Prius Front Kitti', \
'carId':11, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'benz_front_kitti.png', 'type': 'Benz Front Kitti', \
'carId':12, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'ford_front_kitti.png', 'type': 'Ford Front Kitti', \
'carId':13, 'carCategory': 'car', 'carColor': 'red', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'jeep_front_kitti.png', 'type': 'Jeep Front Kitti', \
'carId':14, 'carCategory': 'jeep', 'carColor': 'red', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'jeep_cherokee_front_kitti.png', 'type': 'Jeep Cherokee Front Kitti', \
'carId':15, 'carCategory': 'jeep', 'carColor': 'cream', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'fiat_front_kitti.png', 'type': 'Fiat Front Kitti', \
'carId':16, 'carCategory': 'car', 'carColor': 'blue', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'bmw_front_kitti.png', 'type': 'BMW Front Kitti', \
'carId':17, 'carCategory': 'car', 'carColor': 'blue dark', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'suzuki_front_kitti.png', 'type': 'Suzuki Front Kitti', \
'carId':18, 'carCategory': 'jeep', 'carColor': 'red dark', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'volkswagen_golf_front_kitti.png', 'type': 'Volkswagen Golf Kitti', \
'carId':19, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'toyota_new_prius_front_kitti.png', 'type': 'Toyota New Prius Kitti', \
'carId':20, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'volvo_rear_kitti.png', 'type': 'Volvo Kitti', \
'carId':21, 'carCategory': 'car', 'carColor': 'brown', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'porche_rear_kitti.png', 'type': 'Porche Kitti', \
'carId':22, 'carCategory': 'car', 'carColor': 'white', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'corvette_front_kitti.png', 'type': 'Corvette Kitti', \
'carId':23, 'carCategory': 'car', 'carColor': 'yellow', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'ford_truck_rear_kitti.png', 'type': 'Ford Kitti', \
'carId':24, 'carCategory': 'truck', 'carColor': 'white', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'chevrolet_truck_rear_kitti.png', 'type': 'Chevrolet Kitti', \
'carId':25, 'carCategory': 'truck', 'carColor': 'red', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'mercedes_rear_kitti.png', 'type': 'Mercedes Kitti', \
'carId':26, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'tesla_front_kitti.png', 'type': 'Tesla Kitti', \
'carId':27, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'mercedes_front_kitti.png', 'type': 'Mercedes Kitti', \
'carId':28, 'carCategory': 'jeep', 'carColor': 'gray', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'mazda_front_kitti.png', 'type': 'Mazda Kitti', \
'carId':29, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'mazda_rear_kitti.png', 'type': 'Mazda Kitti', \
'carId':30, 'carCategory': 'car', 'carColor': 'gray', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'scion_rear_kitti.png', 'type': 'Scion Kitti', \
'carId':31, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'scion_front_kitti.png', 'type': 'Scion Kitti', \
'carId':32, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'fiat_abarth_front_kitti.png', 'type': 'Fiat Abarth Kitti', \
'carId':33, 'carCategory': 'car', 'carColor': 'orange', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'volkswagen_beetle_front_kitti.png', 'type': 'Volkswagen Beetle Kitti', \
'carId':34, 'carCategory': 'car', 'carColor': 'red dark', 'carOrientation': FRONT_ORIENT },
{'carPath': FORE_PATH + 'smart_rear_kitti.png', 'type': 'Smart Kitti', \
'carId':35, 'carCategory': 'car', 'carColor': 'black', 'carOrientation': BACK_ORIENT },
{'carPath': FORE_PATH + 'smart_front_kitti.png', 'type': 'Smart Kitti', \
'carId':36, 'carCategory': 'car', 'carColor': 'blue light', 'carOrientation': FRONT_ORIENT }
]
return roadImages, carImages
def getLib():
"""Instantiate the library"""
roadImages, carImages = loadImages()
return Library(roadImages, carImages, FORE_SPACES_FILE)
| 85.565714 | 134 | 0.598704 |
a341e6afca622634643339eb92ff785dccfc069e | 7,726 | py | Python | docs/conf.py | wimlds-trojmiasto/batalionki | 11d082641e00e65b96aeab6651488bbb8a20712a | [
"MIT"
] | 29 | 2019-08-21T14:06:34.000Z | 2022-03-27T12:33:53.000Z | docs/conf.py | wimlds-trojmiasto/batalionki | 11d082641e00e65b96aeab6651488bbb8a20712a | [
"MIT"
] | 4 | 2019-08-21T18:14:19.000Z | 2019-08-31T19:39:35.000Z | docs/conf.py | wimlds-trojmiasto/batalionki | 11d082641e00e65b96aeab6651488bbb8a20712a | [
"MIT"
] | 25 | 2019-08-19T14:21:25.000Z | 2022-03-26T19:55:45.000Z | # -*- coding: utf-8 -*-
#
# birds documentation build configuration file, created by
# sphinx-quickstart.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'birds'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'birdsdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index',
'birds.tex',
u'birds Documentation',
u"WiMLDS", 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'birds', u'birds Documentation',
[u"WiMLDS"], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'birds', u'birds Documentation',
u"WiMLDS", 'birds',
'Analysis and modelling of Polish birds songs', 'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
| 31.534694 | 80 | 0.705281 |
76d5f3935b4a72c93bbbade8e950f38ae48243ee | 34,823 | py | Python | tests/test_commands.py | alexpdev/scrapy | 636127ec1ea2b8949438015c2167ab5d009ff1bf | [
"BSD-3-Clause"
] | null | null | null | tests/test_commands.py | alexpdev/scrapy | 636127ec1ea2b8949438015c2167ab5d009ff1bf | [
"BSD-3-Clause"
] | 1 | 2021-09-20T19:54:42.000Z | 2021-09-20T19:54:42.000Z | tests/test_commands.py | alexpdev/scrapy | 636127ec1ea2b8949438015c2167ab5d009ff1bf | [
"BSD-3-Clause"
] | null | null | null | import inspect
import json
import argparse
import os
import platform
import re
import subprocess
import sys
import tempfile
from contextlib import contextmanager
from itertools import chain
from os.path import exists, join, abspath, getmtime
from pathlib import Path
from shutil import rmtree, copytree
from stat import S_IWRITE as ANYONE_WRITE_PERMISSION
from tempfile import mkdtemp
from threading import Timer
from unittest import skipIf
from pytest import mark
from twisted import version as twisted_version
from twisted.python.versions import Version
from twisted.trial import unittest
import scrapy
from scrapy.commands import view, ScrapyCommand, ScrapyHelpFormatter
from scrapy.commands.startproject import IGNORE
from scrapy.settings import Settings
from scrapy.utils.python import to_unicode
from scrapy.utils.test import get_testenv
from tests.test_crawler import ExceptionSpider, NoRequestsSpider
class CommandSettings(unittest.TestCase):
def setUp(self):
self.command = ScrapyCommand()
self.command.settings = Settings()
self.parser = argparse.ArgumentParser(formatter_class=ScrapyHelpFormatter,
conflict_handler='resolve')
self.command.add_options(self.parser)
def test_settings_json_string(self):
feeds_json = '{"data.json": {"format": "json"}, "data.xml": {"format": "xml"}}'
opts, args = self.parser.parse_known_args(args=['-s', f'FEEDS={feeds_json}', 'spider.py'])
self.command.process_options(args, opts)
self.assertIsInstance(self.command.settings['FEEDS'], scrapy.settings.BaseSettings)
self.assertEqual(dict(self.command.settings['FEEDS']), json.loads(feeds_json))
def test_help_formatter(self):
formatter = ScrapyHelpFormatter(prog='scrapy')
part_strings = ['usage: scrapy genspider [options] <name> <domain>\n\n',
'\n', 'optional arguments:\n', '\n', 'Global Options:\n']
self.assertEqual(
formatter._join_parts(part_strings),
('Usage\n=====\n scrapy genspider [options] <name> <domain>\n\n\n'
'Optional Arguments\n==================\n\n'
'Global Options\n--------------\n')
)
class ProjectTest(unittest.TestCase):
project_name = 'testproject'
def setUp(self):
self.temp_path = mkdtemp()
self.cwd = self.temp_path
self.proj_path = join(self.temp_path, self.project_name)
self.proj_mod_path = join(self.proj_path, self.project_name)
self.env = get_testenv()
def tearDown(self):
rmtree(self.temp_path)
def call(self, *new_args, **kwargs):
with tempfile.TemporaryFile() as out:
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
return subprocess.call(args, stdout=out, stderr=out, cwd=self.cwd,
env=self.env, **kwargs)
def proc(self, *new_args, **popen_kwargs):
args = (sys.executable, '-m', 'scrapy.cmdline') + new_args
p = subprocess.Popen(
args,
cwd=popen_kwargs.pop('cwd', self.cwd),
env=self.env,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_kwargs,
)
def kill_proc():
p.kill()
p.communicate()
assert False, 'Command took too much time to complete'
timer = Timer(15, kill_proc)
try:
timer.start()
stdout, stderr = p.communicate()
finally:
timer.cancel()
return p, to_unicode(stdout), to_unicode(stderr)
def find_in_file(self, filename, regex):
"""Find first pattern occurrence in file"""
pattern = re.compile(regex)
with open(filename, "r") as f:
for line in f:
match = pattern.search(line)
if match is not None:
return match
class StartprojectTest(ProjectTest):
def test_startproject(self):
p, out, err = self.proc('startproject', self.project_name)
print(out)
print(err, file=sys.stderr)
self.assertEqual(p.returncode, 0)
assert exists(join(self.proj_path, 'scrapy.cfg'))
assert exists(join(self.proj_path, 'testproject'))
assert exists(join(self.proj_mod_path, '__init__.py'))
assert exists(join(self.proj_mod_path, 'items.py'))
assert exists(join(self.proj_mod_path, 'pipelines.py'))
assert exists(join(self.proj_mod_path, 'settings.py'))
assert exists(join(self.proj_mod_path, 'spiders', '__init__.py'))
self.assertEqual(1, self.call('startproject', self.project_name))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
def test_startproject_with_project_dir(self):
project_dir = mkdtemp()
self.assertEqual(0, self.call('startproject', self.project_name, project_dir))
assert exists(join(abspath(project_dir), 'scrapy.cfg'))
assert exists(join(abspath(project_dir), 'testproject'))
assert exists(join(join(abspath(project_dir), self.project_name), '__init__.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'items.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'pipelines.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'settings.py'))
assert exists(join(join(abspath(project_dir), self.project_name), 'spiders', '__init__.py'))
self.assertEqual(0, self.call('startproject', self.project_name, project_dir + '2'))
self.assertEqual(1, self.call('startproject', self.project_name, project_dir))
self.assertEqual(1, self.call('startproject', self.project_name + '2', project_dir))
self.assertEqual(1, self.call('startproject', 'wrong---project---name'))
self.assertEqual(1, self.call('startproject', 'sys'))
self.assertEqual(2, self.call('startproject'))
self.assertEqual(2, self.call('startproject', self.project_name, project_dir, 'another_params'))
def test_existing_project_dir(self):
project_dir = mkdtemp()
project_name = self.project_name + '_existing'
project_path = os.path.join(project_dir, project_name)
os.mkdir(project_path)
p, out, err = self.proc('startproject', project_name, cwd=project_dir)
print(out)
print(err, file=sys.stderr)
self.assertEqual(p.returncode, 0)
assert exists(join(abspath(project_path), 'scrapy.cfg'))
assert exists(join(abspath(project_path), project_name))
assert exists(join(join(abspath(project_path), project_name), '__init__.py'))
assert exists(join(join(abspath(project_path), project_name), 'items.py'))
assert exists(join(join(abspath(project_path), project_name), 'pipelines.py'))
assert exists(join(join(abspath(project_path), project_name), 'settings.py'))
assert exists(join(join(abspath(project_path), project_name), 'spiders', '__init__.py'))
def get_permissions_dict(path, renamings=None, ignore=None):
def get_permissions(path):
return oct(os.stat(path).st_mode)
renamings = renamings or tuple()
permissions_dict = {
'.': get_permissions(path),
}
for root, dirs, files in os.walk(path):
nodes = list(chain(dirs, files))
if ignore:
ignored_names = ignore(root, nodes)
nodes = [node for node in nodes if node not in ignored_names]
for node in nodes:
absolute_path = os.path.join(root, node)
relative_path = os.path.relpath(absolute_path, path)
for search_string, replacement in renamings:
relative_path = relative_path.replace(
search_string,
replacement
)
permissions = get_permissions(absolute_path)
permissions_dict[relative_path] = permissions
return permissions_dict
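# Illustrative note: with renamings = (('module', 'myproject'), ('.tmpl', '')), a
# template file at 'module/settings.py.tmpl' is recorded under the key
# 'myproject/settings.py', so template permissions can be compared directly against
# the generated project tree.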
class StartprojectTemplatesTest(ProjectTest):
maxDiff = None
def setUp(self):
super().setUp()
self.tmpl = join(self.temp_path, 'templates')
self.tmpl_proj = join(self.tmpl, 'project')
def test_startproject_template_override(self):
copytree(join(scrapy.__path__[0], 'templates'), self.tmpl)
with open(join(self.tmpl_proj, 'root_template'), 'w'):
pass
assert exists(join(self.tmpl_proj, 'root_template'))
args = ['--set', f'TEMPLATES_DIR={self.tmpl}']
p, out, err = self.proc('startproject', self.project_name, *args)
self.assertIn(f"New Scrapy project '{self.project_name}', "
"using template directory", out)
self.assertIn(self.tmpl_proj, out)
assert exists(join(self.proj_path, 'root_template'))
def test_startproject_permissions_from_writable(self):
"""Check that generated files have the right permissions when the
template folder has the same permissions as in the project, i.e.
everything is writable."""
scrapy_path = scrapy.__path__[0]
project_template = os.path.join(scrapy_path, 'templates', 'project')
project_name = 'startproject1'
renamings = (
('module', project_name),
('.tmpl', ''),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
destination = mkdtemp()
process = subprocess.Popen(
(
sys.executable,
'-m',
'scrapy.cmdline',
'startproject',
project_name,
),
cwd=destination,
env=self.env,
)
process.wait()
project_dir = os.path.join(destination, project_name)
actual_permissions = get_permissions_dict(project_dir)
self.assertEqual(actual_permissions, expected_permissions)
def test_startproject_permissions_from_read_only(self):
"""Check that generated files have the right permissions when the
template folder has been made read-only, which is something that some
systems do.
See https://github.com/scrapy/scrapy/pull/4604
"""
scrapy_path = scrapy.__path__[0]
templates_dir = os.path.join(scrapy_path, 'templates')
project_template = os.path.join(templates_dir, 'project')
project_name = 'startproject2'
renamings = (
('module', project_name),
('.tmpl', ''),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
def _make_read_only(path):
current_permissions = os.stat(path).st_mode
os.chmod(path, current_permissions & ~ANYONE_WRITE_PERMISSION)
read_only_templates_dir = str(Path(mkdtemp()) / 'templates')
copytree(templates_dir, read_only_templates_dir)
for root, dirs, files in os.walk(read_only_templates_dir):
for node in chain(dirs, files):
_make_read_only(os.path.join(root, node))
destination = mkdtemp()
process = subprocess.Popen(
(
sys.executable,
'-m',
'scrapy.cmdline',
'startproject',
project_name,
'--set',
f'TEMPLATES_DIR={read_only_templates_dir}',
),
cwd=destination,
env=self.env,
)
process.wait()
project_dir = os.path.join(destination, project_name)
actual_permissions = get_permissions_dict(project_dir)
self.assertEqual(actual_permissions, expected_permissions)
def test_startproject_permissions_unchanged_in_destination(self):
"""Check that pre-existing folders and files in the destination folder
do not see their permissions modified."""
scrapy_path = scrapy.__path__[0]
project_template = os.path.join(scrapy_path, 'templates', 'project')
project_name = 'startproject3'
renamings = (
('module', project_name),
('.tmpl', ''),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
destination = mkdtemp()
project_dir = os.path.join(destination, project_name)
existing_nodes = {
oct(permissions)[2:] + extension: permissions
for extension in ('', '.d')
for permissions in (
0o444, 0o555, 0o644, 0o666, 0o755, 0o777,
)
}
os.mkdir(project_dir)
project_dir_path = Path(project_dir)
for node, permissions in existing_nodes.items():
path = project_dir_path / node
if node.endswith('.d'):
path.mkdir(mode=permissions)
else:
path.touch(mode=permissions)
expected_permissions[node] = oct(path.stat().st_mode)
process = subprocess.Popen(
(
sys.executable,
'-m',
'scrapy.cmdline',
'startproject',
project_name,
'.',
),
cwd=project_dir,
env=self.env,
)
process.wait()
actual_permissions = get_permissions_dict(project_dir)
self.assertEqual(actual_permissions, expected_permissions)
def test_startproject_permissions_umask_022(self):
"""Check that generated files have the right permissions when the
system uses a umask value that causes new files to have different
permissions than those from the template folder."""
@contextmanager
def umask(new_mask):
cur_mask = os.umask(new_mask)
yield
os.umask(cur_mask)
scrapy_path = scrapy.__path__[0]
project_template = os.path.join(
scrapy_path,
'templates',
'project'
)
project_name = 'umaskproject'
renamings = (
('module', project_name),
('.tmpl', ''),
)
expected_permissions = get_permissions_dict(
project_template,
renamings,
IGNORE,
)
with umask(0o002):
destination = mkdtemp()
process = subprocess.Popen(
(
sys.executable,
'-m',
'scrapy.cmdline',
'startproject',
project_name,
),
cwd=destination,
env=self.env,
)
process.wait()
project_dir = os.path.join(destination, project_name)
actual_permissions = get_permissions_dict(project_dir)
self.assertEqual(actual_permissions, expected_permissions)
class CommandTest(ProjectTest):
def setUp(self):
super().setUp()
self.call('startproject', self.project_name)
self.cwd = join(self.temp_path, self.project_name)
self.env['SCRAPY_SETTINGS_MODULE'] = f'{self.project_name}.settings'
class GenspiderCommandTest(CommandTest):
def test_arguments(self):
# only pass one argument. spider script shouldn't be created
self.assertEqual(2, self.call('genspider', 'test_name'))
assert not exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
# pass two arguments <name> <domain>. spider script should be created
self.assertEqual(0, self.call('genspider', 'test_name', 'test.com'))
assert exists(join(self.proj_mod_path, 'spiders', 'test_name.py'))
def test_template(self, tplname='crawl'):
args = [f'--template={tplname}'] if tplname else []
spname = 'test_spider'
spmodule = f"{self.project_name}.spiders.{spname}"
p, out, err = self.proc('genspider', spname, 'test.com', *args)
self.assertIn(f"Created spider {spname!r} using template {tplname!r} in module:{os.linesep} {spmodule}", out)
self.assertTrue(exists(join(self.proj_mod_path, 'spiders', 'test_spider.py')))
modify_time_before = getmtime(join(self.proj_mod_path, 'spiders', 'test_spider.py'))
p, out, err = self.proc('genspider', spname, 'test.com', *args)
self.assertIn(f"Spider {spname!r} already exists in module", out)
modify_time_after = getmtime(join(self.proj_mod_path, 'spiders', 'test_spider.py'))
self.assertEqual(modify_time_after, modify_time_before)
def test_template_basic(self):
self.test_template('basic')
def test_template_csvfeed(self):
self.test_template('csvfeed')
def test_template_xmlfeed(self):
self.test_template('xmlfeed')
def test_list(self):
self.assertEqual(0, self.call('genspider', '--list'))
def test_dump(self):
self.assertEqual(0, self.call('genspider', '--dump=basic'))
self.assertEqual(0, self.call('genspider', '-d', 'basic'))
def test_same_name_as_project(self):
self.assertEqual(2, self.call('genspider', self.project_name))
assert not exists(join(self.proj_mod_path, 'spiders', f'{self.project_name}.py'))
def test_same_filename_as_existing_spider(self, force=False):
file_name = 'example'
file_path = join(self.proj_mod_path, 'spiders', f'{file_name}.py')
self.assertEqual(0, self.call('genspider', file_name, 'example.com'))
assert exists(file_path)
# change name of spider but not its file name
with open(file_path, 'r+') as spider_file:
file_data = spider_file.read()
file_data = file_data.replace("name = \'example\'", "name = \'renamed\'")
spider_file.seek(0)
spider_file.write(file_data)
spider_file.truncate()
modify_time_before = getmtime(file_path)
file_contents_before = file_data
if force:
p, out, err = self.proc('genspider', '--force', file_name, 'example.com')
self.assertIn(f"Created spider {file_name!r} using template \'basic\' in module", out)
modify_time_after = getmtime(file_path)
self.assertNotEqual(modify_time_after, modify_time_before)
file_contents_after = open(file_path, 'r').read()
self.assertNotEqual(file_contents_after, file_contents_before)
else:
p, out, err = self.proc('genspider', file_name, 'example.com')
self.assertIn(f"{file_path} already exists", out)
modify_time_after = getmtime(file_path)
self.assertEqual(modify_time_after, modify_time_before)
file_contents_after = open(file_path, 'r').read()
self.assertEqual(file_contents_after, file_contents_before)
def test_same_filename_as_existing_spider_force(self):
self.test_same_filename_as_existing_spider(force=True)
def test_url(self, url='test.com', domain="test.com"):
self.assertEqual(0, self.call('genspider', '--force', 'test_name', url))
self.assertEqual(domain,
self.find_in_file(join(self.proj_mod_path,
'spiders', 'test_name.py'),
r'allowed_domains\s*=\s*\[\'(.+)\'\]').group(1))
self.assertEqual(f'http://{domain}/',
self.find_in_file(join(self.proj_mod_path,
'spiders', 'test_name.py'),
r'start_urls\s*=\s*\[\'(.+)\'\]').group(1))
def test_url_schema(self):
self.test_url('http://test.com', 'test.com')
def test_url_path(self):
self.test_url('test.com/some/other/page', 'test.com')
def test_url_schema_path(self):
self.test_url('https://test.com/some/other/page', 'test.com')
class GenspiderStandaloneCommandTest(ProjectTest):
def test_generate_standalone_spider(self):
self.call('genspider', 'example', 'example.com')
assert exists(join(self.temp_path, 'example.py'))
def test_same_name_as_existing_file(self, force=False):
file_name = 'example'
file_path = join(self.temp_path, file_name + '.py')
p, out, err = self.proc('genspider', file_name, 'example.com')
self.assertIn(f"Created spider {file_name!r} using template \'basic\' ", out)
assert exists(file_path)
modify_time_before = getmtime(file_path)
file_contents_before = open(file_path, 'r').read()
if force:
# use different template to ensure contents were changed
p, out, err = self.proc('genspider', '--force', '-t', 'crawl', file_name, 'example.com')
self.assertIn(f"Created spider {file_name!r} using template \'crawl\' ", out)
modify_time_after = getmtime(file_path)
self.assertNotEqual(modify_time_after, modify_time_before)
file_contents_after = open(file_path, 'r').read()
self.assertNotEqual(file_contents_after, file_contents_before)
else:
p, out, err = self.proc('genspider', file_name, 'example.com')
self.assertIn(f"{join(self.temp_path, file_name + '.py')} already exists", out)
modify_time_after = getmtime(file_path)
self.assertEqual(modify_time_after, modify_time_before)
file_contents_after = open(file_path, 'r').read()
self.assertEqual(file_contents_after, file_contents_before)
def test_same_name_as_existing_file_force(self):
self.test_same_name_as_existing_file(force=True)
class MiscCommandsTest(CommandTest):
def test_list(self):
self.assertEqual(0, self.call('list'))
class RunSpiderCommandTest(CommandTest):
spider_filename = 'myspider.py'
debug_log_spider = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug("It Works!")
return []
"""
badspider = """
import scrapy
class BadSpider(scrapy.Spider):
name = "bad"
def start_requests(self):
raise Exception("oops!")
"""
@contextmanager
def _create_file(self, content, name=None):
tmpdir = self.mktemp()
os.mkdir(tmpdir)
if name:
fname = abspath(join(tmpdir, name))
else:
fname = abspath(join(tmpdir, self.spider_filename))
with open(fname, 'w') as f:
f.write(content)
try:
yield fname
finally:
rmtree(tmpdir)
def runspider(self, code, name=None, args=()):
with self._create_file(code, name) as fname:
return self.proc('runspider', fname, *args)
def get_log(self, code, name=None, args=()):
p, stdout, stderr = self.runspider(code, name, args=args)
return stderr
def test_runspider(self):
log = self.get_log(self.debug_log_spider)
self.assertIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
self.assertIn("INFO: Closing spider (finished)", log)
self.assertIn("INFO: Spider closed (finished)", log)
def test_run_fail_spider(self):
proc, _, _ = self.runspider("import scrapy\n" + inspect.getsource(ExceptionSpider))
ret = proc.returncode
self.assertNotEqual(ret, 0)
def test_run_good_spider(self):
proc, _, _ = self.runspider("import scrapy\n" + inspect.getsource(NoRequestsSpider))
ret = proc.returncode
self.assertEqual(ret, 0)
def test_runspider_log_level(self):
log = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_LEVEL=INFO'))
self.assertNotIn("DEBUG: It Works!", log)
self.assertIn("INFO: Spider opened", log)
def test_runspider_dnscache_disabled(self):
# see https://github.com/scrapy/scrapy/issues/2811
# The spider below should not be able to connect to localhost:12345,
# which is intended,
# but this should not be because of DNS lookup error
# assumption: localhost will resolve in all cases (true?)
dnscache_spider = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
start_urls = ['http://localhost:12345']
def parse(self, response):
return {'test': 'value'}
"""
log = self.get_log(dnscache_spider, args=('-s', 'DNSCACHE_ENABLED=False'))
self.assertNotIn("DNSLookupError", log)
self.assertIn("INFO: Spider opened", log)
def test_runspider_log_short_names(self):
log1 = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_SHORT_NAMES=1'))
self.assertIn("[myspider] DEBUG: It Works!", log1)
self.assertIn("[scrapy]", log1)
self.assertNotIn("[scrapy.core.engine]", log1)
log2 = self.get_log(self.debug_log_spider,
args=('-s', 'LOG_SHORT_NAMES=0'))
self.assertIn("[myspider] DEBUG: It Works!", log2)
self.assertNotIn("[scrapy]", log2)
self.assertIn("[scrapy.core.engine]", log2)
def test_runspider_no_spider_found(self):
log = self.get_log("from scrapy.spiders import Spider\n")
self.assertIn("No spider found in file", log)
def test_runspider_file_not_found(self):
_, _, log = self.proc('runspider', 'some_non_existent_file')
self.assertIn("File not found: some_non_existent_file", log)
def test_runspider_unable_to_load(self):
log = self.get_log('', name='myspider.txt')
self.assertIn('Unable to load', log)
def test_start_requests_errors(self):
log = self.get_log(self.badspider, name='badspider.py')
self.assertIn("start_requests", log)
self.assertIn("badspider.py", log)
def test_asyncio_enabled_true(self):
log = self.get_log(self.debug_log_spider, args=[
'-s', 'TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor'
])
self.assertIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
def test_asyncio_enabled_false(self):
log = self.get_log(self.debug_log_spider, args=[])
self.assertNotIn("Using reactor: twisted.internet.asyncioreactor.AsyncioSelectorReactor", log)
@mark.skipif(sys.implementation.name == 'pypy', reason='uvloop does not support pypy properly')
@mark.skipif(platform.system() == 'Windows', reason='uvloop does not support Windows')
@mark.skipif(twisted_version == Version('twisted', 21, 2, 0), reason='https://twistedmatrix.com/trac/ticket/10106')
def test_custom_asyncio_loop_enabled_true(self):
log = self.get_log(self.debug_log_spider, args=[
'-s',
'TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor',
'-s',
'ASYNCIO_EVENT_LOOP=uvloop.Loop',
])
self.assertIn("Using asyncio event loop: uvloop.Loop", log)
def test_custom_asyncio_loop_enabled_false(self):
log = self.get_log(self.debug_log_spider, args=[
'-s', 'TWISTED_REACTOR=twisted.internet.asyncioreactor.AsyncioSelectorReactor'
])
import asyncio
if sys.platform != 'win32':
loop = asyncio.new_event_loop()
else:
loop = asyncio.SelectorEventLoop()
self.assertIn(f"Using asyncio event loop: {loop.__module__}.{loop.__class__.__name__}", log)
def test_output(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return []
"""
args = ['-o', 'example.json']
log = self.get_log(spider_code, args=args)
self.assertIn("[myspider] DEBUG: FEEDS: {'example.json': {'format': 'json'}}", log)
def test_overwrite_output(self):
spider_code = """
import json
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug(
'FEEDS: {}'.format(
json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)
)
)
return []
"""
with open(os.path.join(self.cwd, "example.json"), "w") as f1:
f1.write("not empty")
args = ['-O', 'example.json']
log = self.get_log(spider_code, args=args)
self.assertIn('[myspider] DEBUG: FEEDS: {"example.json": {"format": "json", "overwrite": true}}', log)
with open(os.path.join(self.cwd, "example.json")) as f2:
first_line = f2.readline()
self.assertNotEqual(first_line, "not empty")
def test_output_and_overwrite_output(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
return []
"""
args = ['-o', 'example1.json', '-O', 'example2.json']
log = self.get_log(spider_code, args=args)
self.assertIn("error: Please use only one of -o/--output and -O/--overwrite-output", log)
def test_output_stdout(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return []
"""
args = ['-o', '-:json']
log = self.get_log(spider_code, args=args)
self.assertIn("[myspider] DEBUG: FEEDS: {'stdout:': {'format': 'json'}}", log)
@skipIf(platform.system() != 'Windows', "Windows required for .pyw files")
class WindowsRunSpiderCommandTest(RunSpiderCommandTest):
spider_filename = 'myspider.pyw'
def setUp(self):
super(WindowsRunSpiderCommandTest, self).setUp()
def test_start_requests_errors(self):
log = self.get_log(self.badspider, name='badspider.pyw')
self.assertIn("start_requests", log)
self.assertIn("badspider.pyw", log)
def test_run_good_spider(self):
super().test_run_good_spider()
def test_runspider(self):
super().test_runspider()
def test_runspider_dnscache_disabled(self):
super().test_runspider_dnscache_disabled()
def test_runspider_log_level(self):
super().test_runspider_log_level()
def test_runspider_log_short_names(self):
super().test_runspider_log_short_names()
def test_runspider_no_spider_found(self):
super().test_runspider_no_spider_found()
def test_output(self):
super().test_output()
def test_overwrite_output(self):
super().test_overwrite_output()
def test_runspider_unable_to_load(self):
raise unittest.SkipTest("Already tested in 'RunSpiderCommandTest'")
class BenchCommandTest(CommandTest):
def test_run(self):
_, _, log = self.proc('bench', '-s', 'LOGSTATS_INTERVAL=0.001',
'-s', 'CLOSESPIDER_TIMEOUT=0.01')
self.assertIn('INFO: Crawled', log)
self.assertNotIn('Unhandled Error', log)
class ViewCommandTest(CommandTest):
def test_methods(self):
command = view.Command()
command.settings = Settings()
parser = argparse.ArgumentParser(prog='scrapy', prefix_chars='-',
formatter_class=ScrapyHelpFormatter,
conflict_handler='resolve')
command.add_options(parser)
self.assertEqual(command.short_desc(),
"Open URL in browser, as seen by Scrapy")
self.assertIn("URL using the Scrapy downloader and show its",
command.long_desc())
class CrawlCommandTest(CommandTest):
def crawl(self, code, args=()):
fname = abspath(join(self.proj_mod_path, 'spiders', 'myspider.py'))
with open(fname, 'w') as f:
f.write(code)
return self.proc('crawl', 'myspider', *args)
def get_log(self, code, args=()):
_, _, stderr = self.crawl(code, args=args)
return stderr
def test_no_output(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug('It works!')
return []
"""
log = self.get_log(spider_code)
self.assertIn("[myspider] DEBUG: It works!", log)
def test_output(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug('FEEDS: {}'.format(self.settings.getdict('FEEDS')))
return []
"""
args = ['-o', 'example.json']
log = self.get_log(spider_code, args=args)
self.assertIn("[myspider] DEBUG: FEEDS: {'example.json': {'format': 'json'}}", log)
def test_overwrite_output(self):
spider_code = """
import json
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
self.logger.debug(
'FEEDS: {}'.format(
json.dumps(self.settings.getdict('FEEDS'), sort_keys=True)
)
)
return []
"""
with open(os.path.join(self.cwd, "example.json"), "w") as f1:
f1.write("not empty")
args = ['-O', 'example.json']
log = self.get_log(spider_code, args=args)
self.assertIn('[myspider] DEBUG: FEEDS: {"example.json": {"format": "json", "overwrite": true}}', log)
with open(os.path.join(self.cwd, "example.json")) as f2:
first_line = f2.readline()
self.assertNotEqual(first_line, "not empty")
def test_output_and_overwrite_output(self):
spider_code = """
import scrapy
class MySpider(scrapy.Spider):
name = 'myspider'
def start_requests(self):
return []
"""
args = ['-o', 'example1.json', '-O', 'example2.json']
log = self.get_log(spider_code, args=args)
self.assertIn("error: Please use only one of -o/--output and -O/--overwrite-output", log)
class HelpMessageTest(CommandTest):
def setUp(self):
super().setUp()
self.commands = ["parse", "startproject", "view", "crawl", "edit",
"list", "fetch", "settings", "shell", "runspider",
"version", "genspider", "check", "bench"]
def test_help_messages(self):
for command in self.commands:
_, out, _ = self.proc(command, "-h")
self.assertIn("Usage", out)
| 36.771911 | 119 | 0.618471 |
727d6eb490e1b0288fcf975f055084a550b2b513 | 12,079 | py | Python | tests/unit/extractor/test_athena_metadata_extractor.py | Gusto/amundsendatabuilder-1 | d24cba9d51795f908c8325d847e020b3c949f34a | [
"Apache-2.0"
] | 1 | 2020-08-20T16:22:07.000Z | 2020-08-20T16:22:07.000Z | tests/unit/extractor/test_athena_metadata_extractor.py | Gusto/amundsendatabuilder-1 | d24cba9d51795f908c8325d847e020b3c949f34a | [
"Apache-2.0"
] | 2 | 2020-07-20T16:03:49.000Z | 2020-08-14T16:14:10.000Z | tests/unit/extractor/test_athena_metadata_extractor.py | Gusto/amundsendatabuilder-1 | d24cba9d51795f908c8325d847e020b3c949f34a | [
"Apache-2.0"
] | null | null | null | # Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
import logging
import unittest
from mock import patch, MagicMock
from pyhocon import ConfigFactory
from typing import Any, Dict # noqa: F401
from databuilder.extractor.athena_metadata_extractor import AthenaMetadataExtractor
from databuilder.extractor.sql_alchemy_extractor import SQLAlchemyExtractor
from databuilder.models.table_metadata import TableMetadata, ColumnMetadata
class TestAthenaMetadataExtractor(unittest.TestCase):
def setUp(self):
# type: () -> None
logging.basicConfig(level=logging.INFO)
config_dict = {
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
'TEST_CONNECTION',
'extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY):
'MY_CATALOG'
}
self.conf = ConfigFactory.from_dict(config_dict)
def test_extraction_with_empty_query_result(self):
# type: () -> None
"""
Test Extraction with empty result from query
"""
with patch.object(SQLAlchemyExtractor, '_get_connection'):
extractor = AthenaMetadataExtractor()
extractor.init(self.conf)
results = extractor.extract()
self.assertEqual(results, None)
def test_extraction_with_single_result(self):
# type: () -> None
with patch.object(SQLAlchemyExtractor, '_get_connection') as mock_connection:
connection = MagicMock()
mock_connection.return_value = connection
sql_execute = MagicMock()
connection.execute = sql_execute
table = {'schema': 'test_schema',
'name': 'test_table',
'description': '',
'cluster': self.conf['extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY)],
}
sql_execute.return_value = [
self._union(
{'col_name': 'col_id1',
'col_type': 'bigint',
'col_description': 'description of id1',
'col_sort_order': 0,
'extras': None}, table),
self._union(
{'col_name': 'col_id2',
'col_type': 'bigint',
'col_description': 'description of id2',
'col_sort_order': 1,
'extras': None}, table),
self._union(
{'col_name': 'is_active',
'col_type': 'boolean',
'col_description': None,
'col_sort_order': 2,
'extras': None}, table),
self._union(
{'col_name': 'source',
'col_type': 'varchar',
'col_description': 'description of source',
'col_sort_order': 3,
'extras': None}, table),
self._union(
{'col_name': 'etl_created_at',
'col_type': 'timestamp',
'col_description': None,
'col_sort_order': 4,
'extras': 'partition key'}, table),
self._union(
{'col_name': 'ds',
'col_type': 'varchar',
'col_description': None,
'col_sort_order': 5,
'extras': None}, table)
]
extractor = AthenaMetadataExtractor()
extractor.init(self.conf)
actual = extractor.extract()
expected = TableMetadata('athena', self.conf['extractor.athena_metadata.{}'.
format(AthenaMetadataExtractor.CATALOG_KEY)], 'test_schema',
'test_table', '',
[ColumnMetadata('col_id1', 'description of id1', 'bigint', 0),
ColumnMetadata('col_id2', 'description of id2', 'bigint', 1),
ColumnMetadata('is_active', None, 'boolean', 2),
ColumnMetadata('source', 'description of source', 'varchar', 3),
ColumnMetadata('etl_created_at', 'partition key', 'timestamp', 4),
ColumnMetadata('ds', None, 'varchar', 5)])
self.assertEqual(expected.__repr__(), actual.__repr__())
self.assertIsNone(extractor.extract())
def test_extraction_with_multiple_result(self):
# type: () -> None
with patch.object(SQLAlchemyExtractor, '_get_connection') as mock_connection:
connection = MagicMock()
mock_connection.return_value = connection
sql_execute = MagicMock()
connection.execute = sql_execute
table = {'schema': 'test_schema1',
'name': 'test_table1',
'description': '',
'cluster': self.conf['extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY)],
}
table1 = {'schema': 'test_schema1',
'name': 'test_table2',
'description': '',
'cluster': self.conf['extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY)],
}
table2 = {'schema': 'test_schema2',
'name': 'test_table3',
'description': '',
'cluster': self.conf['extractor.athena_metadata.{}'.format(AthenaMetadataExtractor.CATALOG_KEY)],
}
sql_execute.return_value = [
self._union(
{'col_name': 'col_id1',
'col_type': 'bigint',
'col_description': 'description of col_id1',
'col_sort_order': 0,
'extras': None}, table),
self._union(
{'col_name': 'col_id2',
'col_type': 'bigint',
'col_description': 'description of col_id2',
'col_sort_order': 1,
'extras': None}, table),
self._union(
{'col_name': 'is_active',
'col_type': 'boolean',
'col_description': None,
'col_sort_order': 2,
'extras': None}, table),
self._union(
{'col_name': 'source',
'col_type': 'varchar',
'col_description': 'description of source',
'col_sort_order': 3,
'extras': None}, table),
self._union(
{'col_name': 'etl_created_at',
'col_type': 'timestamp',
'col_description': '',
'col_sort_order': 4,
'extras': 'partition key'}, table),
self._union(
{'col_name': 'ds',
'col_type': 'varchar',
'col_description': None,
'col_sort_order': 5,
'extras': None}, table),
self._union(
{'col_name': 'col_name',
'col_type': 'varchar',
'col_description': 'description of col_name',
'col_sort_order': 0,
'extras': None}, table1),
self._union(
{'col_name': 'col_name2',
'col_type': 'varchar',
'col_description': 'description of col_name2',
'col_sort_order': 1,
'extras': None}, table1),
self._union(
{'col_name': 'col_id3',
'col_type': 'varchar',
'col_description': 'description of col_id3',
'col_sort_order': 0,
'extras': None}, table2),
self._union(
{'col_name': 'col_name3',
'col_type': 'varchar',
'col_description': 'description of col_name3',
'col_sort_order': 1,
'extras': None}, table2)
]
extractor = AthenaMetadataExtractor()
extractor.init(self.conf)
expected = TableMetadata('athena',
self.conf['extractor.athena_metadata.{}'.format(
AthenaMetadataExtractor.CATALOG_KEY)],
'test_schema1', 'test_table1', '',
[ColumnMetadata('col_id1', 'description of col_id1', 'bigint', 0),
ColumnMetadata('col_id2', 'description of col_id2', 'bigint', 1),
ColumnMetadata('is_active', None, 'boolean', 2),
ColumnMetadata('source', 'description of source', 'varchar', 3),
ColumnMetadata('etl_created_at', 'partition key', 'timestamp', 4),
ColumnMetadata('ds', None, 'varchar', 5)])
self.assertEqual(expected.__repr__(), extractor.extract().__repr__())
expected = TableMetadata('athena',
self.conf['extractor.athena_metadata.{}'.format(
AthenaMetadataExtractor.CATALOG_KEY)],
'test_schema1', 'test_table2', '',
[ColumnMetadata('col_name', 'description of col_name', 'varchar', 0),
ColumnMetadata('col_name2', 'description of col_name2', 'varchar', 1)])
self.assertEqual(expected.__repr__(), extractor.extract().__repr__())
expected = TableMetadata('athena',
self.conf['extractor.athena_metadata.{}'.format(
AthenaMetadataExtractor.CATALOG_KEY)],
'test_schema2', 'test_table3', '',
[ColumnMetadata('col_id3', 'description of col_id3', 'varchar', 0),
ColumnMetadata('col_name3', 'description of col_name3',
'varchar', 1)])
self.assertEqual(expected.__repr__(), extractor.extract().__repr__())
self.assertIsNone(extractor.extract())
self.assertIsNone(extractor.extract())
def _union(self, target, extra):
# type: (Dict[Any, Any], Dict[Any, Any]) -> Dict[Any, Any]
target.update(extra)
return target
class TestAthenaMetadataExtractorWithWhereClause(unittest.TestCase):
def setUp(self):
# type: () -> None
logging.basicConfig(level=logging.INFO)
self.where_clause_suffix = """
where table_schema in ('public') and table_name = 'movies'
"""
config_dict = {
AthenaMetadataExtractor.WHERE_CLAUSE_SUFFIX_KEY: self.where_clause_suffix,
'extractor.sqlalchemy.{}'.format(SQLAlchemyExtractor.CONN_STRING):
'TEST_CONNECTION'
}
self.conf = ConfigFactory.from_dict(config_dict)
def test_sql_statement(self):
# type: () -> None
"""
Test that the configured where clause suffix is included in the generated SQL statement
"""
with patch.object(SQLAlchemyExtractor, '_get_connection'):
extractor = AthenaMetadataExtractor()
extractor.init(self.conf)
self.assertTrue(self.where_clause_suffix in extractor.sql_stmt)
if __name__ == '__main__':
unittest.main()
| 45.581132 | 119 | 0.488285 |
0725f533061943d937eca76cf9332b7183dcd121 | 1,123 | py | Python | VMEncryption/main/common.py | qingfuwang/azure-linux-extensions | 17b40f2522d53e5ced158b6789ffe6ef75f9791e | [
"Apache-2.0"
] | null | null | null | VMEncryption/main/common.py | qingfuwang/azure-linux-extensions | 17b40f2522d53e5ced158b6789ffe6ef75f9791e | [
"Apache-2.0"
] | null | null | null | VMEncryption/main/common.py | qingfuwang/azure-linux-extensions | 17b40f2522d53e5ced158b6789ffe6ef75f9791e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
#
# VM Backup extension
#
# Copyright 2014 Microsoft Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.7+
#
class CommonVariables:
azure_path = 'main/azure'
utils_path_name = 'Utils'
extension_name = 'VMEncryption'
extension_version = 1.0
extension_type = extension_name
extension_media_link = 'https://andliu.blob.core.windows.net/extensions/' + extension_name + '-' + str(extension_version) + '.zip'
extension_label = 'Windows Azure VMBackup Extension for Linux IaaS'
extension_description = extension_label
| 37.433333 | 135 | 0.727516 |
efff472a170258289ec1a29534bf49634d3598da | 1,070 | py | Python | var/spack/repos/builtin/packages/r-munsell/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2020-10-15T01:08:42.000Z | 2021-10-18T01:28:18.000Z | var/spack/repos/builtin/packages/r-munsell/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2019-07-30T10:12:28.000Z | 2019-12-17T09:02:27.000Z | var/spack/repos/builtin/packages/r-munsell/package.py | adrianjhpc/spack | 0a9e4fcee57911f2db586aa50c8873d9cca8de92 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 5 | 2019-07-30T09:42:14.000Z | 2021-01-25T05:39:20.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RMunsell(RPackage):
"""Provides easy access to, and manipulation of, the Munsell colours.
Provides a mapping between Munsell's original notation (e.g. "5R 5/10") and
hexadecimal strings suitable for use directly in R graphics. Also provides
utilities to explore slices through the Munsell colour tree, to transform
Munsell colours and display colour palettes."""
homepage = "https://cloud.r-project.org/package=munsell"
url = "https://cloud.r-project.org/src/contrib/munsell_0.4.3.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/munsell"
version('0.5.0', sha256='d0f3a9fb30e2b5d411fa61db56d4be5733a2621c0edf017d090bdfa5e377e199')
version('0.4.3', sha256='397c3c90af966f48eebe8f5d9e40c41b17541f0baaa102eec3ea4faae5a2bd49')
depends_on('r-colorspace', type=('build', 'run'))
| 44.583333 | 95 | 0.750467 |
f35787ec3c6f1eaf13935a6b9d8773a21c46ed2d | 1,046 | py | Python | deepxde/metrics.py | wangcj05/deepxde | c246eacdf89682fc84706a427c11fa5e00932198 | [
"Apache-2.0"
] | 8 | 2021-03-21T18:43:52.000Z | 2021-05-26T04:01:53.000Z | deepxde/metrics.py | wangcj05/deepxde | c246eacdf89682fc84706a427c11fa5e00932198 | [
"Apache-2.0"
] | null | null | null | deepxde/metrics.py | wangcj05/deepxde | c246eacdf89682fc84706a427c11fa5e00932198 | [
"Apache-2.0"
] | 1 | 2021-12-11T14:18:17.000Z | 2021-12-11T14:18:17.000Z | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
def accuracy(y_true, y_pred):
return np.mean(np.equal(np.argmax(y_pred, axis=-1), np.argmax(y_true, axis=-1)))
def l2_relative_error(y_true, y_pred):
return np.linalg.norm(y_true - y_pred) / np.linalg.norm(y_true)
def mean_absolute_percentage_error(y_true, y_pred):
return 100 * np.mean(np.abs(y_true - y_pred) / y_true)
def absolute_percentage_error_std(y_true, y_pred):
return 100 * np.std(np.abs(y_true - y_pred) / y_true)
def get(identifier):
metric_identifier = {
"accuracy": accuracy,
"l2 relative error": l2_relative_error,
"MAPE": mean_absolute_percentage_error,
"APE SD": absolute_percentage_error_std,
}
if isinstance(identifier, str):
return metric_identifier[identifier]
elif callable(identifier):
return identifier
else:
raise ValueError("Could not interpret metric function identifier:", identifier)
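# Usage sketch (illustrative only): get() accepts either one of the metric names
# registered above or a callable, e.g.
#   metric_fn = get("l2 relative error")
#   err = metric_fn(y_true, y_pred)
# A callable passed in directly is returned unchanged.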
| 27.526316 | 87 | 0.717973 |
4f0910e04ad8ef8d8a4034083699ab0416e7f422 | 909 | py | Python | app.py | jferdizzle/concretespicy | d68a4ac75f53e181ef7e740232415648beb83291 | [
"MIT"
] | null | null | null | app.py | jferdizzle/concretespicy | d68a4ac75f53e181ef7e740232415648beb83291 | [
"MIT"
] | 2 | 2019-04-13T01:55:38.000Z | 2019-04-15T03:43:59.000Z | app.py | jferdizzle/concretespicy | d68a4ac75f53e181ef7e740232415648beb83291 | [
"MIT"
] | null | null | null | import os
from flask import Flask, render_template, g
from gifs import get_giphy_results
from helpers import divide_chunks
app = Flask(__name__)
@app.route('/', strict_slashes=False)
def index():
gifs = get_giphy_results()
gifs = divide_chunks(gifs, int(len(gifs)/4))
return render_template(
'index.html',
gif_set_0=gifs[0],
gif_set_1=gifs[1],
gif_set_2=gifs[2],
gif_set_3=gifs[3],
nav_link="https://media.giphy.com/media/xUOwG3nVH6Of928xJm/giphy.gif",
nav_path="./map",
)
@app.errorhandler(404)
def page_not_found(page_name):
return index()
@app.route('/map', strict_slashes=False)
def map():
return render_template(
'map.html',
nav_link="https://media.giphy.com/media/TFedSWdWdQnoOJ3YWL/giphy.gif",
nav_path="./",
)
if __name__ == '__main__':
app.run(debug=os.environ.get('DEBUG', False))
| 24.567568 | 78 | 0.657866 |
923a98e9f69159a0da2e92093e05950d37ecec8e | 104 | py | Python | stubs/pydbus/registration.py | trickeydan/pepper2 | 3aba1c74568cd0a04c9178caba26e9238c90e9ba | [
"MIT"
] | null | null | null | stubs/pydbus/registration.py | trickeydan/pepper2 | 3aba1c74568cd0a04c9178caba26e9238c90e9ba | [
"MIT"
] | 22 | 2019-12-22T20:11:24.000Z | 2020-01-18T19:09:11.000Z | stubs/pydbus/registration.py | j5api/pepper2 | 3aba1c74568cd0a04c9178caba26e9238c90e9ba | [
"MIT"
] | null | null | null | """Stubs for pydbus.registration."""
class ObjectRegistration:
def unregister(self) -> None: ...
| 14.857143 | 37 | 0.673077 |
ed8a2679fde3fe415b3ca6bb32be1caece4d17d0 | 664 | py | Python | manage.py | aklauritzen/django_bootcamp | 1020a7e5d51e0de322cd97273ee523633d24147f | [
"MIT"
] | 9 | 2020-11-11T08:22:06.000Z | 2021-03-02T14:34:56.000Z | manage.py | aklauritzen/django_bootcamp | 1020a7e5d51e0de322cd97273ee523633d24147f | [
"MIT"
] | 1 | 2020-11-11T08:14:27.000Z | 2020-11-11T08:14:27.000Z | manage.py | aklauritzen/django_bootcamp | 1020a7e5d51e0de322cd97273ee523633d24147f | [
"MIT"
] | 4 | 2020-11-11T08:05:54.000Z | 2020-11-13T12:24:37.000Z | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'bootcamp.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.869565 | 73 | 0.679217 |
66bea2c89d806d024aeae8831ffd39b04f966af4 | 8,045 | py | Python | py/lib/utils/voc_map.py | zjZSTU/YOLO_v1 | 230c74187216e6e34dcb5a243290467b3b37d9f7 | [
"Apache-2.0"
] | 8 | 2020-08-07T20:17:15.000Z | 2021-11-23T09:47:32.000Z | py/lib/utils/voc_map.py | zjZSTU/YOLO_v1 | 230c74187216e6e34dcb5a243290467b3b37d9f7 | [
"Apache-2.0"
] | 1 | 2021-09-26T03:13:34.000Z | 2021-09-26T03:14:16.000Z | py/lib/utils/voc_map.py | zjZSTU/YOLO_v1 | 230c74187216e6e34dcb5a243290467b3b37d9f7 | [
"Apache-2.0"
] | 2 | 2021-04-28T08:16:19.000Z | 2022-02-28T08:11:49.000Z | # -*- coding: utf-8 -*-
"""
@date: 2020/4/20 3:30 PM
@file: voc_map.py
@author: zj
@description: PASCAL VOC-style mAP computation
"""
import json
import shutil
import os
import glob
import numpy as np
from utils import file
from utils import util
def pretreat(ground_truth_dir, detection_result_dir, tmp_json_dir):
"""
Preprocessing: make sure the ground-truth and detection result files correspond one-to-one, and reset the temporary directory
:param ground_truth_dir: directory holding the ground-truth bounding box files
:param detection_result_dir: directory holding the detection result files
:param tmp_json_dir: temporary working directory
"""
gt_list = [os.path.splitext(name)[0] for name in os.listdir(ground_truth_dir)]
dr_list = [os.path.splitext(name)[0] for name in os.listdir(detection_result_dir)]
if len(gt_list) == len(dr_list) and len(gt_list) == np.sum(
[True if name in dr_list else False for name in gt_list]):
pass
else:
util.error('ground-truth and detection result files do not correspond one-to-one')
if os.path.exists(tmp_json_dir):
# if it already exists,
# reset the tmp directory
shutil.rmtree(tmp_json_dir)
os.mkdir(tmp_json_dir)
def compute_tp_fp(cate, dt_list, tmp_json_dir, MIN_OVERLAP=0.5):
# {cate_1: [], cate2: [], ...}
nd = len(dt_list)
tp = [0] * nd # creates an array of zeros of size nd
fp = [0] * nd
# iterate over all candidate detection boxes and decide TP/FP/FN
for idx, dt_data in enumerate(dt_list):
# {"confidence": "0.999", "file_id": "cucumber_61", "bbox": [16, 42, 225, 163]}
# read the stored fields
file_id = dt_data['file_id']
dt_bbox = dt_data['bbox']
confidence = dt_data['confidence']
# load the ground-truth boxes of the corresponding file
gt_path = os.path.join(tmp_json_dir, file_id + ".json")
gt_data = json.load(open(gt_path))
# compute the IoU between the detection box and each ground-truth box of the same class, keeping the ground-truth box with the largest IoU
ovmax = -1
gt_match = -1
# load detected object bounding-box
for obj in gt_data:
# {"cate": "cucumber", "bbox": [23, 42, 206, 199], "used": true}
# read the stored fields
obj_cate = obj['cate']
obj_bbox = obj['bbox']
obj_used = obj['used']
# look for a class_name match
if obj_cate == cate:
bi = [max(dt_bbox[0], obj_bbox[0]), max(dt_bbox[1], obj_bbox[1]),
min(dt_bbox[2], obj_bbox[2]), min(dt_bbox[3], obj_bbox[3])]
iw = bi[2] - bi[0] + 1
ih = bi[3] - bi[1] + 1
if iw > 0 and ih > 0:
# compute overlap (IoU) = area of intersection / area of union
ua = (dt_bbox[2] - dt_bbox[0] + 1) * (dt_bbox[3] - dt_bbox[1] + 1) + \
(obj_bbox[2] - obj_bbox[0] + 1) * (obj_bbox[3] - obj_bbox[1] + 1) \
- iw * ih
ov = iw * ih / ua
if ov > ovmax:
ovmax = ov
gt_match = obj
# if the best IoU exceeds the minimum threshold, further check whether this detection counts as a TP
if ovmax >= MIN_OVERLAP:
if not bool(gt_match["used"]):
# true positive
tp[idx] = 1
gt_match["used"] = True
# update the ".json" file
with open(gt_path, 'w') as f:
json.dump(gt_data, f)
else:
# false positive (multiple detection)
fp[idx] = 1
else:
# false positive
fp[idx] = 1
return tp, fp
def compute_precision_recall(tp, fp, gt_per_classes_num):
"""
Compute precision/recall at each successive detection threshold
"""
# compute precision/recall
cumsum = 0
for idx, val in enumerate(fp):
fp[idx] += cumsum
cumsum += val
cumsum = 0
for idx, val in enumerate(tp):
tp[idx] += cumsum
cumsum += val
rec = tp[:]
for idx, val in enumerate(tp):
rec[idx] = float(val) / gt_per_classes_num
# print(rec)
prec = tp[:]
for idx, val in enumerate(tp):
prec[idx] = float(tp[idx]) / (fp[idx] + tp[idx])
return prec, rec
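# Worked example for illustration: with detections already sorted by confidence,
# tp=[1, 0, 1], fp=[0, 1, 0] and gt_per_classes_num=2, the in-place cumulative sums
# give tp=[1, 1, 2] and fp=[0, 1, 1], so rec=[0.5, 0.5, 1.0] and
# prec=[1.0, 0.5, 0.667] -- one precision/recall point per detection threshold.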
def voc_ap(rec, prec):
"""
--- Official matlab code VOC2012---
mrec=[0 ; rec ; 1];
mpre=[0 ; prec ; 0];
for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
end
i=find(mrec(2:end)~=mrec(1:end-1))+1;
ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
rec.insert(0, 0.0) # insert 0.0 at begining of list
rec.append(1.0) # insert 1.0 at end of list
mrec = rec[:]
prec.insert(0, 0.0) # insert 0.0 at begining of list
prec.append(0.0) # insert 0.0 at end of list
mpre = prec[:]
"""
This part makes the precision monotonically decreasing
(goes from the end to the beginning)
matlab: for i=numel(mpre)-1:-1:1
mpre(i)=max(mpre(i),mpre(i+1));
"""
# matlab indexes start in 1 but python in 0, so I have to do:
# range(start=(len(mpre) - 2), end=0, step=-1)
# also the python function range excludes the end, resulting in:
# range(start=(len(mpre) - 2), end=-1, step=-1)
for i in range(len(mpre) - 2, -1, -1):
mpre[i] = max(mpre[i], mpre[i + 1])
"""
This part creates a list of indexes where the recall changes
matlab: i=find(mrec(2:end)~=mrec(1:end-1))+1;
"""
i_list = []
for i in range(1, len(mrec)):
if mrec[i] != mrec[i - 1]:
i_list.append(i) # if it was matlab would be i + 1
"""
The Average Precision (AP) is the area under the curve
(numerical integration)
matlab: ap=sum((mrec(i)-mrec(i-1)).*mpre(i));
"""
ap = 0.0
for i in i_list:
ap += ((mrec[i] - mrec[i - 1]) * mpre[i])
return ap, mrec, mpre
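# Worked example for illustration: voc_ap([0.5, 1.0], [1.0, 0.5]) pads the inputs to
# mrec=[0, 0.5, 1.0, 1.0] and mpre=[0, 1.0, 0.5, 0], makes the precision envelope
# monotonically decreasing ([1.0, 1.0, 0.5, 0]) and integrates the recall steps:
# ap = (0.5 - 0) * 1.0 + (1.0 - 0.5) * 0.5 = 0.75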
def voc_ap2(rec, prec, use_07_metric=False):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_map(ground_truth_dir, detection_result_dir, tmp_json_dir):
pretreat(ground_truth_dir, detection_result_dir, tmp_json_dir)
# parse the .txt files into json format
gt_per_classes_dict = file.parse_ground_truth(ground_truth_dir, tmp_json_dir)
gt_classes = list(gt_per_classes_dict.keys())
# let's sort the classes alphabetically
gt_classes = sorted(gt_classes)
n_classes = len(gt_classes)
# print(gt_classes)
print(gt_per_classes_dict)
dt_per_classes_dict = file.parse_detection_results(detection_result_dir, tmp_json_dir)
MIN_OVERLAP = 0.5
# compute tp/fp for each class
sum_AP = 0.0
for cate in gt_classes:
tp, fp = compute_tp_fp(cate, dt_per_classes_dict[cate], tmp_json_dir, MIN_OVERLAP=MIN_OVERLAP)
prec, rec = compute_precision_recall(tp, fp, gt_per_classes_dict[cate])
# ap, mrec, mprec = voc_ap(rec[:], prec[:])
ap = voc_ap2(rec[:], prec[:])
sum_AP += ap
# class_name + " AP = {0:.2f}%".format(ap*100)
text = "{0:.2f}%".format(ap * 100) + " = " + cate + " AP "
print(text)
mAP = sum_AP / n_classes
text = "mAP = {0:.2f}%\n".format(mAP * 100)
print(text)
if __name__ == '__main__':
ground_truth_dir = '../../data/outputs/targets'
detection_result_dir = '../../data/outputs/preds'
tmp_json_dir = '../../data/outputs/.tmp_files'
voc_map(ground_truth_dir, detection_result_dir, tmp_json_dir)
| 32.439516 | 102 | 0.556122 |
d43c264b23936aed8203d75968e07da95141ae92 | 87,873 | py | Python | src/amuse/rfi/channel.py | GFTwrt/amuse | ff9e1ff6904e191f6b5a2e6f84c078062f553293 | [
"Apache-2.0"
] | null | null | null | src/amuse/rfi/channel.py | GFTwrt/amuse | ff9e1ff6904e191f6b5a2e6f84c078062f553293 | [
"Apache-2.0"
] | null | null | null | src/amuse/rfi/channel.py | GFTwrt/amuse | ff9e1ff6904e191f6b5a2e6f84c078062f553293 | [
"Apache-2.0"
] | null | null | null | import inspect
import numpy
import os.path
import pickle as pickle
import sys
import struct
import threading
import select
import atexit
import time
import socket
import array
import logging
import shlex
logger = logging.getLogger(__name__)
#
# we want to use the automatic initialization and finalization
# of the MPI library, but sometime MPI should not be imported
# when importing the channel
# so actual import is in function ensure_mpi_initialized
#
MPI = None
from subprocess import Popen, PIPE
try:
from amuse import config
except ImportError:
config = None
from amuse.support.options import OptionalAttributes, option, GlobalOptions
from amuse.support.core import late
from amuse.support import exceptions
from amuse.support import get_amuse_root_dir
from amuse.rfi import run_command_redirected
from amuse.rfi import slurm
from . import async_request
class AbstractMessage(object):
def __init__(self,
call_id=0, function_id=-1, call_count=1,
dtype_to_arguments={},
error=False,
big_endian=(sys.byteorder.lower() == 'big'),
polling_interval=0,
encoded_units = ()):
self.polling_interval = polling_interval
# flags
self.big_endian = big_endian
self.error = error
# header
self.call_id = call_id
self.function_id = function_id
self.call_count = call_count
# data (numpy arrays)
self.ints = []
self.longs = []
self.floats = []
self.doubles = []
self.strings = []
self.booleans = []
self.pack_data(dtype_to_arguments)
self.encoded_units = encoded_units
def pack_data(self, dtype_to_arguments):
for dtype, attrname in self.dtype_to_message_attribute():
if dtype in dtype_to_arguments:
array = pack_array(dtype_to_arguments[dtype], self.call_count, dtype)
setattr(self, attrname, array)
def to_result(self, handle_as_array=False):
dtype_to_result = {}
for dtype, attrname in self.dtype_to_message_attribute():
result = getattr(self, attrname)
if self.call_count > 1 or handle_as_array:
dtype_to_result[dtype] = unpack_array(result , self.call_count, dtype)
else:
dtype_to_result[dtype] = result
return dtype_to_result
def dtype_to_message_attribute(self):
return (
('int32', 'ints'),
('int64', 'longs'),
('float32', 'floats'),
('float64', 'doubles'),
('bool', 'booleans'),
('string', 'strings'),
)
def receive(self, comm):
raise NotImplementedError
def send(self, comm):
raise NotImplementedError
def set_error(self, message):
self.strings = [message]
self.error = True
class MPIMessage(AbstractMessage):
def receive(self, comm):
header = self.receive_header(comm)
self.receive_content(comm, header)
def receive_header(self, comm):
header = numpy.zeros(11, dtype='i')
self.mpi_receive(comm, [header, MPI.INT])
return header
def receive_content(self, comm, header):
# 4 flags as 8bit booleans in 1st 4 bytes of header
# endiannes(not supported by MPI channel), error, unused, unused
flags = header.view(dtype='bool8')
self.big_endian = flags[0]
self.error = flags[1]
self.is_continued = flags[2]
self.call_id = header[1]
self.function_id = header[2]
self.call_count = header[3]
number_of_ints = header[4]
number_of_longs = header[5]
number_of_floats = header[6]
number_of_doubles = header[7]
number_of_booleans = header[8]
number_of_strings = header[9]
number_of_units = header[10]
self.ints = self.receive_ints(comm, number_of_ints)
self.longs = self.receive_longs(comm, number_of_longs)
self.floats = self.receive_floats(comm, number_of_floats)
self.doubles = self.receive_doubles(comm, number_of_doubles)
self.booleans = self.receive_booleans(comm, number_of_booleans)
self.strings = self.receive_strings(comm, number_of_strings)
self.encoded_units = self.receive_doubles(comm, number_of_units)
def nonblocking_receive(self, comm):
header = numpy.zeros(11, dtype='i')
request = self.mpi_nonblocking_receive(comm, [header, MPI.INT])
return async_request.ASyncRequest(request, self, comm, header)
def receive_doubles(self, comm, total):
if total > 0:
result = numpy.empty(total, dtype='d')
self.mpi_receive(comm, [result, MPI.DOUBLE])
return result
else:
return []
def receive_ints(self, comm, total):
if total > 0:
result = numpy.empty(total, dtype='i')
self.mpi_receive(comm, [result, MPI.INT])
return result
else:
return []
def receive_longs(self, comm, total):
if total > 0:
result = numpy.empty(total, dtype='int64')
self.mpi_receive(comm, [result, MPI.INTEGER8])
return result
else:
return []
def receive_floats(self, comm, total):
if total > 0:
result = numpy.empty(total, dtype='f')
self.mpi_receive(comm, [result, MPI.FLOAT])
return result
else:
return []
def receive_booleans(self, comm, total):
if total > 0:
result = numpy.empty(total, dtype='b')
self.mpi_receive(comm, [result, MPI.C_BOOL or MPI.BYTE])  # fall back to MPI.BYTE if MPI.C_BOOL is a null (undefined) datatype
return numpy.logical_not(result == 0)
else:
return []
def receive_strings(self, comm, total):
if total > 0:
sizes = numpy.empty(total, dtype='i')
self.mpi_receive(comm, [sizes, MPI.INT])
logger.debug("got %d strings of size %s", total, sizes)
byte_size = 0
for size in sizes:
byte_size = byte_size + size + 1
data_bytes = numpy.empty(byte_size, dtype=numpy.uint8)
self.mpi_receive(comm, [data_bytes, MPI.CHARACTER])
strings = []
begin = 0
for size in sizes:
strings.append(data_bytes[begin:begin + size].tostring().decode('latin_1'))
begin = begin + size + 1
logger.debug("got %d strings of size %s, data = %s", total, sizes, strings)
return strings
else:
return []
def send(self, comm):
header = numpy.array([
0,
self.call_id,
self.function_id,
self.call_count,
len(self.ints) ,
len(self.longs) ,
len(self.floats) ,
len(self.doubles) ,
len(self.booleans) ,
len(self.strings) ,
len(self.encoded_units)
], dtype='i')
flags = header.view(dtype='bool8')
flags[0] = self.big_endian
flags[1] = self.error
flags[2] = len(self.encoded_units) > 0
self.send_header(comm, header)
self.send_content(comm)
def send_header(self, comm, header):
self.mpi_send(comm, [header, MPI.INT])
def send_content(self, comm):
self.send_ints(comm, self.ints)
self.send_longs(comm, self.longs)
self.send_floats(comm, self.floats)
self.send_doubles(comm, self.doubles)
self.send_booleans(comm, self.booleans)
self.send_strings(comm, self.strings)
self.send_doubles(comm, self.encoded_units)
def send_ints(self, comm, array):
if len(array) > 0:
sendbuffer = numpy.array(array, dtype='int32')
self.mpi_send(comm, [sendbuffer, MPI.INT])
def send_longs(self, comm, array):
if len(array) > 0:
sendbuffer = numpy.array(array, dtype='int64')
self.mpi_send(comm, [sendbuffer, MPI.INTEGER8])
def send_doubles(self, comm, array):
if len(array) > 0:
sendbuffer = numpy.array(array, dtype='d')
self.mpi_send(comm, [sendbuffer, MPI.DOUBLE])
def send_floats(self, comm, array):
if len(array) > 0:
sendbuffer = numpy.array(array, dtype='f')
self.mpi_send(comm, [sendbuffer, MPI.FLOAT])
def send_strings(self, comm, array):
if len(array) == 0:
return
lengths = numpy.array( [len(s) for s in array] ,dtype='i')
chars=(chr(0).join(array)+chr(0)).encode("utf-8")
chars = numpy.fromstring(chars, dtype='uint8')
if len(chars) != lengths.sum()+len(lengths):
raise Exception("send_strings size mismatch {0} vs {1}".format( len(chars) , lengths.sum()+len(lengths) ))
self.mpi_send(comm, [lengths, MPI.INT])
self.mpi_send(comm, [chars, MPI.CHARACTER])
def send_booleans(self, comm, array):
if len(array) > 0:
sendbuffer = numpy.array(array, dtype='b')
self.mpi_send(comm, [sendbuffer, MPI.C_BOOL or MPI.BYTE])
def set_error(self, message):
self.strings = [message]
self.error = True
def mpi_nonblocking_receive(self, comm, array):
raise NotImplementedError()
def mpi_receive(self, comm, array):
raise NotImplementedError()
def mpi_send(self, comm, array):
raise NotImplementedError()
class ServerSideMPIMessage(MPIMessage):
def mpi_receive(self, comm, array):
request = comm.Irecv(array, source=0, tag=999)
request.Wait()
def mpi_send(self, comm, array):
comm.Bcast(array, root=MPI.ROOT)
def send_header(self, comm, array):
requests = []
for rank in range(comm.Get_remote_size()):
request = comm.Isend(array, dest=rank, tag=989)
requests.append(request)
MPI.Request.Waitall(requests)
def mpi_nonblocking_receive(self, comm, array):
return comm.Irecv(array, source=0, tag=999)
def receive_header(self, comm):
header = numpy.zeros(11, dtype='i')
request = self.mpi_nonblocking_receive(comm, [header, MPI.INT])
if self.polling_interval > 0:
is_finished = request.Test()
while not is_finished:
time.sleep(self.polling_interval / 1000000.)
is_finished = request.Test()
request.Wait()
else:
request.Wait()
return header
class ClientSideMPIMessage(MPIMessage):
def mpi_receive(self, comm, array):
comm.Bcast(array, root=0)
def mpi_send(self, comm, array):
comm.Send(array, dest=0, tag=999)
def mpi_nonblocking_receive(self, comm, array):
return comm.Irecv(array, source=0, tag=999)
def receive_header(self, comm):
header = numpy.zeros(11, dtype='i')
request = comm.Irecv([header, MPI.INT], source=0, tag=989)
if self.polling_interval > 0:
is_finished = request.Test()
while not is_finished:
time.sleep(self.polling_interval / 1000000.)
is_finished = request.Test()
request.Wait()
else:
request.Wait()
return header
MAPPING = {}
def pack_array(array, length, dtype):
if dtype == 'string':
if length == 1 and len(array) > 0 and isinstance(array[0], str):
return array
result = []
for x in array:
if isinstance(x, str):
for _ in range(length):
result.append(x)
elif len(x) == 1 and length > 1:
for _ in range(length):
result.append(x[0])
else:
result.extend(x)
return result
else:
total_length = length * len(array)
if dtype in MAPPING:
result = MAPPING[dtype]
if len(result) != total_length:
result = numpy.empty(length * len(array), dtype=dtype)
else:
result = numpy.empty(length * len(array), dtype=dtype)
for i in range(len(array)):
offset = i * length
result[offset:offset + length] = array[i]
return result
def unpack_array(array, length, dtype=None):
result = []
total = len(array) // length
for i in range(total):
offset = i * length
result.append(array[offset:offset + length])
return result
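# Round-trip sketch for illustration: with two int32 arguments and a call count of 3,
# pack_array([[1, 2, 3], [4, 5, 6]], 3, 'int32') flattens the per-argument values into
# [1, 2, 3, 4, 5, 6], and unpack_array(numpy.array([1, 2, 3, 4, 5, 6]), 3) splits the
# flat buffer back into [[1, 2, 3], [4, 5, 6]], one slice per argument.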
class AbstractMessageChannel(OptionalAttributes):
"""
Abstract base class of all message channels.
A message channel is used to send and retrieve messages from
a remote party. A message channel can also set up the remote
party, for example by starting an instance of an application
using MPI calls.
The messages are encoded as arguments to the send and retrieve
methods. Each message has an id and an optional list of doubles,
integers, floats and/or strings.
"""
def __init__(self, **options):
OptionalAttributes.__init__(self, **options)
@classmethod
def GDB(cls, full_name_of_the_worker, channel, interpreter_executable=None):
arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'gdb', '--args']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'xterm'
return command, arguments
@classmethod
def LLDB(cls, full_name_of_the_worker, channel, interpreter_executable=None):
arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'lldb', '--']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'xterm'
return command, arguments
@classmethod
def DDD(cls, full_name_of_the_worker, channel, interpreter_executable=None):
if os.name == 'nt':
arguments = [full_name_of_the_worker, "--args",full_name_of_the_worker]
command = channel.adg_exe
return command, arguments
else:
arguments = ['-display', os.environ['DISPLAY'], '-e', 'ddd', '--args']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'xterm'
return command, arguments
@classmethod
def CUDDD(cls, full_name_of_the_worker, channel, interpreter_executable=None):
if os.name == 'nt':
arguments = [full_name_of_the_worker, "--args",full_name_of_the_worker]
command = channel.adg_exe
return command, arguments
else:
arguments = ['-display', os.environ['DISPLAY'], '-e', 'ddd', '--debugger', 'cuda-gdb', '--args']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'xterm'
return command, arguments
@classmethod
def VALGRIND(cls, full_name_of_the_worker, channel, interpreter_executable=None):
# arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e', 'valgrind', full_name_of_the_worker]
arguments = []
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'valgrind'
return command, arguments
@classmethod
def XTERM(cls, full_name_of_the_worker, channel, interpreter_executable=None):
arguments = ['-hold', '-display', os.environ['DISPLAY'], '-e']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'xterm'
return command, arguments
@classmethod
def REDIRECT(cls, full_name_of_the_worker, stdoutname, stderrname, command=None, interpreter_executable=None, **options):
fname = run_command_redirected.__file__
arguments = [fname , stdoutname, stderrname]
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
if command is None :
command = sys.executable
return command, arguments
@classmethod
def GDBR(cls, full_name_of_the_worker, channel, interpreter_executable=None):
"remote gdb, can run without xterm"
arguments = ['localhost:{0}'.format(channel.debugger_port)]
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = channel.gdbserver_exe
return command, arguments
@classmethod
def NODEBUGGER(cls, full_name_of_the_worker, channel, interpreter_executable=None):
if not interpreter_executable is None:
return interpreter_executable, [full_name_of_the_worker]
else:
return full_name_of_the_worker, []
@classmethod
def STRACE(cls, full_name_of_the_worker, channel, interpreter_executable=None):
arguments = ['-ostrace-out', '-ff']
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = 'strace'
return command, arguments
@classmethod
def CUSTOM(cls, full_name_of_the_worker, channel, interpreter_executable=None):
arguments = list(shlex.split(channel.custom_args))
if not interpreter_executable is None:
arguments.append(interpreter_executable)
arguments.append(full_name_of_the_worker)
command = channel.custom_exe
return command, arguments
@classmethod
def is_multithreading_supported(cls):
return True
@option(type='string', sections=("channel",))
def worker_code_suffix(self):
return ''
@option(type='string', sections=("channel",))
def worker_code_prefix(self):
return ''
@option(type='string', sections=("channel",))
def worker_code_directory(self):
return ''
@option(type="boolean", sections=("channel",))
def can_redirect_output(self):
return True
@option(sections=("channel",))
def python_exe_for_redirection(self):
return None
@option(type="int", sections=("channel",))
def debugger_port(self):
return 4343
@option(type="string", sections=("channel",))
def gdbserver_exe(self):
return 'gdbserver'
@option(type="string", sections=("channel",))
def adg_exe(self):
return 'adg.exe'
@option(type="string", sections=("channel",))
def custom_exe(self):
return 'mintty.exe'
@option(type="string", sections=("channel",))
def custom_args(self):
return '--hold -e gdb --args'
@option(type='boolean', sections=("channel",))
def must_check_if_worker_is_up_to_date(self):
return True
@option(type='boolean', sections=("channel",))
def check_worker_location(self):
return True
@option(type="int", sections=("channel",))
def number_of_workers(self):
return 1
def get_amuse_root_directory(self):
return self.amuse_root_dir
@option(type="string", sections=('data',))
def amuse_root_dir(self): # needed for location of data, so same as in support.__init__
return get_amuse_root_dir()
def check_if_worker_is_up_to_date(self, object):
if not self.must_check_if_worker_is_up_to_date:
return
name_of_the_compiled_file = self.full_name_of_the_worker
modificationtime_of_worker = os.stat(name_of_the_compiled_file).st_mtime
my_class = type(object)
for x in dir(my_class):
if x.startswith('__'):
continue
value = getattr(my_class, x)
if hasattr(value, 'crc32'):
is_up_to_date = value.is_compiled_file_up_to_date(modificationtime_of_worker)
if not is_up_to_date:
raise exceptions.CodeException("""The worker code of the '{0}' interface class is not up to date.
Please do a 'make clean; make' in the root directory.
""".format(type(object).__name__))
def get_full_name_of_the_worker(self, type):
if os.path.isabs(self.name_of_the_worker):
full_name_of_the_worker=self.name_of_the_worker
if not self.check_worker_location:
return full_name_of_the_worker
if not os.path.exists(full_name_of_the_worker):
raise exceptions.CodeException("The worker path has been specified, but it is not found: \n{0}".format(full_name_of_the_worker))
if not os.access(full_name_of_the_worker, os.X_OK):
raise exceptions.CodeException("The worker application exists, but it is not executable.\n{0}".format(full_name_of_the_worker))
return full_name_of_the_worker
exe_name = self.worker_code_prefix + self.name_of_the_worker + self.worker_code_suffix
if not self.check_worker_location:
if len(self.worker_code_directory) > 0:
full_name_of_the_worker = os.path.join(self.worker_code_directory, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
return full_name_of_the_worker
else:
raise Exception("Must provide a worker_code_directory")
tried_workers = []
directory = os.path.dirname(inspect.getfile(type))
full_name_of_the_worker = os.path.join(directory, '..','..','_workers', exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
if len(self.worker_code_directory) > 0:
full_name_of_the_worker = os.path.join(self.worker_code_directory, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
directory_of_this_module = os.path.dirname(os.path.dirname(__file__))
full_name_of_the_worker = os.path.join(directory_of_this_module, '_workers', exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
current_type = type
while not current_type.__bases__[0] is object:
directory_of_this_module = os.path.dirname(inspect.getfile(current_type))
full_name_of_the_worker = os.path.join(directory_of_this_module, exe_name)
full_name_of_the_worker = os.path.normpath(os.path.abspath(full_name_of_the_worker))
if os.path.exists(full_name_of_the_worker):
return full_name_of_the_worker
tried_workers.append(full_name_of_the_worker)
current_type = current_type.__bases__[0]
raise exceptions.CodeException("The worker application does not exist, it should be at: \n{0}".format('\n'.join(tried_workers)))
def send_message(self, call_id=0, function_id=-1, dtype_to_arguments={}, encoded_units = None):
pass
def recv_message(self, call_id=0, function_id=-1, handle_as_array=False, has_units = False):
pass
def nonblocking_recv_message(self, call_id=0, function_id=-1, handle_as_array=False):
pass
def start(self):
pass
def stop(self):
pass
def is_active(self):
return True
@classmethod
def is_root(self):
return True
def is_polling_supported(self):
return False
def determine_length_from_data(self, dtype_to_arguments):
def get_length(type_and_values):
argument_type, argument_values = type_and_values
if argument_values:
result = 1
for argument_value in argument_values:
try:
if not isinstance(argument_value, str):
result = max(result, len(argument_value))
except:
result = max(result, 1)
return result
lengths = [get_length(x) for x in dtype_to_arguments.items()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
def split_message(self, call_id, function_id, call_count, dtype_to_arguments, encoded_units = ()):
if call_count<=1:
raise Exception("split message called with call_count<=1")
dtype_to_result = {}
ndone=0
while ndone<call_count:
split_dtype_to_argument = {}
for key, value in dtype_to_arguments.items():
split_dtype_to_argument[key] = \
[tmp[ndone:ndone+self.max_message_length] if hasattr(tmp, '__iter__') else tmp for tmp in value]
self.send_message(
call_id,
function_id,
split_dtype_to_argument,
encoded_units=encoded_units
)
partial_dtype_to_result = self.recv_message(call_id, function_id, True)
for datatype, value in partial_dtype_to_result.items():
if not datatype in dtype_to_result:
dtype_to_result[datatype] = []
for j, element in enumerate(value):
if datatype == 'string':
dtype_to_result[datatype].append([])
else:
dtype_to_result[datatype].append(numpy.zeros((call_count,), dtype=datatype))
for j, element in enumerate(value):
if datatype == 'string':
dtype_to_result[datatype][j].extend(element)
else:
dtype_to_result[datatype][j][ndone:ndone+self.max_message_length] = element
ndone+=self.max_message_length
self._communicated_splitted_message = True
self._merged_results_splitted_message = dtype_to_result
AbstractMessageChannel.DEBUGGERS = {
"none":None,
"gdb":AbstractMessageChannel.GDB,
"lldb":AbstractMessageChannel.LLDB,
"ddd":AbstractMessageChannel.DDD,
"cuddd":AbstractMessageChannel.CUDDD,
"xterm":AbstractMessageChannel.XTERM,
"gdb-remote":AbstractMessageChannel.GDBR,
"valgrind":AbstractMessageChannel.VALGRIND,
"strace":AbstractMessageChannel.STRACE,
"custom":AbstractMessageChannel.CUSTOM
}
# import time
# import ctypes
# clib_library = ctypes.CDLL("libc.so.6")
# memcpy = clib_library.memcpy
# memcpy.argtypes = [ctypes.c_void_p, ctypes.c_void_p, ctypes.c_size_t]
def is_mpd_running():
"""
Determine if the MPD daemon process is running.
Needed for installations of AMUSE in an MPICH2 environment using
the default MPD daemon. The MPD daemon must be
running before the first MPI_COMM_SPAWN call is made.
Returns True for other MPI vendors (OpenMPI).
:returns: Boolean result of check whether MPD daemon is running.
:rtype: bool
>>> is_mpd_running()
True
"""
if not MpiChannel.is_supported():
return True
MpiChannel.ensure_mpi_initialized()
name_of_the_vendor, version = MPI.get_vendor()
if name_of_the_vendor == 'MPICH2':
must_check_mpd = True
if 'AMUSE_MPD_CHECK' in os.environ:
must_check_mpd = os.environ['AMUSE_MPD_CHECK'] == '1'
if 'PMI_PORT' in os.environ:
must_check_mpd = False
if 'PMI_RANK' in os.environ:
must_check_mpd = False
if 'HYDRA_CONTROL_FD' in os.environ:
must_check_mpd = False
if not must_check_mpd:
return True
try:
process = Popen(['mpdtrace'], stdout=PIPE, stderr=PIPE)
(output_string, error_string) = process.communicate()
return not (process.returncode == 255)
except OSError as ex:
return True
else:
return True
class MpiChannel(AbstractMessageChannel):
"""
Message channel based on MPI calls to send and recv the messages
:argument name_of_the_worker: Name of the application to start
:argument number_of_workers: Number of parallel processes
:argument legacy_interface_type: Type of the legacy interface
:argument debug_with_gdb: If True opens an xterm with a gdb to debug the remote process
:argument hostname: Name of the node to run the application on
"""
_mpi_is_broken_after_possible_code_crash = False
_intercomms_to_disconnect = []
_is_registered = False
_scheduler_nodes = []
_scheduler_index = 0
_scheduler_initialized = False
def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
AbstractMessageChannel.__init__(self, **options)
self.inuse_semaphore = threading.Semaphore()
# logging.basicConfig(level=logging.WARN)
# logger.setLevel(logging.DEBUG)
# logging.getLogger("code").setLevel(logging.DEBUG)
self.ensure_mpi_initialized()
self.name_of_the_worker = name_of_the_worker
self.interpreter_executable = interpreter_executable
if not legacy_interface_type is None:
self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
else:
self.full_name_of_the_worker = self.name_of_the_worker
if self.check_mpi:
if not is_mpd_running():
raise exceptions.CodeException("The mpd daemon is not running, please make sure it is started before starting this code")
if self._mpi_is_broken_after_possible_code_crash:
raise exceptions.CodeException("Another code has crashed, cannot spawn a new code, please stop the script and retry")
if not self.hostname is None:
self.info = MPI.Info.Create()
self.info['host'] = self.hostname
else:
if self.job_scheduler:
self.info = self.get_info_from_job_scheduler(self.job_scheduler)
else:
self.info = MPI.INFO_NULL
self.cached = None
self.intercomm = None
self._is_inuse = False
self._communicated_splitted_message = False
@classmethod
def ensure_mpi_initialized(cls):
global MPI
if MPI is None:
import mpi4py.MPI
MPI = mpi4py.MPI
cls.register_finalize_code()
@classmethod
def is_threaded(cls):
#We want this for backwards compatibility with mpi4py versions < 2.0.0
#currently unused after Init/Init_threaded was removed from
#this module.
from mpi4py import rc
try:
return rc.threaded
except AttributeError:
return rc.threads
@classmethod
def register_finalize_code(cls):
if not cls._is_registered:
atexit.register(cls.finialize_mpi_atexit)
cls._is_registered = True
@classmethod
def finialize_mpi_atexit(cls):
if not MPI.Is_initialized():
return
if MPI.Is_finalized():
return
try:
for x in cls._intercomms_to_disconnect:
x.Disconnect()
except MPI.Exception as ex:
return
@classmethod
def is_multithreading_supported(cls):
return MPI.Query_thread() == MPI.THREAD_MULTIPLE
@option(type="boolean", sections=("channel",))
def check_mpi(self):
return True
@option(type="boolean", sections=("channel",))
def debug_with_gdb(self):
return False
@option(sections=("channel",))
def hostname(self):
return None
@option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
def debugger(self):
"""Name of the debugger to use when starting the code"""
return "none"
@option(type="int", sections=("channel",))
def max_message_length(self):
"""
For calls to functions that can handle arrays, MPI messages may get too long for large N.
The MPI channel will split long messages into blocks of size max_message_length.
"""
return 1000000
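# For example, a call with a call count of 2,500,000 and this default maximum of
# 1,000,000 is transparently split by split_message() into three sub-messages of
# 1,000,000, 1,000,000 and 500,000 elements, whose partial results are merged again
# before being handed back to the caller.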
@late
def redirect_stdout_file(self):
return "/dev/null"
@late
def redirect_stderr_file(self):
return "/dev/null"
@late
def debugger_method(self):
return self.DEBUGGERS[self.debugger]
@classmethod
def is_supported(cls):
if hasattr(config, 'mpi') and hasattr(config.mpi, 'is_enabled'):
if not config.mpi.is_enabled:
return False
try:
from mpi4py import MPI
return True
except ImportError:
return False
@option(type="boolean", sections=("channel",))
def can_redirect_output(self):
name_of_the_vendor, version = MPI.get_vendor()
if name_of_the_vendor == 'MPICH2':
if 'MPISPAWN_ARGV_0' in os.environ:
return False
return True
@option(type="boolean", sections=("channel",))
def must_disconnect_on_stop(self):
name_of_the_vendor, version = MPI.get_vendor()
if name_of_the_vendor == 'MPICH2':
if 'MPISPAWN_ARGV_0' in os.environ:
return False
return True
@option(type="int", sections=("channel",))
def polling_interval_in_milliseconds(self):
return 0
@classmethod
def is_root(cls):
cls.ensure_mpi_initialized()
return MPI.COMM_WORLD.rank == 0
def start(self):
if not self.debugger_method is None:
command, arguments = self.debugger_method(self.full_name_of_the_worker, self, interpreter_executable=self.interpreter_executable)
else:
if not self.can_redirect_output or (self.redirect_stdout_file == 'none' and self.redirect_stderr_file == 'none'):
if self.interpreter_executable is None:
command = self.full_name_of_the_worker
arguments = None
else:
command = self.interpreter_executable
arguments = [self.full_name_of_the_worker]
else:
command, arguments = self.REDIRECT(self.full_name_of_the_worker, self.redirect_stdout_file, self.redirect_stderr_file, command=self.python_exe_for_redirection, interpreter_executable=self.interpreter_executable)
self.intercomm = MPI.COMM_SELF.Spawn(command, arguments, self.number_of_workers, info=self.info)
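        # MPI.COMM_SELF.Spawn launches number_of_workers copies of the worker
        # executable (MPI_Comm_spawn) and returns an intercommunicator; every
        # subsequent request and reply travels over self.intercomm.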
def stop(self):
if not self.intercomm is None:
try:
if self.must_disconnect_on_stop:
self.intercomm.Disconnect()
else:
self._intercomms_to_disconnect.append(self.intercomm)
except MPI.Exception as ex:
if ex.error_class == MPI.ERR_OTHER:
type(self)._mpi_is_broken_after_possible_code_crash = True
self.intercomm = None
def determine_length_from_datax(self, dtype_to_arguments):
def get_length(x):
if x:
try:
if not isinstance(x[0], str):
return len(x[0])
except:
return 1
return 1
lengths = [get_length(x) for x in dtype_to_arguments.values()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
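    # Note: the trailing 'x' means this helper does not override
    # determine_length_from_data, which is the method send_message() below actually
    # calls (defined elsewhere in this module or in a base class).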
def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = ()):
if self.intercomm is None:
raise exceptions.CodeException("You've tried to send a message to a code that is not running")
call_count = self.determine_length_from_data(dtype_to_arguments)
if call_count > self.max_message_length:
self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
else:
if self.is_inuse():
raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
self.inuse_semaphore.acquire()
try:
if self._is_inuse:
raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
self._is_inuse = True
finally:
self.inuse_semaphore.release()
message = ServerSideMPIMessage(
call_id, function_id,
call_count, dtype_to_arguments,
encoded_units = encoded_units
)
message.send(self.intercomm)
def recv_message(self, call_id, function_id, handle_as_array, has_units = False):
if self._communicated_splitted_message:
x = self._merged_results_splitted_message
self._communicated_splitted_message = False
del self._merged_results_splitted_message
return x
message = ServerSideMPIMessage(
polling_interval=self.polling_interval_in_milliseconds * 1000
)
try:
message.receive(self.intercomm)
except MPI.Exception as ex:
self._is_inuse = False
self.stop()
raise ex
self.inuse_semaphore.acquire()
try:
if not self._is_inuse:
raise exceptions.CodeException("You've tried to recv a message to a code that is not handling a message, this is not correct")
self._is_inuse = False
finally:
self.inuse_semaphore.release()
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in code: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units = False):
request = ServerSideMPIMessage().nonblocking_receive(self.intercomm)
def handle_result(function):
self._is_inuse = False
message = function()
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
request.add_result_handler(handle_result)
return request
def is_active(self):
return self.intercomm is not None
def is_inuse(self):
return self._is_inuse
def is_polling_supported(self):
return True
def __getstate__(self):
return {'state':'empty'}
def __setstate__(self, state):
self.info = MPI.INFO_NULL
self.cached = None
self.intercomm = None
self._is_inuse = False
self._communicated_splitted_message = False
self.inuse_semaphore = threading.Semaphore()
@option(sections=("channel",))
def job_scheduler(self):
"""Name of the job scheduler to use when starting the code, if given will use job scheduler to find list of hostnames for spawning"""
return ""
def get_info_from_job_scheduler(self, name, number_of_workers = 1):
if name == "slurm":
return self.get_info_from_slurm(number_of_workers)
return MPI.INFO_NULL
@classmethod
def get_info_from_slurm(cls, number_of_workers):
has_slurm_env_variables = 'SLURM_NODELIST' in os.environ and 'SLURM_TASKS_PER_NODE' in os.environ
if not has_slurm_env_variables:
return MPI.INFO_NULL
if not cls._scheduler_initialized:
nodelist = slurm.parse_slurm_nodelist(os.environ['SLURM_NODELIST'])
tasks_per_node = slurm.parse_slurm_tasks_per_node(os.environ['SLURM_TASKS_PER_NODE'])
all_nodes = []
for node, tasks in zip(nodelist, tasks_per_node):
for _ in range(tasks):
all_nodes.append(node)
cls._scheduler_nodes = all_nodes
cls._scheduler_index = 1 # start at 1 assumes that the python script is running on the first node as the first task
cls._scheduler_initialized = True
print("NODES:", cls._scheduler_nodes)
hostnames = []
count = 0
while count < number_of_workers:
hostnames.append(cls._scheduler_nodes[cls._scheduler_index])
count += 1
cls._scheduler_index += 1
if cls._scheduler_index >= len(cls._scheduler_nodes):
cls._scheduler_index = 0
host = ','.join(hostnames)
print("HOST:", host, cls._scheduler_index, os.environ['SLURM_TASKS_PER_NODE'])
info = MPI.Info.Create()
info['host'] = host #actually in mpich and openmpi, the host parameter is interpreted as a comma separated list of host names,
return info
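    # Illustrative example (hypothetical values): with SLURM_NODELIST="node[01-02]"
    # and SLURM_TASKS_PER_NODE="2(x2)", and assuming the slurm parse helpers expand
    # these as named, the node list becomes ['node01', 'node01', 'node02', 'node02'];
    # hostnames are then handed out round-robin, starting at index 1 because slot 0
    # is assumed to be occupied by this script itself.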
class MultiprocessingMPIChannel(AbstractMessageChannel):
"""
    Message channel that forwards each message to a helper process over a
    UNIX socket (as pickled Python objects); the helper process passes the
    message on to the real application using MPI.
    This message channel is a lot slower than the MPI message
    channel, but it is useful during testing with
    the MPICH2 nemesis channel. As the tests run as one
    application on one node they cause oversaturation
    of the processor(s) on the node. Each legacy code
    calls MPI_FINALIZE and this call waits
    for the MPI_FINALIZE call of the main test process. During
    this wait it consumes about 10% of the processor power.
    To mitigate this problem, we can use objects of this class
    instead of the normal MPIChannel. Then, part of the
    test is performed in a separate application (at least
    as MPI sees it) and this part can be stopped after each
    sub-test, thus removing unneeded applications.
"""
def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
AbstractMessageChannel.__init__(self, **options)
self.name_of_the_worker = name_of_the_worker
self.interpreter_executable = interpreter_executable
if not legacy_interface_type is None:
self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
else:
self.full_name_of_the_worker = self.name_of_the_worker
self.process = None
@option(type="boolean")
def debug_with_gdb(self):
return False
@option
def hostname(self):
return None
def start(self):
name_of_dir = "/tmp/amuse_" + os.getenv('USER')
self.name_of_the_socket, self.server_socket = self._createAServerUNIXSocket(name_of_dir)
environment = os.environ.copy()
if 'PYTHONPATH' in environment:
environment['PYTHONPATH'] = environment['PYTHONPATH'] + ':' + self._extra_path_item(__file__)
else:
environment['PYTHONPATH'] = self._extra_path_item(__file__)
all_options = {}
for x in self.iter_options():
all_options[x.name] = getattr(self, x.name)
template = """from {3} import {4}
o = {1!r}
m = channel.MultiprocessingMPIChannel('{0}',**o)
m.run_mpi_channel('{2}')"""
modulename = type(self).__module__
packagagename, thismodulename = modulename.rsplit('.', 1)
code_string = template.format(
self.full_name_of_the_worker,
all_options,
self.name_of_the_socket,
packagagename,
thismodulename,
)
self.process = Popen([sys.executable, "-c", code_string], env=environment)
self.client_socket, undef = self.server_socket.accept()
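        # start() generates a small Python program (the template above), runs it with
        # "python -c", and waits for it to connect back over the UNIX socket; that
        # helper process owns the real MpiChannel and proxies every request for us.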
def is_active(self):
return self.process is not None
def stop(self):
self._send(self.client_socket, ('stop', (),))
result = self._recv(self.client_socket)
self.process.wait()
self.client_socket.close()
self.server_socket.close()
self._remove_socket(self.name_of_the_socket)
self.process = None
def run_mpi_channel(self, name_of_the_socket):
channel = MpiChannel(self.full_name_of_the_worker, **self._local_options)
channel.start()
socket = self._createAClientUNIXSocket(name_of_the_socket)
try:
is_running = True
while is_running:
message, args = self._recv(socket)
result = None
if message == 'stop':
channel.stop()
is_running = False
if message == 'send_message':
result = channel.send_message(*args)
if message == 'recv_message':
result = channel.recv_message(*args)
self._send(socket, result)
finally:
socket.close()
def send_message(self, call_id=0, function_id=-1, dtype_to_arguments={}, encoded_units = ()):
self._send(self.client_socket, ('send_message', (call_id, function_id, dtype_to_arguments),))
result = self._recv(self.client_socket)
return result
def recv_message(self, call_id=0, function_id=-1, handle_as_array=False, has_units=False):
self._send(self.client_socket, ('recv_message', (call_id, function_id, handle_as_array),))
result = self._recv(self.client_socket)
return result
def _send(self, client_socket, message):
message_string = pickle.dumps(message)
header = struct.pack("i", len(message_string))
client_socket.sendall(header)
client_socket.sendall(message_string)
def _recv(self, client_socket):
header = self._receive_all(client_socket, 4)
length = struct.unpack("i", header)
message_string = self._receive_all(client_socket, length[0])
return pickle.loads(message_string)
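    # Wire format used by _send()/_recv(): a 4-byte native-endian length prefix
    # (struct "i") followed by the pickled payload of that length.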
def _receive_all(self, client_socket, number_of_bytes):
block_size = 4096
bytes_left = number_of_bytes
blocks = []
while bytes_left > 0:
if bytes_left < block_size:
block_size = bytes_left
block = client_socket.recv(block_size)
blocks.append(block)
bytes_left -= len(block)
        return b''.join(blocks)
def _createAServerUNIXSocket(self, name_of_the_directory, name_of_the_socket=None):
import uuid
import socket
if name_of_the_socket == None:
name_of_the_socket = os.path.join(name_of_the_directory, str(uuid.uuid1()))
if not os.path.exists(name_of_the_directory):
os.makedirs(name_of_the_directory)
server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self._remove_socket(name_of_the_socket)
server_socket.bind(name_of_the_socket)
server_socket.listen(5)
return (name_of_the_socket, server_socket)
def _createAClientUNIXSocket(self, name_of_the_socket):
import socket
client_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        # client_socket.settimeout(0)
client_socket.connect(name_of_the_socket)
return client_socket
def _remove_socket(self, name_of_the_socket):
try:
os.remove(name_of_the_socket)
except OSError:
pass
def _extra_path_item(self, path_of_the_module):
result = ''
for x in sys.path:
if path_of_the_module.startswith(x):
if len(x) > len(result):
result = x
return result
@option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
def debugger(self):
"""Name of the debugger to use when starting the code"""
return "none"
@option(type="boolean")
def check_mpi(self):
return True
class SocketMessage(AbstractMessage):
def _receive_all(self, nbytes, thesocket):
# logger.debug("receiving %d bytes", nbytes)
result = []
while nbytes > 0:
chunk = min(nbytes, 10240)
data_bytes = thesocket.recv(chunk)
if len(data_bytes) == 0:
raise exceptions.CodeException("lost connection to code")
result.append(data_bytes)
nbytes -= len(data_bytes)
# logger.debug("got %d bytes, result length = %d", len(data_bytes), len(result))
if len(result) > 0:
return type(result[0])().join(result)
else:
return b""
def receive(self, socket):
# logger.debug("receiving message")
header_bytes = self._receive_all(44, socket)
flags = numpy.frombuffer(header_bytes, dtype="b", count=4, offset=0)
if flags[0] != self.big_endian:
raise exceptions.CodeException("endianness in message does not match native endianness")
if flags[1]:
self.error = True
else:
self.error = False
header = numpy.copy(numpy.frombuffer(header_bytes, dtype="i", offset=0))
# logger.debug("receiving message with flags %s and header %s", flags, header)
# id of this call
self.call_id = header[1]
# function ID
self.function_id = header[2]
# number of calls in this message
self.call_count = header[3]
# number of X's in TOTAL
number_of_ints = header[4]
number_of_longs = header[5]
number_of_floats = header[6]
number_of_doubles = header[7]
number_of_booleans = header[8]
number_of_strings = header[9]
number_of_units = header[10]
self.ints = self.receive_ints(socket, number_of_ints)
self.longs = self.receive_longs(socket, number_of_longs)
self.floats = self.receive_floats(socket, number_of_floats)
self.doubles = self.receive_doubles(socket, number_of_doubles)
self.booleans = self.receive_booleans(socket, number_of_booleans)
self.strings = self.receive_strings(socket, number_of_strings)
self.encoded_units = self.receive_doubles(socket, number_of_units)
# logger.debug("message received")
def receive_ints(self, socket, count):
if count > 0:
nbytes = count * 4 # size of int
data_bytes = self._receive_all(nbytes, socket)
result = numpy.copy(numpy.frombuffer(data_bytes, dtype='int32'))
return result
else:
return []
def receive_longs(self, socket, count):
if count > 0:
nbytes = count * 8 # size of long
data_bytes = self._receive_all(nbytes, socket)
result = numpy.copy(numpy.frombuffer(data_bytes, dtype='int64'))
return result
else:
return []
def receive_floats(self, socket, count):
if count > 0:
nbytes = count * 4 # size of float
data_bytes = self._receive_all(nbytes, socket)
result = numpy.copy(numpy.frombuffer(data_bytes, dtype='f4'))
return result
else:
return []
def receive_doubles(self, socket, count):
if count > 0:
nbytes = count * 8 # size of double
data_bytes = self._receive_all(nbytes, socket)
result = numpy.copy(numpy.frombuffer(data_bytes, dtype='f8'))
return result
else:
return []
def receive_booleans(self, socket, count):
if count > 0:
nbytes = count * 1 # size of boolean/byte
data_bytes = self._receive_all(nbytes, socket)
result = numpy.copy(numpy.frombuffer(data_bytes, dtype='b'))
return result
else:
return []
def receive_strings(self, socket, count):
if count > 0:
lengths = self.receive_ints(socket, count)
total = lengths.sum() + len(lengths)
data_bytes = self._receive_all(total, socket)
strings = []
begin = 0
for size in lengths:
strings.append(data_bytes[begin:begin + size].decode('utf-8'))
begin = begin + size + 1
return strings
else:
return []
def nonblocking_receive(self, socket):
return async_request.ASyncSocketRequest(self, socket)
def send(self, socket):
flags = numpy.array([self.big_endian, self.error, len(self.encoded_units) > 0, False], dtype="b")
header = numpy.array([
self.call_id,
self.function_id,
self.call_count,
len(self.ints),
len(self.longs),
len(self.floats),
len(self.doubles),
len(self.booleans),
len(self.strings),
len(self.encoded_units),
], dtype='i')
# logger.debug("sending message with flags %s and header %s", flags, header)
        socket.sendall(flags.tobytes())
        socket.sendall(header.tobytes())
self.send_ints(socket, self.ints)
self.send_longs(socket, self.longs)
self.send_floats(socket, self.floats)
self.send_doubles(socket, self.doubles)
self.send_booleans(socket, self.booleans)
self.send_strings(socket, self.strings)
self.send_doubles(socket, self.encoded_units)
# logger.debug("message send")
def send_doubles(self, socket, array):
if len(array) > 0:
data_buffer = numpy.array(array, dtype='f8')
            socket.sendall(data_buffer.tobytes())
def send_ints(self, socket, array):
if len(array) > 0:
data_buffer = numpy.array(array, dtype='int32')
            socket.sendall(data_buffer.tobytes())
def send_floats(self, socket, array):
if len(array) > 0:
data_buffer = numpy.array(array, dtype='f4')
            socket.sendall(data_buffer.tobytes())
def send_strings(self, socket, array):
if len(array) > 0:
lengths = numpy.array( [len(s) for s in array] ,dtype='int32')
chars=(chr(0).join(array)+chr(0)).encode("utf-8")
if len(chars) != lengths.sum()+len(lengths):
raise Exception("send_strings size mismatch {0} vs {1}".format( len(chars) , lengths.sum()+len(lengths) ))
self.send_ints(socket, lengths)
socket.sendall(chars)
def send_booleans(self, socket, array):
if len(array) > 0:
data_buffer = numpy.array(array, dtype='b')
            socket.sendall(data_buffer.tobytes())
def send_longs(self, socket, array):
if len(array) > 0:
data_buffer = numpy.array(array, dtype='int64')
            socket.sendall(data_buffer.tobytes())
class SocketChannel(AbstractMessageChannel):
def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None, **options):
AbstractMessageChannel.__init__(self, **options)
#logging.getLogger().setLevel(logging.DEBUG)
logger.debug("initializing SocketChannel with options %s", options)
# self.name_of_the_worker = name_of_the_worker + "_sockets"
self.name_of_the_worker = name_of_the_worker
self.interpreter_executable = interpreter_executable
        if self.hostname is not None and self.hostname not in ['localhost', socket.gethostname()]:
            raise exceptions.CodeException("can only run codes on local machine using SocketChannel, not on %s" % self.hostname)
self.id = 0
if not legacy_interface_type is None:
self.full_name_of_the_worker = self.get_full_name_of_the_worker(legacy_interface_type)
else:
self.full_name_of_the_worker = self.name_of_the_worker
logger.debug("full name of worker is %s", self.full_name_of_the_worker)
self._is_inuse = False
self._communicated_splitted_message = False
self.socket = None
@option(sections=("channel",))
def mpiexec(self):
"""mpiexec with arguments"""
if len(config.mpi.mpiexec):
return config.mpi.mpiexec
return ''
@option(sections=("channel",))
def mpiexec_number_of_workers_flag(self):
"""flag to use, so that the number of workers are defined"""
return '-n'
@late
def debugger_method(self):
return self.DEBUGGERS[self.debugger]
def accept_worker_connection(self, server_socket, process):
#wait for the worker to connect. check if the process is still running once in a while
for i in range(0, 60):
#logger.debug("accepting connection")
try:
server_socket.settimeout(1.0)
return server_socket.accept()
except socket.timeout:
#update and read returncode
if process.poll() is not None:
raise exceptions.CodeException('could not connect to worker, worker process terminated')
#logger.error("worker not connecting, waiting...")
raise exceptions.CodeException('worker still not started after 60 seconds')
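    # accept_worker_connection() retries a 1-second accept() up to 60 times, so a
    # freshly started worker has roughly a minute to connect back before the channel
    # gives up (or fails immediately if the worker process has already exited).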
def start(self):
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(('', 0))
server_socket.settimeout(1.0)
server_socket.listen(1)
logger.debug("starting socket worker process, listening for worker connection on %s", server_socket.getsockname())
#this option set by CodeInterface
logger.debug("mpi_enabled: %s", str(self.initialize_mpi))
# set arguments to name of the worker, and port number we listen on
self.stdout = None
self.stderr = None
arguments = []
if not self.debugger_method is None:
command, arguments = self.debugger_method(self.full_name_of_the_worker, self, interpreter_executable=self.interpreter_executable)
else:
if self.redirect_stdout_file == 'none' and self.redirect_stderr_file == 'none':
if self.interpreter_executable is None:
command = self.full_name_of_the_worker
arguments = []
else:
command = self.interpreter_executable
arguments = [self.full_name_of_the_worker]
else:
command, arguments = self.REDIRECT(self.full_name_of_the_worker, self.redirect_stdout_file, self.redirect_stderr_file, command=self.python_exe_for_redirection, interpreter_executable=self.interpreter_executable)
#start arguments with command
arguments.insert(0, command)
if self.initialize_mpi and len(self.mpiexec) > 0:
mpiexec = shlex.split(self.mpiexec)
# prepend with mpiexec and arguments back to front
arguments.insert(0, str(self.number_of_workers))
arguments.insert(0, self.mpiexec_number_of_workers_flag)
arguments[:0] = mpiexec
command = mpiexec[0]
#append with port and hostname where the worker should connect
arguments.append(str(server_socket.getsockname()[1]))
#hostname of this machine
arguments.append(str(socket.gethostname()))
#initialize MPI inside worker executable
arguments.append('true')
else:
#append arguments with port and socket where the worker should connect
arguments.append(str(server_socket.getsockname()[1]))
#local machine
arguments.append('localhost')
#do not initialize MPI inside worker executable
arguments.append('false')
logger.debug("starting process with command `%s`, arguments `%s` and environment '%s'", command, arguments, os.environ)
self.process = Popen(arguments, executable=command, stdin=PIPE, stdout=None, stderr=None, close_fds=True)
logger.debug("waiting for connection from worker")
self.socket, address = self.accept_worker_connection(server_socket, self.process)
self.socket.setblocking(1)
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
server_socket.close()
# logger.debug("got connection from %s", address)
# logger.info("worker %s initialized", self.name_of_the_worker)
@option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
def debugger(self):
"""Name of the debugger to use when starting the code"""
return "none"
@option(sections=("channel",))
def hostname(self):
return None
def stop(self):
if (self.socket == None):
return
logger.debug("stopping socket worker %s", self.name_of_the_worker)
self.socket.close()
self.socket = None
        # should look into using poll with a timeout or some other mechanism
# when debugger method is on, no killing
count = 0
while(count < 5):
returncode = self.process.poll()
if not returncode is None:
break
time.sleep(0.2)
count += 1
if not self.stdout is None:
self.stdout.close()
if not self.stderr is None:
self.stderr.close()
def is_active(self):
return self.socket is not None
def is_inuse(self):
return self._is_inuse
def determine_length_from_datax(self, dtype_to_arguments):
def get_length(type_and_values):
argument_type, argument_values = type_and_values
if argument_values:
result = 1
for argument_value in argument_values:
try:
if not isinstance(argument_value, str):
result = max(result, len(argument_value))
except:
result = max(result, 1)
return result
lengths = [get_length(x) for x in dtype_to_arguments.items()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = ()):
call_count = self.determine_length_from_data(dtype_to_arguments)
# logger.info("sending message for call id %d, function %d, length %d", id, tag, length)
if self.is_inuse():
raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
if self.socket is None:
raise exceptions.CodeException("You've tried to send a message to a code that is not running")
if call_count > self.max_message_length:
self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
else:
message = SocketMessage(call_id, function_id, call_count, dtype_to_arguments, encoded_units = encoded_units)
message.send(self.socket)
self._is_inuse = True
def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
self._is_inuse = False
if self._communicated_splitted_message:
x = self._merged_results_splitted_message
self._communicated_splitted_message = False
del self._merged_results_splitted_message
return x
message = SocketMessage()
message.receive(self.socket)
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in code: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units=False):
request = SocketMessage().nonblocking_receive(self.socket)
def handle_result(function):
self._is_inuse = False
message = function()
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
request.add_result_handler(handle_result)
return request
@option(type="int", sections=("channel",))
def max_message_length(self):
"""
        For calls to functions that can handle arrays, messages may get too long for large N.
        The channel will split long messages into blocks of size max_message_length.
"""
return 1000000
class OutputHandler(threading.Thread):
def __init__(self, stream, port):
threading.Thread.__init__(self)
self.stream = stream
logger.debug("output handler connecting to daemon at %d", port)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
address = ('localhost', port)
try:
self.socket.connect(address)
except:
raise exceptions.CodeException("Could not connect to Distributed Daemon at " + str(address))
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.socket.sendall('TYPE_OUTPUT'.encode('utf-8'))
# fetch ID of this connection
result = SocketMessage()
result.receive(self.socket)
self.id = result.strings[0]
logger.debug("output handler successfully connected to daemon at %d", port)
self.daemon = True
self.start()
def run(self):
while True:
# logger.debug("receiving data for output")
data = self.socket.recv(1024)
if len(data) == 0:
# logger.debug("end of output", len(data))
return
# logger.debug("got %d bytes", len(data))
self.stream.write(data)
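    # OutputHandler is a daemon thread that registers itself with the distributed
    # daemon as an output connection and copies everything it receives onto the
    # given local stream (stdout or stderr) until the remote side closes.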
class DistributedChannel(AbstractMessageChannel):
default_distributed_instance = None
@staticmethod
def getStdoutID(instance):
if not hasattr(instance, "_stdoutHandler") or instance._stdoutHandler is None:
instance._stdoutHandler = OutputHandler(sys.stdout, instance.port)
return instance._stdoutHandler.id
@staticmethod
def getStderrID(instance):
if not hasattr(instance, "_stderrHandler") or instance._stderrHandler is None:
instance._stderrHandler = OutputHandler(sys.stderr, instance.port)
return instance._stderrHandler.id
def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None,
distributed_instance=None, dynamic_python_code=False, **options):
AbstractMessageChannel.__init__(self, **options)
self._is_inuse = False
self._communicated_splitted_message = False
if distributed_instance is None:
if self.default_distributed_instance is None:
raise Exception("No default distributed instance present, and none explicitly passed to code")
self.distributed_instance = self.default_distributed_instance
else:
self.distributed_instance = distributed_instance
#logger.setLevel(logging.DEBUG)
logger.info("initializing DistributedChannel with options %s", options)
self.socket=None
self.name_of_the_worker = name_of_the_worker
self.interpreter_executable = interpreter_executable
self.dynamic_python_code = dynamic_python_code
if self.number_of_workers == 0:
self.number_of_workers = 1
if self.label == None:
self.label = ""
logger.debug("number of workers is %d, number of threads is %s, label is %s", self.number_of_workers, self.number_of_threads, self.label)
self.daemon_host = 'localhost' # Distributed process always running on the local machine
self.daemon_port = self.distributed_instance.port # Port number for the Distributed process
logger.debug("port is %d", self.daemon_port)
self.id = 0
if not legacy_interface_type is None:
# worker specified by type. Figure out where this file is
# mostly (only?) used by dynamic python codes
directory_of_this_module = os.path.dirname(inspect.getfile(legacy_interface_type))
worker_path = os.path.join(directory_of_this_module, self.name_of_the_worker)
self.full_name_of_the_worker = os.path.normpath(os.path.abspath(worker_path))
self.name_of_the_worker = os.path.basename(self.full_name_of_the_worker)
else:
# worker specified by executable (usually already absolute)
self.full_name_of_the_worker = os.path.normpath(os.path.abspath(self.name_of_the_worker))
global_options = GlobalOptions()
self.executable = os.path.relpath(self.full_name_of_the_worker, global_options.amuse_rootdirectory)
self.worker_dir = os.path.dirname(self.full_name_of_the_worker)
logger.debug("executable is %s", self.executable)
logger.debug("full name of the worker is %s", self.full_name_of_the_worker)
logger.debug("worker dir is %s", self.worker_dir)
self._is_inuse = False
def check_if_worker_is_up_to_date(self, object):
# if self.hostname != 'localhost':
# return
#
# logger.debug("hostname = %s, checking for worker", self.hostname)
#
# AbstractMessageChannel.check_if_worker_is_up_to_date(self, object)
pass
def start(self):
logger.debug("connecting to daemon")
# if redirect = none, set output file to console stdout stream ID, otherwise make absolute
if (self.redirect_stdout_file == 'none'):
self.redirect_stdout_file = self.getStdoutID(self.distributed_instance)
else:
self.redirect_stdout_file = os.path.abspath(self.redirect_stdout_file)
# if redirect = none, set error file to console stderr stream ID, otherwise make absolute
if (self.redirect_stderr_file == 'none'):
self.redirect_stderr_file = self.getStderrID(self.distributed_instance)
else:
self.redirect_stderr_file = os.path.abspath(self.redirect_stderr_file)
logger.debug("output send to = " + self.redirect_stdout_file)
logger.debug("error send to = " + self.redirect_stderr_file)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
self.socket.connect((self.daemon_host, self.daemon_port))
except:
self.socket = None
raise exceptions.CodeException("Could not connect to Ibis Daemon at " + str(self.daemon_port))
self.socket.setblocking(1)
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
self.socket.sendall('TYPE_WORKER'.encode('utf-8'))
arguments = {'string': [self.executable, self.redirect_stdout_file, self.redirect_stderr_file, self.label, self.worker_dir], 'int32': [self.number_of_workers, self.number_of_threads], 'bool': [ self.dynamic_python_code]}
message = SocketMessage(call_id=1, function_id=10101010, call_count=1, dtype_to_arguments=arguments)
message.send(self.socket)
logger.info("waiting for worker %s to be initialized", self.name_of_the_worker)
result = SocketMessage()
result.receive(self.socket)
if result.error:
logger.error("Could not start worker: %s", result.strings[0])
self.stop()
raise exceptions.CodeException("Could not start worker for " + self.name_of_the_worker + ": " + result.strings[0])
self.remote_amuse_dir = result.strings[0]
logger.info("worker %s initialized", self.name_of_the_worker)
logger.info("worker remote amuse dir = %s", self.remote_amuse_dir)
@option(choices=AbstractMessageChannel.DEBUGGERS.keys(), sections=("channel",))
def debugger(self):
"""Name of the debugger to use when starting the code"""
return "none"
def get_amuse_root_directory(self):
return self.remote_amuse_dir
@option(type="int", sections=("channel",))
def number_of_threads(self):
return 0
@option(type="string", sections=("channel",))
def label(self):
return None
def stop(self):
if self.socket is not None:
logger.info("stopping worker %s", self.name_of_the_worker)
self.socket.close()
self.socket = None
def is_active(self):
return self.socket is not None
def is_inuse(self):
return self._is_inuse
def determine_length_from_datax(self, dtype_to_arguments):
def get_length(x):
if x:
try:
if not isinstance(x[0], str):
return len(x[0])
except:
                    return 1
            return 1
lengths = [get_length(x) for x in dtype_to_arguments.values()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = None):
call_count = self.determine_length_from_data(dtype_to_arguments)
logger.debug("sending message for call id %d, function %d, length %d", call_id, function_id, call_count)
if self.is_inuse():
raise exceptions.CodeException("You've tried to send a message to a code that is already handling a message, this is not correct")
if self.socket is None:
raise exceptions.CodeException("You've tried to send a message to a code that is not running")
if call_count > self.max_message_length:
self.split_message(call_id, function_id, call_count, dtype_to_arguments, encoded_units)
else:
message = SocketMessage(call_id, function_id, call_count, dtype_to_arguments, False, False)
message.send(self.socket)
self._is_inuse = True
def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
self._is_inuse = False
if self._communicated_splitted_message:
x = self._merged_results_splitted_message
self._communicated_splitted_message = False
del self._merged_results_splitted_message
return x
message = SocketMessage()
message.receive(self.socket)
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
#~ self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in worker: " + error_message)
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
def nonblocking_recv_message(self, call_id, function_id, handle_as_array, has_units=False):
# raise exceptions.CodeException("Nonblocking receive not supported by DistributedChannel")
request = SocketMessage().nonblocking_receive(self.socket)
def handle_result(function):
self._is_inuse = False
message = function()
if message.error:
error_message=message.strings[0] if len(message.strings)>0 else "no error message"
if message.call_id != call_id or message.function_id != function_id:
self.stop()
error_message+=" - code probably died, sorry."
raise exceptions.CodeException("Error in (asynchronous) communication with worker: " + error_message)
if message.call_id != call_id:
self.stop()
raise exceptions.CodeException('Received reply for call id {0} but expected {1}'.format(message.call_id, call_id))
if message.function_id != function_id:
self.stop()
raise exceptions.CodeException('Received reply for function id {0} but expected {1}'.format(message.function_id, function_id))
if has_units:
return message.to_result(handle_as_array), message.encoded_units
else:
return message.to_result(handle_as_array)
request.add_result_handler(handle_result)
return request
@option(type="int", sections=("channel",))
def max_message_length(self):
"""
        For calls to functions that can handle arrays, messages may get too long for large N.
        The channel will split long messages into blocks of size max_message_length.
"""
return 1000000
class LocalChannel(AbstractMessageChannel):
def __init__(self, name_of_the_worker, legacy_interface_type=None, interpreter_executable=None,
distributed_instance=None, dynamic_python_code=False, **options):
AbstractMessageChannel.__init__(self, **options)
MpiChannel.ensure_mpi_initialized()
if not legacy_interface_type is None:
self.so_module = legacy_interface_type.__so_module__
self.package, _ = legacy_interface_type.__module__.rsplit('.',1)
else:
raise Exception("Need to give the legacy interface type for the local channel")
self.legacy_interface_type = legacy_interface_type
self._is_inuse = False
self.module = None
def check_if_worker_is_up_to_date(self, object):
pass
def start(self):
from . import import_module
from . import python_code
module = import_module.import_unique(self.package + "." + self.so_module)
print(module, self.package + "." + self.so_module)
module.set_comm_world(MPI.COMM_SELF)
self.local_implementation = python_code.CythonImplementation(module, self.legacy_interface_type)
self.module = module
def stop(self):
from . import import_module
import_module.cleanup_module(self.module)
self.module = None
def is_active(self):
return not self.module is None
def is_inuse(self):
return self._is_inuse
def send_message(self, call_id, function_id, dtype_to_arguments={}, encoded_units = None):
call_count = self.determine_length_from_data(dtype_to_arguments)
self.message = LocalMessage(call_id, function_id, call_count, dtype_to_arguments, encoded_units = encoded_units)
        self._is_inuse = True
def recv_message(self, call_id, function_id, handle_as_array, has_units=False):
output_message = LocalMessage(call_id, function_id, self.message.call_count)
self.local_implementation.handle_message(self.message, output_message)
if has_units:
return output_message.to_result(handle_as_array),output_message.encoded_units
else:
return output_message.to_result(handle_as_array)
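    # LocalChannel runs the worker inside the current process through the generated
    # Cython module: send_message() only stores the request, and recv_message()
    # executes it synchronously via CythonImplementation.handle_message().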
def nonblocking_recv_message(self, call_id, function_id, handle_as_array):
pass
def determine_length_from_datax(self, dtype_to_arguments):
def get_length(x):
if x:
try:
if not isinstance(x[0], str):
return len(x[0])
except:
return 1
return 1
lengths = [get_length(x) for x in dtype_to_arguments.values()]
if len(lengths) == 0:
return 1
return max(1, max(lengths))
def is_polling_supported(self):
return False
class LocalMessage(AbstractMessage):
pass
| 35.866531 | 228 | 0.601277 |
ddfea09e945fafc7e59f74da912b6a098dedbd15 | 5,934 | py | Python | server/src/modules/base/handlers.py | cemeng/go-links | 50ecfadd79213f43c47a23d411475c23a64b39b4 | [
"Apache-2.0"
] | 176 | 2019-07-20T00:16:40.000Z | 2022-03-29T08:44:11.000Z | server/src/modules/base/handlers.py | cemeng/go-links | 50ecfadd79213f43c47a23d411475c23a64b39b4 | [
"Apache-2.0"
] | 45 | 2019-08-18T17:03:57.000Z | 2022-03-21T14:47:43.000Z | server/src/modules/base/handlers.py | cemeng/go-links | 50ecfadd79213f43c47a23d411475c23a64b39b4 | [
"Apache-2.0"
] | 44 | 2019-07-22T07:26:55.000Z | 2022-03-30T19:55:39.000Z | import jwt
import logging
from urllib.parse import urlencode
from flask import Blueprint, Response, abort, redirect, render_template, request, session, url_for
from flask_login import logout_user
from oauth2client.client import flow_from_clientsecrets
from oauth2client.client import FlowExchangeError
from modules.base import authentication, errors
from modules.organizations.utils import get_organization_id_for_email
from shared_helpers.config import get_config, get_path_to_oauth_secrets, get_config_by_key_path
from shared_helpers import utils
LOGIN_METHODS = [{'label': 'Sign in with Google',
'image': '/_images/auth/google_signin_button.png',
'url': '/_/auth/login/google'}
] + (get_config_by_key_path(['authentication', 'methods']) or [])
routes = Blueprint('base', __name__,
template_folder='../../static/templates')
def get_google_login_url(oauth_redirect_uri=None, redirect_to_after_oauth=None):
if not oauth_redirect_uri:
oauth_redirect_uri = '%s%s' % (authentication.get_host_for_request(request),
'/_/auth/oauth2_callback')
if not redirect_to_after_oauth:
redirect_to_after_oauth = 'http://localhost:5007' if request.host.startswith('localhost') else '/'
session['redirect_to_after_oauth'] = str(redirect_to_after_oauth)
# http://oauth2client.readthedocs.io/en/latest/source/oauth2client.client.html
flow = flow_from_clientsecrets(get_path_to_oauth_secrets(),
scope='https://www.googleapis.com/auth/userinfo.email',
redirect_uri=oauth_redirect_uri)
session['oauth_state'] = utils.generate_secret(32)
try:
return str(flow.step1_get_authorize_url(state=session['oauth_state']))
except TypeError:
# TODO: Fix breakage only appearing in tests.
return str(flow.step1_get_authorize_url())
@routes.route('/_/auth/login')
def login():
redirect_to = authentication.get_host_for_request(request)
if request.args.get('redirect_to', None):
redirect_to += request.args.get('redirect_to', None)
error_message = None
if request.args.get('e', None):
error_message = errors.get_error_message_from_code(request.args.get('e', None))
if error_message or len(LOGIN_METHODS) > 1:
return render_template('auth/login_selector.html',
login_methods=LOGIN_METHODS,
redirect_to=urlencode({'redirect_to': redirect_to}),
error_message=error_message)
return redirect(f"/_/auth/login/google?{urlencode({'redirect_to': redirect_to})}")
@routes.route('/_/auth/logout')
def logout():
logout_user()
return redirect('http://localhost:5007/' if request.host.startswith('localhost') else '/')
@routes.route('/_/auth/login/google')
def login_google():
return redirect(get_google_login_url(None, request.args.get('redirect_to', None)))
def login_via_test_token():
# used only for end-to-end tests
if not request.args.get('test_token'):
return False
payload = jwt.decode(request.args.get('test_token'), get_config()['testing']['secret'], 'HS256')
if payload['user_email'].split('@')[1] not in get_config()['testing']['domains']:
raise Exception('Invalid test user %s, with test token: %s' % (payload['user_email'],
request.args.get('test_token')))
authentication.login('test_token', user_email=payload['user_email'])
return True
def _redirect():
if session.get('redirect_to_after_oauth', '').startswith(authentication.get_host_for_request(request) + '/'):
return redirect(session.get('redirect_to_after_oauth'))
return redirect('/')
@routes.route('/_/auth/oauth2_callback')
def oauth2_callback():
try:
if login_via_test_token():
return redirect('/')
except:
return 'error', 500
flow = flow_from_clientsecrets(get_path_to_oauth_secrets(),
scope='https://www.googleapis.com/auth/userinfo.email',
redirect_uri=f'{authentication.get_host_for_request(request)}/_/auth/oauth2_callback')
if not session.get('oauth_state') or session.get('oauth_state') != request.args.get('state'):
return redirect(url_for('base.login'))
try:
credentials = flow.step2_exchange(request.args.get('code'))
except (FlowExchangeError, ValueError) as e:
logging.warning(e)
# user declined to auth; move on
return _redirect()
user_email = authentication.get_user_email(credentials)
if user_email:
authentication.login('google', user_email=user_email)
return _redirect()
@routes.route('/_/auth/jwt')
def login_with_jwt():
token = request.args.get('token')
if not token:
return abort(400)
try:
user_info = jwt.decode(token, get_config()['sessions_secret'], algorithms=['HS256'])
if 'id' in user_info:
authentication.login(user_info['method'], user_id=user_info['id'])
else:
if get_organization_id_for_email(user_info['email']) != user_info['organization']:
logging.warning('Attempt to use JWT with mismatched org: %s', token)
return abort(400)
authentication.login(user_info['method'], user_email=user_info['email'])
except jwt.DecodeError:
logging.warning('Attempt to use invalid JWT: %s', token)
return abort(400)
except jwt.ExpiredSignatureError:
logging.warning('Attempt to use expired JWT: %s', token)
redirect_to = authentication.get_host_for_request(request)
if request.args.get('redirect_to', '').startswith(redirect_to + '/'):
redirect_to = request.args['redirect_to']
return redirect(redirect_to)
@routes.route('/_/opensearch')
def opensearch():
return Response(response=render_template('opensearch/manifest.xml'),
status=200,
mimetype="application/opensearchdescription+xml")
| 34.701754 | 119 | 0.693124 |
1d6418d48a7b4fa1ebcc42fea65e8d3772966a52 | 4,231 | py | Python | dl/tests/test_model.py | sarkarchandan/nn-binary-classification | c8a0a865d1775d0d988d3cad52ab0ede5be7d03f | [
"MIT"
] | null | null | null | dl/tests/test_model.py | sarkarchandan/nn-binary-classification | c8a0a865d1775d0d988d3cad52ab0ede5be7d03f | [
"MIT"
] | null | null | null | dl/tests/test_model.py | sarkarchandan/nn-binary-classification | c8a0a865d1775d0d988d3cad52ab0ede5be7d03f | [
"MIT"
] | null | null | null | # pylint: disable=missing-module-docstring
# pylint: disable=missing-class-docstring
# pylint: disable=missing-function-docstring
import unittest
from typing import List, Tuple
from unittest import mock
from pathlib import Path
import numpy as np
from dl.data.utils import DataSource, random_split, generate_sample_data
from ..utils import fit
from ..nn import Model, Sequential, Dense
from ..act import TanH, Sigmoid, ReLU
from ..opt import Adam
from ..loss import BinaryCrossEntropy
np.seterr(all='raise')
class ModelTestCase(unittest.TestCase):
t_model: Model
t_data: np.ndarray
t_labels: np.ndarray
t_final_act: np.ndarray
@classmethod
def setUpClass(cls) -> None:
np.random.seed(1)
cls.t_data = np.random.randn(10, 256)
cls.t_labels = np.random.randn(1, 256)
cls.t_model = Sequential(
Dense(in_features=cls.t_data.shape[0], out_features=5, act_fn=TanH),
Dense(in_features=5, out_features=1, act_fn=Sigmoid),
)
def _compute_expected_activation(self) -> np.ndarray:
layer1_weight = self.t_model.layers()[0].weight
layer1_bias = self.t_model.layers()[0].bias
layer2_weight = self.t_model.layers()[1].weight
layer2_bias = self.t_model.layers()[1].bias
layer1_act = TanH(layer1_weight @ self.t_data + layer1_bias).value
layer2_act = Sigmoid(layer2_weight @ layer1_act + layer2_bias).value
return layer2_act
def _compute_expected_prediction(self) -> np.ndarray:
out_act = self._compute_expected_activation()
out_act[out_act > 0.5] = 1
out_act[out_act <= 0.5] = 0
return out_act
def test_model_forward(self) -> None:
e_act_value = self._compute_expected_activation()
t_act_value = self.t_model.forward(self.t_data)
np.testing.assert_equal(e_act_value, t_act_value)
@mock.patch.object(Dense, 'backward')
def test_model_backward(self, mock_backward: mock.Mock) -> None:
_ = self.t_model.forward(data=self.t_data)
self.t_model.backward(batch=(self.t_data, self.t_labels), rp=0.)
mock_backward.assert_called()
self.assertTrue(mock_backward.call_count, 2)
def test_model_parameters_before_back_propagation(self) -> None:
e_state = {
'W1': self.t_model.layers()[0].weight,
'b1': self.t_model.layers()[0].bias,
'W2': self.t_model.layers()[1].weight,
'b2': self.t_model.layers()[1].bias
}
t_state = self.t_model.parameters()
self.assertTrue(e_state == t_state)
def test_model_predict(self) -> None:
e_predicted = self._compute_expected_prediction()
t_predicted = self.t_model.predict(data=self.t_data, tp=0.5)
np.testing.assert_equal(e_predicted, t_predicted)
def test_model_save_load(self) -> None:
t_data, t_labels = generate_sample_data(n_samples=400)
datasets: List[Tuple[np.ndarray, np.ndarray]] = random_split(
dataset=(t_data, t_labels),
dist=[300, 50, 50])
trn_set, dev_set, test_set = datasets[0], datasets[1], datasets[2]
train_ds = DataSource(dataset=trn_set, batch_size=64)
dev_ds = DataSource(dataset=dev_set, batch_size=50)
t_model = Sequential(
Dense(in_features=t_data.shape[0], out_features=5, act_fn=ReLU),
Dense(in_features=5, out_features=2, act_fn=ReLU),
Dense(in_features=2, out_features=1, act_fn=Sigmoid)
)
epochs: int = 500
lr: float = 0.01
rp: float = 0.
_ = fit(epochs=epochs, lr=lr, rp=rp, model=t_model, train_ds=train_ds,
dev_ds=dev_ds,
loss_fn=BinaryCrossEntropy, opt_alg=Adam)
t_predicted = t_model.predict(data=test_set[0], tp=0.5)
model_path: str = Path().absolute().joinpath(
'dl/tests/res/s_model').__str__()
t_model.save(model_path)
e_model = Sequential.load(model_path)
self.assertTrue(isinstance(e_model, Sequential))
e_predicted = e_model.predict(data=test_set[0], tp=0.5)
np.testing.assert_equal(t_predicted, e_predicted)
if __name__ == '__main__':
unittest.main()
| 38.463636 | 80 | 0.662018 |
77101046114a4523b6679230a88e62995132ea31 | 2,248 | py | Python | linen_examples/mnist/mnist_benchmark.py | wrzadkow/flax | 45937af8379b1f653a7e1e187cc923d0a94585eb | [
"Apache-2.0"
] | null | null | null | linen_examples/mnist/mnist_benchmark.py | wrzadkow/flax | 45937af8379b1f653a7e1e187cc923d0a94585eb | [
"Apache-2.0"
] | null | null | null | linen_examples/mnist/mnist_benchmark.py | wrzadkow/flax | 45937af8379b1f653a7e1e187cc923d0a94585eb | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark for the MNIST example."""
import time
from absl import flags
from absl.testing import absltest
from absl.testing.flagsaver import flagsaver
import jax
import numpy as np
from flax.testing import Benchmark
import mnist_main
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
FLAGS = flags.FLAGS
class MnistBenchmark(Benchmark):
"""Benchmarks for the MNIST Flax example."""
@flagsaver
def test_cpu(self):
"""Run full training for MNIST CPU training."""
model_dir = self.get_tmp_model_dir()
FLAGS.model_dir = model_dir
start_time = time.time()
mnist_main.main([])
benchmark_time = time.time() - start_time
summaries = self.read_summaries(model_dir)
# Summaries contain all the information necessary for the regression
# metrics.
wall_time, _, eval_accuracy = zip(*summaries['eval_accuracy'])
wall_time = np.array(wall_time)
sec_per_epoch = np.mean(wall_time[1:] - wall_time[:-1])
end_eval_accuracy = eval_accuracy[-1]
# Assertions are deferred until the test finishes, so the metrics are
# always reported and benchmark success is determined based on *all*
# assertions.
self.assertBetween(end_eval_accuracy, 0.98, 1.0)
# Use the reporting API to report single or multiple metrics/extras.
self.report_wall_time(benchmark_time)
self.report_metrics({
'sec_per_epoch': sec_per_epoch,
'accuracy': end_eval_accuracy,
})
self.report_extras({
'model_name': 'MNIST',
'description': 'CPU test for MNIST.',
'implementation': 'linen',
})
if __name__ == '__main__':
absltest.main()
| 29.973333 | 74 | 0.722865 |
a8937bed1351408feed6f1dc5b150ca99fe6103b | 1,092 | py | Python | python/interpret-core/interpret/test/test_mli_interop.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 2,674 | 2019-10-03T14:14:35.000Z | 2022-03-31T13:40:49.000Z | python/interpret-core/interpret/test/test_mli_interop.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 257 | 2019-11-08T19:22:56.000Z | 2022-03-29T20:09:07.000Z | python/interpret-core/interpret/test/test_mli_interop.py | prateekiiest/interpret | b5530a587251a77516ab443037fc37f71708564c | [
"MIT"
] | 367 | 2019-10-31T15:33:21.000Z | 2022-03-31T13:40:50.000Z | # Copyright (c) 2019 Microsoft Corporation
# Distributed under the MIT software license
from ..glassbox import LogisticRegression
from .utils import synthetic_classification
# TODO: Harden these tests later to check content from data method.
def test_mli_visualize_interop():
data = synthetic_classification()
lr = LogisticRegression()
lr.fit(data["train"]["X"], data["train"]["y"])
global_exp = lr.explain_global()
assert "mli" in global_exp.data(-1)
global_overall_viz = global_exp.visualize()
assert global_overall_viz is not None
global_specific_viz = global_exp.visualize(0)
assert global_specific_viz is not None
mli_global_specific_viz = global_exp.visualize(("mli", 0))
assert mli_global_specific_viz is not None
local_exp = lr.explain_local(data["test"]["X"].head(), data["test"]["y"].head())
assert "mli" in local_exp.data(-1)
local_specific_viz = local_exp.visualize(0)
assert local_specific_viz is not None
mli_local_specific_viz = local_exp.visualize(("mli", 0))
assert mli_local_specific_viz is not None
| 36.4 | 84 | 0.738095 |
fd196820cf43002799a69222c6688bd962f5f9fd | 763 | py | Python | igssoftwaremanagement/urlpatterns/urls.py | srcsoftwareengineer/test4interview-software-management | a406acf1b828266aaa9f9c383deea4983c27b61b | [
"MIT"
] | 1 | 2021-11-09T16:42:57.000Z | 2021-11-09T16:42:57.000Z | igssoftwaremanagement/urlpatterns/urls.py | srcsoftwareengineer/test4interview-software-management | a406acf1b828266aaa9f9c383deea4983c27b61b | [
"MIT"
] | null | null | null | igssoftwaremanagement/urlpatterns/urls.py | srcsoftwareengineer/test4interview-software-management | a406acf1b828266aaa9f9c383deea4983c27b61b | [
"MIT"
] | 1 | 2021-11-09T15:02:01.000Z | 2021-11-09T15:02:01.000Z | """igssoftwaremanagement URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
urlpatterns = [
path('admin/', admin.site.urls),
]
| 34.681818 | 77 | 0.714286 |
27e37561206b204bb875017607ff6e74686235ef | 21,994 | py | Python | base/views/estadisticas_views.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | 3 | 2022-01-04T17:38:04.000Z | 2022-01-05T12:45:22.000Z | base/views/estadisticas_views.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | null | null | null | base/views/estadisticas_views.py | francofgp/Syndeo | 888a1001f2cbb2ff8b7247e84a2899dcbd08af80 | [
"MIT"
] | null | null | null | import datetime
from datetime import datetime
from sqlite3 import Date
from django.shortcuts import render
from django.http import JsonResponse
# esto sirve para hashear la pass
from django.contrib.auth.hashers import make_password
from ..serializers import PalabrasSerializer
from ..models import Account, Idioma, Texto
from ..models import Palabra
from rest_framework_simplejwt.serializers import TokenObtainPairSerializer
from rest_framework_simplejwt.views import TokenObtainPairView
from rest_framework.decorators import api_view, permission_classes
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated, IsAdminUser
from rest_framework import status
from random import shuffle
from django.db.models import Count
from django.db.models.functions import TruncDay, Concat
from datetime import timedelta
from django.utils import timezone
from django.db.models import Sum
User = Account
# En react
"""
X = estadistica.map((o)=>Object.keys(o)[0])
Y = estadistica.map((o)=>Object.values(o)[0])
"""
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasRepasadasUltimaSemana(request):
user=request.user
dia_hace_una_semana = timezone.now().date() - timedelta(days=7)
fechaModificacion=[]
# Cantidad de palabras repasadas en la ultima semana
#query = Palabra.historial.filter(usuario=user,fechaUltimoRepaso__gte=dia_hace_una_semana
# ).annotate(date=TruncDay('fechaUltimoRepaso')
# ).values("date").annotate(created_count=Count('id')).order_by("-date")
# con historial
query = Palabra.historial.filter(usuario=user,fechaUltimoRepaso__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaUltimoRepaso')
).values("date").annotate(created_count=Count(Concat('cantidadRepasos', 'palabra'),distinct=True)).order_by("-date")
#query = query.annotate(date=TruncDay('fechaUltimoRepaso')
# ).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(7,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
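# Hedged refactoring sketch (not part of the original file): every view below repeats
# the same pattern of filling in zero counts for days missing from the query result.
# The helper name and signature are assumptions made here for illustration only.
def build_daily_series(query, days_back):
    """Return [{date-string: count}, ...] for the last `days_back` days, using 0 for
    days that do not appear in `query` (a .values("date") queryset annotated with
    `created_count`)."""
    counts = {str(row["date"]): row["created_count"] for row in query}
    series = []
    for i in range(days_back, -1, -1):
        day = str(timezone.now().date() - timedelta(days=i))
        series.append({day: counts.get(day, 0)})
    return series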
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasRepasadasUltimoMes(request):
user=request.user
dia_hace_una_semana = timezone.now().date() - timedelta(days=30)
    # Number of words reviewed in the last month
    # using the review history
query = Palabra.historial.filter(usuario=user,fechaUltimoRepaso__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaUltimoRepaso')
).values("date").annotate(created_count=Count(Concat('cantidadRepasos', 'palabra'),distinct=True)).order_by("-date")
#query = Palabra.objects.filter(usuario=user,fechaUltimoRepaso__gte=dia_hace_una_semana
# ).annotate(date=TruncDay('fechaUltimoRepaso')
# ).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(30,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasRepasadasUltimaSemanaPorIdioma(request):
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
user=request.user
dia_hace_una_semana = timezone.now().date() - timedelta(days=7)
    # using the review history
query = Palabra.historial.filter(usuario=user,idioma=idioma,fechaUltimoRepaso__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaUltimoRepaso')
).values("date").annotate(created_count=Count(Concat('cantidadRepasos', 'palabra'),distinct=True)).order_by("-date")
""" for item in Palabra.historial.filter(usuario=user,idioma=idioma,fechaUltimoRepaso__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaUltimoRepaso'),repasos=Count(Concat('cantidadRepasos', 'palabra'), distinct=True))\
.values( "date", "palabra", 'repasos',"cantidadRepasos").order_by("-date" ,"palabra","repasos","cantidadRepasos"
):
print(item) """
    # Number of words reviewed in the last week for this language
#query = Palabra.objects.filter(usuario=user,idioma=idioma,fechaUltimoRepaso__gte=dia_hace_una_semana
# ).annotate(date=TruncDay('fechaUltimoRepaso')
# ).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(7,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasRepasadasUltimoMesPorIdioma(request):
user=request.user
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
dia_hace_una_semana = timezone.now().date() - timedelta(days=30)
    # using the review history
query = Palabra.historial.filter(usuario=user,idioma=idioma,fechaUltimoRepaso__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaUltimoRepaso')
).values("date").annotate(created_count=Count(Concat('cantidadRepasos', 'palabra'),distinct=True)).order_by("-date")
    # Number of words reviewed in the last month for this language
#query = Palabra.objects.filter(usuario=user,idioma=idioma,fechaUltimoRepaso__gte=dia_hace_una_semana
# ).annotate(date=TruncDay('fechaUltimoRepaso')
# ).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(30,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasNuevasUltimaSemana(request):
user=request.user
dia_hace_una_semana = timezone.now().date() - timedelta(days=7)
    # Number of new words read for the first time in the last week
query = Palabra.objects.filter(usuario=user,fechaLeidaPrimeraVez__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaLeidaPrimeraVez')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(7,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasNuevasUltimoMes(request):
user=request.user
dia_hace_una_semana = timezone.now().date() - timedelta(days=30)
    # Number of new words read for the first time in the last month
query = Palabra.objects.filter(usuario=user,fechaLeidaPrimeraVez__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaLeidaPrimeraVez')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(30,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasNuevasUltimaSemanaPorIdioma(request):
user=request.user
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
dia_hace_una_semana = timezone.now().date() - timedelta(days=7)
    # Number of new words read for the first time in the last week for this language
query = Palabra.objects.filter(usuario=user,idioma=idioma,fechaLeidaPrimeraVez__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaLeidaPrimeraVez')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(7,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasNuevasUltimoMesPorIdioma(request):
user=request.user
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
dia_hace_una_semana = timezone.now().date() - timedelta(days=30)
    # Number of new words read for the first time in the last month for this language
query = Palabra.objects.filter(usuario=user,idioma=idioma,fechaLeidaPrimeraVez__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaLeidaPrimeraVez')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(30,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasAprendidasUltimaSemana(request):
user=request.user
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
dia_hace_una_semana = timezone.now().date() - timedelta(days=7)
    # Number of words learned (dificultad=4) in the last week for this language
query = Palabra.objects.filter(usuario=user,idioma=idioma,dificultad=4,fechaModificacion__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaModificacion')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(7,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasAprendidasUltimoMes(request):
user=request.user
idiomaId=request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
dia_hace_una_semana = timezone.now().date() - timedelta(days=30)
    # Number of words learned (dificultad=4) in the last month for this language
query = Palabra.objects.filter(usuario=user,dificultad=4,idioma=idioma,fechaModificacion__gte=dia_hace_una_semana
).annotate(date=TruncDay('fechaModificacion')
).values("date").annotate(created_count=Count('id')).order_by("-date")
    # Build the per-day dictionary list
dias = []
for i in range(30,-1,-1):
dias.append(str(timezone.now().date() - timedelta(days=i)))
estadistica = []
for dia in dias:
        # Look for the date in the query results
existeFecha=False
for q in query:
if str(q["date"])==dia:
existeFecha=True
if existeFecha:
            # get the value
valor =0
for elemento in query:
if str(elemento["date"])==(dia):
valor=elemento["created_count"]
break
estadistica.append({dia:valor})
else:
estadistica.append({dia:0})
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasPorDificultades(request):
user=request.user
NOVISTA=0
FACIL=1
MEDIA=2
DIFICIL=3
APRENDIDA=4
    # Statistics
estadistica = [{"No vistas": Palabra.objects.filter(usuario=user,dificultad=NOVISTA).count()},
{
"Fáciles": Palabra.objects.filter(usuario=user,dificultad=FACIL).count()
},
{
"Medias": Palabra.objects.filter(usuario=user,dificultad=MEDIA).count()
},
{
"Difíciles": Palabra.objects.filter(usuario=user,dificultad=DIFICIL).count()
},
{
"Aprendidas": Palabra.objects.filter(usuario=user,dificultad=APRENDIDA).count()
},
]
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasPorDificultadesEIdioma(request):
    idiomaId = request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
user=request.user
NOVISTA=0
FACIL=1
MEDIA=2
DIFICIL=3
APRENDIDA=4
    # Statistics
estadistica = [{"No vistas": Palabra.objects.filter(usuario=user,dificultad=NOVISTA,idioma=idioma).count()},
{
"Fáciles": Palabra.objects.filter(usuario=user,dificultad=FACIL,idioma=idioma).count()
},
{
"Medias": Palabra.objects.filter(usuario=user,dificultad=MEDIA,idioma=idioma).count()
},
{
"Difíciles": Palabra.objects.filter(usuario=user,dificultad=DIFICIL,idioma=idioma).count()
},
{
"Aprendidas": Palabra.objects.filter(usuario=user,dificultad=APRENDIDA,idioma=idioma).count()
},
]
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getCantidadDePalabrasNoVistasVistasPorIdioma(request):
    idiomaId = request.query_params.get('idiomaId')
idioma = Idioma.objects.get(id=idiomaId)
user=request.user
NOVISTA=0
FACIL=1
MEDIA=2
DIFICIL=3
APRENDIDA=4
    # Statistics
estadistica = [{"No vistas": Palabra.objects.filter(usuario=user,dificultad=NOVISTA,idioma=idioma).count()},
{
"Nuevas": Palabra.objects.filter(usuario=user,dificultad__in=(FACIL,MEDIA,DIFICIL,APRENDIDA),idioma=idioma).count()
}
]
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getInformacionPerfil(request):
user=request.user
NOVISTA=0
FACIL=1
MEDIA=2
DIFICIL=3
APRENDIDA=4
    # Statistics
cantidadPalabrasLeidas = Texto.objects.filter(usuario=user,completado=True
).aggregate(Sum('cantidadPalabras'))['cantidadPalabras__sum']
cantidadTextosLeidos = Texto.objects.filter(usuario=user,completado=True
).count()
estadistica = [{"palabrasLeidas": cantidadPalabrasLeidas},
{
"textosLeidos":cantidadTextosLeidos
},
{
"cantidadVocabulario": Palabra.objects.filter(usuario=user,dificultad__in=(FACIL,MEDIA,DIFICIL,APRENDIDA)).count()
}
]
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def getMetaDiaria(request):
user=request.user
hoy = datetime.now()
FACIL=1
MEDIA=2
DIFICIL=3
APRENDIDA=4
cantidad = Palabra.objects.filter(usuario=user,fechaLeidaPrimeraVez=hoy, dificultad__in=(FACIL,MEDIA,DIFICIL,APRENDIDA)).count()
estadistica = [{"cantidad": cantidad
},
{
"meta":user.metaDiaria
},
{
"esMeta": user.metaDiaria<=cantidad
},
{
"fecha":timezone.now().date()
}
]
try:
return Response({'estadistica':estadistica})
except Exception as e:
print(e)
return Response({'estadistica':[]})
| 33.993818 | 140 | 0.607211 |
3cca5a969016219c50fa9865e703eee47de3a136 | 5,350 | py | Python | RecoMuon/Configuration/python/RecoMuon_EventContent_cff.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 6 | 2017-09-08T14:12:56.000Z | 2022-03-09T23:57:01.000Z | RecoMuon/Configuration/python/RecoMuon_EventContent_cff.py | samarendran23/cmssw | 849dd9897db9b894ca83e1b630a3c1eecafd6097 | [
"Apache-2.0"
] | 545 | 2017-09-19T17:10:19.000Z | 2022-03-07T16:55:27.000Z | RecoMuon/Configuration/python/RecoMuon_EventContent_cff.py | p2l1pfp/cmssw | 9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9 | [
"Apache-2.0"
] | 14 | 2017-10-04T09:47:21.000Z | 2019-10-23T18:04:45.000Z | # The following comments couldn't be translated into the new config version:
# Stand Alone's tracks with extra and hits
# Global's tracks with extra and hits
# TeV muons products
# Tracker's Tracks without extra and hits
# Muon Id
# Seed
# Global's tracks with extra and hits
# TeV muons products
import FWCore.ParameterSet.Config as cms
#Add Isolation
from RecoMuon.MuonIsolationProducers.muIsolation_EventContent_cff import *
# AOD content
RecoMuonAOD = cms.PSet(
outputCommands = cms.untracked.vstring(#'keep *_muons_*_*',
'keep recoMuons_muons_*_*',
'keep booledmValueMap_muons_*_*',
'keep doubleedmValueMap_muons_muPFMean*_*',
'keep doubleedmValueMap_muons_muPFSum*_*',
'keep *_muons_muonShowerInformation_*',
'keep recoMuonTimeExtraedmValueMap_muons_*_*',
'keep recoMuonCosmicCompatibilityedmValueMap_muons_*_*',
'keep uintedmValueMap_muons_*_*',
'keep *_particleFlow_muons_*',
#'drop *_muons_muons1stStep2muonsMap_*',
#'drop recoIsoDepositedmValueMap_muons_*_*', #not really used
#'drop doubleedmValueMap_muons_muPFIso*_*', #already inside the muon
# Tracks known by the Muon obj
'keep recoTracks_standAloneMuons_*_*',
'keep recoTrackExtras_standAloneMuons_*_*',
'keep TrackingRecHitsOwned_standAloneMuons_*_*',
'keep recoTracks_globalMuons_*_*',
'keep recoTrackExtras_globalMuons_*_*',
'keep recoTracks_tevMuons_*_*',
'keep recoTrackExtras_tevMuons_*_*',
'keep recoTracks_generalTracks_*_*',
'keep recoTracks_displacedTracks_*_*',
'keep recoTracksToOnerecoTracksAssociation_tevMuons_*_*',
# Displaced Global Muons
'keep recoTracks_displacedGlobalMuons_*_*',
'keep recoTrackExtras_displacedGlobalMuons_*_*',
'keep TrackingRecHitsOwned_displacedGlobalMuons_*_*',
# Cosmics
'keep recoTracks_cosmicMuons_*_*',
'keep recoMuons_muonsFromCosmics_*_*',
# Cosmics 1 leg
'keep recoTracks_cosmicMuons1Leg_*_*',
'keep recoMuons_muonsFromCosmics1Leg_*_*',
# Additional tracks
'keep recoTracks_refittedStandAloneMuons_*_*',
'keep recoTrackExtras_refittedStandAloneMuons_*_*',
'keep TrackingRecHitsOwned_refittedStandAloneMuons_*_*',
'keep recoTracks_displacedStandAloneMuons__*',
'keep recoTrackExtras_displacedStandAloneMuons_*_*',
'keep TrackingRecHitsOwned_displacedStandAloneMuons_*_*')
)
RecoMuonAOD.outputCommands.extend(RecoMuonIsolationAOD.outputCommands)
# RECO content
RecoMuonRECO = cms.PSet(
outputCommands = cms.untracked.vstring('keep *_MuonSeed_*_*',
'keep *_ancientMuonSeed_*_*',
'keep *_displacedMuonSeeds_*_*',
'keep TrackingRecHitsOwned_globalMuons_*_*',
'keep TrackingRecHitsOwned_tevMuons_*_*',
# Cosmics
'keep *_CosmicMuonSeed_*_*',
'keep recoTrackExtras_cosmicMuons_*_*',
'keep TrackingRecHitsOwned_cosmicMuons_*_*',
'keep recoTrackExtras_cosmicMuons1Leg_*_*',
'keep TrackingRecHitsOwned_cosmicMuons1Leg_*_*',
'keep recoTracks_cosmicsVetoTracks_*_*')
)
RecoMuonRECO.outputCommands.extend(RecoMuonAOD.outputCommands)
RecoMuonRECO.outputCommands.extend(RecoMuonIsolationRECO.outputCommands)
# Full Event content
RecoMuonFEVT = cms.PSet(
outputCommands = cms.untracked.vstring()
)
RecoMuonFEVT.outputCommands.extend(RecoMuonRECO.outputCommands)
RecoMuonFEVT.outputCommands.extend(RecoMuonIsolationFEVT.outputCommands)
| 56.914894 | 111 | 0.483364 |
2b137dfa559fe78e64c0bd6c330f0ec654f812a5 | 18,370 | py | Python | pycircuit/utilities/DESolver.py | michaelnt/pycircuit | ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e | [
"BSD-3-Clause"
] | 25 | 2015-05-13T22:49:26.000Z | 2020-03-10T04:13:20.000Z | pycircuit/utilities/DESolver.py | michaelnt/pycircuit | ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e | [
"BSD-3-Clause"
] | 1 | 2016-11-09T13:09:31.000Z | 2016-11-09T13:09:31.000Z | pycircuit/utilities/DESolver.py | michaelnt/pycircuit | ef3110c1c3789c1e5f30c35e3f5dd15ed4bd349e | [
"BSD-3-Clause"
] | 9 | 2016-03-05T11:46:27.000Z | 2022-01-19T18:30:55.000Z | # Placed into the public domain by:
# James R. Phillips
# 2548 Vera Cruz Drive
# Birmingham, AL 35235 USA
# email: zunzun@zunzun.com
import numpy, random
import scipy.optimize  # needed for the optional fmin polishing step in GenerateTrialAndTestInWorker
import pp # http://www.parallelpython.com - can be single CPU, multi-core SMP, or cluster parallelization
# runs only in remote worker
def MakeGlobalDESolverObjectInWorker(in_solver):
global solver
solver = in_solver
if True == solver.useClassRandomNumberMethods:
solver.SetupClassRandomNumberMethods()
# runs only in remote worker
def UpdatePopulationInWorker(in_population, in_bestEnergy, in_generation):
global solver
solver.population = in_population
solver.bestEnergy = in_bestEnergy
solver.generation = in_generation
# runs only in remote worker
def GenerateTrialAndTestInWorker(in_candidate):
global solver
# deStrategy is the name of the DE function to use
eval('solver.' + solver.deStrategy + '(in_candidate)')
energy, atSolution = solver.EnergyFunction(solver.trialSolution)
if solver.polishTheBestTrials == True and energy < solver.bestEnergy and solver.generation > 0: # not the first generation
# try to polish these new coefficients a bit.
solver.trialSolution = scipy.optimize.fmin(solver.externalEnergyFunction, solver.trialSolution, disp = 0) # don't print warning messages to stdout
energy, atSolution = solver.EnergyFunction(solver.trialSolution) # recalc with polished coefficients
return[in_candidate, solver.trialSolution, energy, atSolution]
class DESolver:
def __init__(self, parameterCount, populationSize, maxGenerations, minInitialValue, maxInitialValue, deStrategy, diffScale, crossoverProb, cutoffEnergy, useClassRandomNumberMethods, polishTheBestTrials):
self.polishTheBestTrials = polishTheBestTrials # see the Solve method where this flag is used
self.maxGenerations = maxGenerations
self.parameterCount = parameterCount
self.populationSize = populationSize
self.cutoffEnergy = cutoffEnergy
self.minInitialValue = minInitialValue
self.maxInitialValue = maxInitialValue
self.deStrategy = deStrategy # deStrategy is the name of the DE function to use
self.useClassRandomNumberMethods = useClassRandomNumberMethods
self.scale = diffScale
self.crossOverProbability = crossoverProb
# initial energies for comparison
self.popEnergy = numpy.ones(self.populationSize) * 1.0E300
self.bestSolution = numpy.zeros(self.parameterCount)
self.bestEnergy = 1.0E300
def Solve(self):
breakLoop = False
# a random initial population, returns numpy arrays directly
# the population will be synchronized with the remote workers at the beginning of each generation
self.population = numpy.random.uniform(self.minInitialValue, self.maxInitialValue, size=(self.populationSize, self.parameterCount))
job_server = pp.Server() # auto-detects number of SMP CPU cores (will detect 1 core on single-CPU systems)
# try/finally block is to ensure remote worker processes are killed
try:
# give each worker a copy of this object
for i in range(job_server.get_ncpus()):
job_server.submit(MakeGlobalDESolverObjectInWorker, (self,), (), ('DESolver', 'numpy', 'scipy.optimize'))
job_server.wait()
# now run DE
for self.generation in range(self.maxGenerations):
# no need to try another generation if we are done
if breakLoop == True:
break # from generation loop
# synchronize the populations for each worker
for i in range(job_server.get_ncpus()):
job_server.submit(UpdatePopulationInWorker, (self.population, self.bestEnergy, self.generation,), (), ())
job_server.wait()
# run this generation remotely
jobs = []
for candidate in range(self.populationSize):
jobs.append(job_server.submit(GenerateTrialAndTestInWorker, (candidate,), (), ()))
for job in jobs:
candidate, trialSolution, trialEnergy, atSolution = job()
# if we've reached a sufficient solution we can stop
if atSolution == True:
breakLoop = True
if trialEnergy < self.popEnergy[candidate]:
# New low for this candidate
self.popEnergy[candidate] = trialEnergy
self.population[candidate] = numpy.copy(trialSolution)
# If at an all-time low, save to "best"
if trialEnergy < self.bestEnergy:
self.bestEnergy = self.popEnergy[candidate]
self.bestSolution = numpy.copy(self.population[candidate])
finally:
job_server.destroy()
return atSolution
def SetupClassRandomNumberMethods(self):
numpy.random.seed(3) # this yields same results each time Solve() is run
self.nonStandardRandomCount = self.populationSize * self.parameterCount * 3
if self.nonStandardRandomCount < 523: # set a minimum number of random numbers
self.nonStandardRandomCount = 523
self.ArrayOfRandomIntegersBetweenZeroAndParameterCount = numpy.random.random_integers(0, self.parameterCount-1, size=(self.nonStandardRandomCount))
self.ArrayOfRandomRandomFloatBetweenZeroAndOne = numpy.random.uniform(size=(self.nonStandardRandomCount))
self.ArrayOfRandomIntegersBetweenZeroAndPopulationSize = numpy.random.random_integers(0, self.populationSize-1, size=(self.nonStandardRandomCount))
self.randCounter1 = 0
self.randCounter2 = 0
self.randCounter3 = 0
def GetClassRandomIntegerBetweenZeroAndParameterCount(self):
self.randCounter1 += 1
if self.randCounter1 >= self.nonStandardRandomCount:
self.randCounter1 = 0
return self.ArrayOfRandomIntegersBetweenZeroAndParameterCount[self.randCounter1]
def GetClassRandomFloatBetweenZeroAndOne(self):
self.randCounter2 += 1
if self.randCounter2 >= self.nonStandardRandomCount:
self.randCounter2 = 0
return self.ArrayOfRandomRandomFloatBetweenZeroAndOne[self.randCounter2]
def GetClassRandomIntegerBetweenZeroAndPopulationSize(self):
self.randCounter3 += 1
if self.randCounter3 >= self.nonStandardRandomCount:
self.randCounter3 = 0
return self.ArrayOfRandomIntegersBetweenZeroAndPopulationSize[self.randCounter3]
# this class might normally be subclassed and this method overridden, or the
# externalEnergyFunction set and this method used directly
def EnergyFunction(self, trial):
try:
energy = self.externalEnergyFunction(trial)
except ArithmeticError:
energy = 1.0E300 # high energies for arithmetic exceptions
except FloatingPointError:
energy = 1.0E300 # high energies for floating point exceptions
# we will be "done" if the energy is less than or equal to the cutoff energy
if energy <= self.cutoffEnergy:
return energy, True
else:
return energy, False
def Best1Exp(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,0,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.bestSolution[n] + self.scale * (self.population[r1][n] - self.population[r2][n])
n = (n + 1) % self.parameterCount
i += 1
def Rand1Exp(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.population[r1][n] + self.scale * (self.population[r2][n] - self.population[r3][n])
n = (n + 1) % self.parameterCount
i += 1
def RandToBest1Exp(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,0,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] += self.scale * (self.bestSolution[n] - self.trialSolution[n]) + self.scale * (self.population[r1][n] - self.population[r2][n])
n = (n + 1) % self.parameterCount
i += 1
def Best2Exp(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,1,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.bestSolution[n] + self.scale * (self.population[r1][n] + self.population[r2][n] - self.population[r3][n] - self.population[r4][n])
n = (n + 1) % self.parameterCount
i += 1
def Rand2Exp(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,1,1)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.population[r1][n] + self.scale * (self.population[r2][n] + self.population[r3][n] - self.population[r4][n] - self.population[r5][n])
n = (n + 1) % self.parameterCount
i += 1
def Best1Bin(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,0,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.bestSolution[n] + self.scale * (self.population[r1][n] - self.population[r2][n])
n = (n + 1) % self.parameterCount
i += 1
def Rand1Bin(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.population[r1][n] + self.scale * (self.population[r2][n] - self.population[r3][n])
n = (n + 1) % self.parameterCount
i += 1
def RandToBest1Bin(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,0,0,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] += self.scale * (self.bestSolution[n] - self.trialSolution[n]) + self.scale * (self.population[r1][n] - self.population[r2][n])
n = (n + 1) % self.parameterCount
i += 1
def Best2Bin(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,1,0)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.bestSolution[n] + self.scale * (self.population[r1][n] + self.population[r2][n] - self.population[r3][n] - self.population[r4][n])
n = (n + 1) % self.parameterCount
i += 1
def Rand2Bin(self, candidate):
r1,r2,r3,r4,r5 = self.SelectSamples(candidate, 1,1,1,1,1)
if True == self.useClassRandomNumberMethods:
n = self.GetClassRandomIntegerBetweenZeroAndParameterCount()
else:
n = random.randint(0, self.parameterCount-1)
self.trialSolution = numpy.copy(self.population[candidate])
i = 0
while(1):
if True == self.useClassRandomNumberMethods:
k = self.GetClassRandomFloatBetweenZeroAndOne()
else:
k = random.uniform(0.0, 1.0)
if k >= self.crossOverProbability or i == self.parameterCount:
break
self.trialSolution[n] = self.population[r1][n] + self.scale * (self.population[r2][n] + self.population[r3][n] - self.population[r4][n] - self.population[r5][n])
n = (n + 1) % self.parameterCount
i += 1
def SelectSamples(self, candidate, r1, r2, r3, r4, r5):
if r1:
while(1):
if True == self.useClassRandomNumberMethods:
r1 = self.GetClassRandomIntegerBetweenZeroAndPopulationSize()
else:
r1 = random.randint(0, self.populationSize-1)
if r1 != candidate:
break
if r2:
while(1):
if True == self.useClassRandomNumberMethods:
r2 = self.GetClassRandomIntegerBetweenZeroAndPopulationSize()
else:
r2 = random.randint(0, self.populationSize-1)
if r2 != candidate and r2 != r1:
break
if r3:
while(1):
if True == self.useClassRandomNumberMethods:
r3 = self.GetClassRandomIntegerBetweenZeroAndPopulationSize()
else:
r3 = random.randint(0, self.populationSize-1)
if r3 != candidate and r3 != r1 and r3 != r2:
break
if r4:
while(1):
if True == self.useClassRandomNumberMethods:
r4 = self.GetClassRandomIntegerBetweenZeroAndPopulationSize()
else:
r4 = random.randint(0, self.populationSize-1)
if r4 != candidate and r4 != r1 and r4 != r2 and r4 != r3:
break
if r5:
while(1):
if True == self.useClassRandomNumberMethods:
r5 = self.GetClassRandomIntegerBetweenZeroAndPopulationSize()
else:
r5 = random.randint(0, self.populationSize-1)
if r5 != candidate and r5 != r1 and r5 != r2 and r5 != r3 and r5 != r4:
break
return r1, r2, r3, r4, r5
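# Hedged usage sketch (not part of the original module): the solver is normally driven
# by assigning an externalEnergyFunction, as the comment on EnergyFunction suggests,
# and then calling Solve(). All parameter values below are arbitrary assumptions made
# here for illustration only.
if __name__ == "__main__":
    def sphere_energy(trial):
        # simple convex test function: sum of squares, minimum 0.0 at the origin
        return numpy.sum(trial * trial)

    solver = DESolver(parameterCount=3, populationSize=30, maxGenerations=100,
                      minInitialValue=-10.0, maxInitialValue=10.0,
                      deStrategy="Best1Exp", diffScale=0.7, crossoverProb=0.9,
                      cutoffEnergy=1.0E-8, useClassRandomNumberMethods=True,
                      polishTheBestTrials=False)
    solver.externalEnergyFunction = sphere_energy
    reached_cutoff = solver.Solve()
    print("reached cutoff:", reached_cutoff, "best energy:", solver.bestEnergy)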
| 43.325472 | 207 | 0.610071 |
dea10278f2d032b6320d40096bf78f630094e2d5 | 15,131 | py | Python | sdk/python/pulumi_azure_native/datalakestore/latest/get_account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datalakestore/latest/get_account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_native/datalakestore/latest/get_account.py | pulumi-bot/pulumi-azure-native | f7b9490b5211544318e455e5cceafe47b628e12c | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetAccountResult',
'AwaitableGetAccountResult',
'get_account',
]
warnings.warn("""The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:datalakestore:getAccount'.""", DeprecationWarning)
@pulumi.output_type
class GetAccountResult:
"""
Data Lake Store account information.
"""
def __init__(__self__, account_id=None, creation_time=None, current_tier=None, default_group=None, encryption_config=None, encryption_provisioning_state=None, encryption_state=None, endpoint=None, firewall_allow_azure_ips=None, firewall_rules=None, firewall_state=None, id=None, identity=None, last_modified_time=None, location=None, name=None, new_tier=None, provisioning_state=None, state=None, tags=None, trusted_id_provider_state=None, trusted_id_providers=None, type=None, virtual_network_rules=None):
if account_id and not isinstance(account_id, str):
raise TypeError("Expected argument 'account_id' to be a str")
pulumi.set(__self__, "account_id", account_id)
if creation_time and not isinstance(creation_time, str):
raise TypeError("Expected argument 'creation_time' to be a str")
pulumi.set(__self__, "creation_time", creation_time)
if current_tier and not isinstance(current_tier, str):
raise TypeError("Expected argument 'current_tier' to be a str")
pulumi.set(__self__, "current_tier", current_tier)
if default_group and not isinstance(default_group, str):
raise TypeError("Expected argument 'default_group' to be a str")
pulumi.set(__self__, "default_group", default_group)
if encryption_config and not isinstance(encryption_config, dict):
raise TypeError("Expected argument 'encryption_config' to be a dict")
pulumi.set(__self__, "encryption_config", encryption_config)
if encryption_provisioning_state and not isinstance(encryption_provisioning_state, str):
raise TypeError("Expected argument 'encryption_provisioning_state' to be a str")
pulumi.set(__self__, "encryption_provisioning_state", encryption_provisioning_state)
if encryption_state and not isinstance(encryption_state, str):
raise TypeError("Expected argument 'encryption_state' to be a str")
pulumi.set(__self__, "encryption_state", encryption_state)
if endpoint and not isinstance(endpoint, str):
raise TypeError("Expected argument 'endpoint' to be a str")
pulumi.set(__self__, "endpoint", endpoint)
if firewall_allow_azure_ips and not isinstance(firewall_allow_azure_ips, str):
raise TypeError("Expected argument 'firewall_allow_azure_ips' to be a str")
pulumi.set(__self__, "firewall_allow_azure_ips", firewall_allow_azure_ips)
if firewall_rules and not isinstance(firewall_rules, list):
raise TypeError("Expected argument 'firewall_rules' to be a list")
pulumi.set(__self__, "firewall_rules", firewall_rules)
if firewall_state and not isinstance(firewall_state, str):
raise TypeError("Expected argument 'firewall_state' to be a str")
pulumi.set(__self__, "firewall_state", firewall_state)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if last_modified_time and not isinstance(last_modified_time, str):
raise TypeError("Expected argument 'last_modified_time' to be a str")
pulumi.set(__self__, "last_modified_time", last_modified_time)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if new_tier and not isinstance(new_tier, str):
raise TypeError("Expected argument 'new_tier' to be a str")
pulumi.set(__self__, "new_tier", new_tier)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if state and not isinstance(state, str):
raise TypeError("Expected argument 'state' to be a str")
pulumi.set(__self__, "state", state)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if trusted_id_provider_state and not isinstance(trusted_id_provider_state, str):
raise TypeError("Expected argument 'trusted_id_provider_state' to be a str")
pulumi.set(__self__, "trusted_id_provider_state", trusted_id_provider_state)
if trusted_id_providers and not isinstance(trusted_id_providers, list):
raise TypeError("Expected argument 'trusted_id_providers' to be a list")
pulumi.set(__self__, "trusted_id_providers", trusted_id_providers)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if virtual_network_rules and not isinstance(virtual_network_rules, list):
raise TypeError("Expected argument 'virtual_network_rules' to be a list")
pulumi.set(__self__, "virtual_network_rules", virtual_network_rules)
@property
@pulumi.getter(name="accountId")
def account_id(self) -> str:
"""
The unique identifier associated with this Data Lake Store account.
"""
return pulumi.get(self, "account_id")
@property
@pulumi.getter(name="creationTime")
def creation_time(self) -> str:
"""
The account creation time.
"""
return pulumi.get(self, "creation_time")
@property
@pulumi.getter(name="currentTier")
def current_tier(self) -> str:
"""
The commitment tier in use for the current month.
"""
return pulumi.get(self, "current_tier")
@property
@pulumi.getter(name="defaultGroup")
def default_group(self) -> str:
"""
The default owner group for all new folders and files created in the Data Lake Store account.
"""
return pulumi.get(self, "default_group")
@property
@pulumi.getter(name="encryptionConfig")
def encryption_config(self) -> 'outputs.EncryptionConfigResponse':
"""
The Key Vault encryption configuration.
"""
return pulumi.get(self, "encryption_config")
@property
@pulumi.getter(name="encryptionProvisioningState")
def encryption_provisioning_state(self) -> str:
"""
The current state of encryption provisioning for this Data Lake Store account.
"""
return pulumi.get(self, "encryption_provisioning_state")
@property
@pulumi.getter(name="encryptionState")
def encryption_state(self) -> str:
"""
The current state of encryption for this Data Lake Store account.
"""
return pulumi.get(self, "encryption_state")
@property
@pulumi.getter
def endpoint(self) -> str:
"""
The full CName endpoint for this account.
"""
return pulumi.get(self, "endpoint")
@property
@pulumi.getter(name="firewallAllowAzureIps")
def firewall_allow_azure_ips(self) -> str:
"""
The current state of allowing or disallowing IPs originating within Azure through the firewall. If the firewall is disabled, this is not enforced.
"""
return pulumi.get(self, "firewall_allow_azure_ips")
@property
@pulumi.getter(name="firewallRules")
def firewall_rules(self) -> Sequence['outputs.FirewallRuleResponse']:
"""
The list of firewall rules associated with this Data Lake Store account.
"""
return pulumi.get(self, "firewall_rules")
@property
@pulumi.getter(name="firewallState")
def firewall_state(self) -> str:
"""
The current state of the IP address firewall for this Data Lake Store account.
"""
return pulumi.get(self, "firewall_state")
@property
@pulumi.getter
def id(self) -> str:
"""
The resource identifier.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> 'outputs.EncryptionIdentityResponse':
"""
The Key Vault encryption identity, if any.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="lastModifiedTime")
def last_modified_time(self) -> str:
"""
The account last modified time.
"""
return pulumi.get(self, "last_modified_time")
@property
@pulumi.getter
def location(self) -> str:
"""
The resource location.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
The resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="newTier")
def new_tier(self) -> str:
"""
The commitment tier to use for next month.
"""
return pulumi.get(self, "new_tier")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The provisioning status of the Data Lake Store account.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter
def state(self) -> str:
"""
The state of the Data Lake Store account.
"""
return pulumi.get(self, "state")
@property
@pulumi.getter
def tags(self) -> Mapping[str, str]:
"""
The resource tags.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="trustedIdProviderState")
def trusted_id_provider_state(self) -> str:
"""
The current state of the trusted identity provider feature for this Data Lake Store account.
"""
return pulumi.get(self, "trusted_id_provider_state")
@property
@pulumi.getter(name="trustedIdProviders")
def trusted_id_providers(self) -> Sequence['outputs.TrustedIdProviderResponse']:
"""
The list of trusted identity providers associated with this Data Lake Store account.
"""
return pulumi.get(self, "trusted_id_providers")
@property
@pulumi.getter
def type(self) -> str:
"""
The resource type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="virtualNetworkRules")
def virtual_network_rules(self) -> Sequence['outputs.VirtualNetworkRuleResponse']:
"""
The list of virtual network rules associated with this Data Lake Store account.
"""
return pulumi.get(self, "virtual_network_rules")
class AwaitableGetAccountResult(GetAccountResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetAccountResult(
account_id=self.account_id,
creation_time=self.creation_time,
current_tier=self.current_tier,
default_group=self.default_group,
encryption_config=self.encryption_config,
encryption_provisioning_state=self.encryption_provisioning_state,
encryption_state=self.encryption_state,
endpoint=self.endpoint,
firewall_allow_azure_ips=self.firewall_allow_azure_ips,
firewall_rules=self.firewall_rules,
firewall_state=self.firewall_state,
id=self.id,
identity=self.identity,
last_modified_time=self.last_modified_time,
location=self.location,
name=self.name,
new_tier=self.new_tier,
provisioning_state=self.provisioning_state,
state=self.state,
tags=self.tags,
trusted_id_provider_state=self.trusted_id_provider_state,
trusted_id_providers=self.trusted_id_providers,
type=self.type,
virtual_network_rules=self.virtual_network_rules)
def get_account(account_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetAccountResult:
"""
Data Lake Store account information.
Latest API Version: 2016-11-01.
:param str account_name: The name of the Data Lake Store account.
:param str resource_group_name: The name of the Azure resource group.
"""
pulumi.log.warn("""get_account is deprecated: The 'latest' version is deprecated. Please migrate to the function in the top-level module: 'azure-native:datalakestore:getAccount'.""")
__args__ = dict()
__args__['accountName'] = account_name
__args__['resourceGroupName'] = resource_group_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:datalakestore/latest:getAccount', __args__, opts=opts, typ=GetAccountResult).value
return AwaitableGetAccountResult(
account_id=__ret__.account_id,
creation_time=__ret__.creation_time,
current_tier=__ret__.current_tier,
default_group=__ret__.default_group,
encryption_config=__ret__.encryption_config,
encryption_provisioning_state=__ret__.encryption_provisioning_state,
encryption_state=__ret__.encryption_state,
endpoint=__ret__.endpoint,
firewall_allow_azure_ips=__ret__.firewall_allow_azure_ips,
firewall_rules=__ret__.firewall_rules,
firewall_state=__ret__.firewall_state,
id=__ret__.id,
identity=__ret__.identity,
last_modified_time=__ret__.last_modified_time,
location=__ret__.location,
name=__ret__.name,
new_tier=__ret__.new_tier,
provisioning_state=__ret__.provisioning_state,
state=__ret__.state,
tags=__ret__.tags,
trusted_id_provider_state=__ret__.trusted_id_provider_state,
trusted_id_providers=__ret__.trusted_id_providers,
type=__ret__.type,
virtual_network_rules=__ret__.virtual_network_rules)
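# Hedged usage sketch (added commentary, not generated code): in a Pulumi program the
# lookup below would resolve an existing account; the account and resource-group names
# are placeholders. Note the deprecation warning above - new code should prefer the
# top-level pulumi_azure_native.datalakestore.get_account instead of this 'latest' module.
#
#     account = get_account(account_name="myadlsaccount", resource_group_name="my-rg")
#     pulumi.export("endpoint", account.endpoint)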
| 40.894595 | 510 | 0.671205 |
14001d40f3b803e215e432fd2125b934924fbce6 | 22,616 | py | Python | InnerEye/Azure/azure_runner.py | MaxCodeXTC/InnerEye-DeepLearning | 12b496093097ef48d5ac8880985c04918d7f76fe | [
"MIT"
] | 1 | 2020-09-23T07:26:37.000Z | 2020-09-23T07:26:37.000Z | InnerEye/Azure/azure_runner.py | MaxCodeXTC/InnerEye-DeepLearning | 12b496093097ef48d5ac8880985c04918d7f76fe | [
"MIT"
] | null | null | null | InnerEye/Azure/azure_runner.py | MaxCodeXTC/InnerEye-DeepLearning | 12b496093097ef48d5ac8880985c04918d7f76fe | [
"MIT"
] | null | null | null | # ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
import argparse
import getpass
import logging
import signal
import sys
from argparse import ArgumentError, ArgumentParser, Namespace
from datetime import date
from pathlib import Path
from typing import Any, Dict, List, Optional
from azureml.core import Dataset, Experiment, Run, Workspace
from azureml.core.conda_dependencies import CondaDependencies
from azureml.core.datastore import Datastore
from azureml.core.workspace import WORKSPACE_DEFAULT_BLOB_STORE_NAME
from azureml.data.dataset_consumption_config import DatasetConsumptionConfig
from azureml.train.dnn import PyTorch
from InnerEye.Azure import azure_util
from InnerEye.Azure.azure_config import AzureConfig, ParserResult, SourceConfig
from InnerEye.Azure.azure_util import CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY, RUN_RECOVERY_FROM_ID_KEY_NAME, \
RUN_RECOVERY_ID_KEY_NAME, \
merge_conda_dependencies
from InnerEye.Azure.secrets_handling import read_variables_from_yaml
from InnerEye.Azure.tensorboard_monitor import AMLTensorBoardMonitorConfig, monitor
from InnerEye.Common.fixed_paths import AZUREML_DATASTORE_NAME
from InnerEye.Common.generic_parsing import GenericConfig
from InnerEye.ML.common import ModelExecutionMode
from InnerEye.ML.utils.config_util import ModelConfigLoader
SLEEP_TIME_SECONDS = 30
INPUT_DATA_KEY = "input_data"
RUN_RECOVERY_FILE = "most_recent_run.txt"
def submit_to_azureml(azure_config: AzureConfig,
source_config: SourceConfig,
model_config_overrides: str,
azure_dataset_id: str) -> Run:
"""
The main entry point. It creates an AzureML workspace if needed, submits an experiment using the code
as specified in source_config, and waits for completion if needed.
:param azure_config: azure related configurations to setup valid workspace
:param source_config: The information about which code should be submitted, and which arguments should be used.
:param model_config_overrides: A string that describes which model parameters were overwritten by commandline
arguments in the present run. This is only used for diagnostic purposes (it is set as a Tag on the run).
:param azure_dataset_id: The name of the dataset on blob storage to be used for this run.
"""
azure_run: Optional[Run] = None
# When running as part of the PR build, jobs frequently get interrupted by new pushes to the repository.
# In this case, we'd like to cancel the current AzureML run before exiting, to reduce cost.
# However, at present, this does NOT work, the SIGINT is not propagated through.
def interrupt_handler(signal: int, _: Any) -> None:
logging.info('Process interrupted via signal {}'.format(str(signal)))
if azure_run:
logging.info('Trying to terminate the AzureML job now.')
azure_run.cancel()
sys.exit(0)
for s in [signal.SIGINT, signal.SIGTERM]:
signal.signal(s, interrupt_handler)
# Retrieve the AzureML workspace
workspace = azure_config.get_workspace()
# create train/test experiment
azure_run = create_and_submit_experiment(workspace, azure_config, source_config, model_config_overrides,
azure_dataset_id)
if azure_config.wait_for_completion:
# We want the job output to be visible on the console, but the program should not exit if the
# job fails because we need to download the pytest result file.
azure_run.wait_for_completion(show_output=True, raise_on_error=False)
return azure_run
def set_run_tags(run: Run, azure_config: AzureConfig, model_config_overrides: str) -> None:
"""
Set metadata for the run
:param run: Run to set metadata for.
:param azure_config: The configurations for the present AzureML job
:param model_config_overrides: A string that describes which model parameters were overwritten by commandline
arguments in the present run.
"""
git_information = azure_config.get_git_information()
run.set_tags({
"tag": azure_config.tag,
"model_name": azure_config.model,
"friendly_name": azure_config.user_friendly_name,
"execution_mode": ModelExecutionMode.TRAIN.value if azure_config.train else ModelExecutionMode.TEST.value,
RUN_RECOVERY_ID_KEY_NAME: azure_util.create_run_recovery_id(run=run),
RUN_RECOVERY_FROM_ID_KEY_NAME: azure_config.run_recovery_id,
"build_number": str(azure_config.build_number),
"build_user": azure_config.build_user,
"source_repository": git_information.repository,
"source_branch": git_information.branch,
"source_id": git_information.commit_id,
"source_message": git_information.commit_message,
"source_author": git_information.commit_author,
"source_dirty": str(git_information.is_dirty),
"overrides": model_config_overrides,
CROSS_VALIDATION_SPLIT_INDEX_TAG_KEY: -1,
})
def create_experiment_name(azure_config: AzureConfig) -> str:
"""
Gets the name of the AzureML experiment. This is taken from the commandline, or from the git branch.
:param azure_config: The object containing all Azure-related settings.
:return: The name to use for the AzureML experiment.
"""
if azure_config.experiment_name:
return azure_config.experiment_name
branch = azure_config.get_git_information().branch
# If no branch information is found anywhere, create an experiment name that is the user alias and a timestamp
# at monthly granularity, so that not too many runs accumulate in that experiment.
return branch or getpass.getuser() + f"_local_branch_{date.today().strftime('%Y%m')}"
def create_and_submit_experiment(
workspace: Workspace,
azure_config: AzureConfig,
source_config: SourceConfig,
model_config_overrides: str,
azure_dataset_id: str) -> Run:
"""
Creates an AzureML experiment in the provided workspace and submits it for execution.
:param workspace: configured workspace to use to run the experiment in
:param azure_config: azure related configurations to setup valid workspace
:param source_config: The information about which code should be submitted, and which arguments should be used.
:param model_config_overrides: A string that describes which model parameters were overwritten by commandline
arguments in the present run. This is only used for diagnostic purposes (it is set as a Tag on the run).
:param azure_dataset_id: The name of the dataset in blob storage to be used for this run.
:returns: Run object for the submitted AzureML run
"""
experiment_name = create_experiment_name(azure_config)
exp = Experiment(workspace=workspace, name=azure_util.to_azure_friendly_string(experiment_name))
pt_env = create_pytorch_environment(workspace, azure_config, source_config, azure_dataset_id)
# submit a training/testing run associated with the experiment
run: Run = exp.submit(pt_env)
# set metadata for the run
set_run_tags(run, azure_config, model_config_overrides)
print("\nSuccessfully queued new run for experiment: {}".format(exp.name))
print("==============================================================================")
if azure_config.run_recovery_id:
print(f"\nRecovered from: {azure_config.run_recovery_id}")
recovery_id = azure_util.create_run_recovery_id(run)
recovery_file = Path(RUN_RECOVERY_FILE)
if recovery_file.exists():
recovery_file.unlink()
recovery_file.write_text(recovery_id)
print("==============================================================================")
print("Experiment URL: {}".format(exp.get_portal_url()))
print("Run URL: {}".format(run.get_portal_url()))
print("If this run fails, re-start runner.py and supply these additional arguments: "
f"--run_recovery_id={recovery_id}")
print(f"The run recovery ID has been written to this file: {recovery_file}")
print("==============================================================================")
if azure_config.tensorboard and azure_config.azureml:
print("Starting TensorBoard now because you specified --tensorboard")
monitor(monitor_config=AMLTensorBoardMonitorConfig(run_ids=[run.id]), azure_config=azure_config)
else:
print(f"To monitor this run locally using TensorBoard, run the script: "
f"InnerEye/Azure/tensorboard_monitor.py --run_ids={run.id}")
print("==============================================================================")
return run
def get_or_create_dataset(workspace: Workspace,
azure_dataset_id: str) -> Dataset:
"""
Looks in the AzureML datastore for a dataset of the given name. If there is no such dataset, a dataset is created
and registered, assuming that the files are in a folder that has the same name as the dataset. For example, if
azure_dataset_id is 'foo', then the 'foo' dataset is pointing to <container_root>/datasets/foo folder.
WARNING: the behaviour of Dataset.File.from_files, used below, is idiosyncratic. For example,
if "mydataset" storage has two "foo..." subdirectories each containing
a file dataset.csv and a directory ABC,
datastore = Datastore.get(workspace, "mydataset")
# This dataset has the file(s) in foo-bar01 at top level, e.g. dataset.csv
ds1 = Dataset.File.from_files([(datastore, "foo-bar01/*")])
# This dataset has two directories at top level, each with a name matching foo-bar*, and each
# containing dataset.csv.
ds2 = Dataset.File.from_files([(datastore, "foo-bar*/*")])
# This dataset contains a single directory "mydataset" at top level, containing a subdirectory
# foo-bar01, containing dataset.csv and (part of) ABC.
ds3 = Dataset.File.from_files([(datastore, "foo-bar01/*"),
(datastore, "foo-bar01/ABC/abc_files/*/*.nii.gz")])
These behaviours can be verified by calling "ds.download()" on each dataset ds.
"""
logging.info(f"Retrieving datastore '{AZUREML_DATASTORE_NAME}' from AzureML workspace")
datastore = Datastore.get(workspace, AZUREML_DATASTORE_NAME)
try:
logging.info(f"Trying to retrieve AzureML Dataset '{azure_dataset_id}'")
azureml_dataset = Dataset.get_by_name(workspace, name=azure_dataset_id)
logging.info("Dataset found.")
except:
logging.info(f"Dataset does not yet exist, creating a new one from data in folder '{azure_dataset_id}'")
# See WARNING above before changing the from_files call!
azureml_dataset = Dataset.File.from_files([(datastore, azure_dataset_id)])
logging.info("Registering the dataset for future use.")
azureml_dataset.register(workspace, name=azure_dataset_id)
return azureml_dataset
def create_pytorch_environment(workspace: Workspace,
azure_config: AzureConfig,
source_config: SourceConfig,
azure_dataset_id: str) -> PyTorch:
"""
Creates an Estimator environment required for model execution
:param workspace: The AzureML workspace
:param azure_config: azure related configurations to use for model scaleout behaviour
:param source_config: configurations for model execution, such as name and execution mode
:param azure_dataset_id: The name of the dataset in blob storage to be used for this run.
:return: The configured PyTorch environment to be used for experimentation
"""
azureml_dataset = get_or_create_dataset(workspace, azure_dataset_id=azure_dataset_id)
if azureml_dataset:
if azure_config.use_dataset_mount:
logging.info("Inside AzureML, the dataset will be provided as a mounted folder.")
estimator_inputs = [azureml_dataset.as_named_input(INPUT_DATA_KEY).as_mount()]
else:
logging.info("Inside AzureML, the dataset will be downloaded before training starts.")
estimator_inputs = [azureml_dataset.as_named_input(INPUT_DATA_KEY).as_download()]
else:
raise ValueError("No AzureML dataset was found.")
return create_estimator_from_configs(workspace, azure_config, source_config, estimator_inputs)
def pytorch_version_from_conda_dependencies(conda_dependencies: CondaDependencies) -> Optional[str]:
"""
Given a CondaDependencies object, look for a spec of the form "pytorch=...", and return
whichever supported version is compatible with the value, or None if there isn't one.
"""
supported_versions = PyTorch.get_supported_versions()
for spec in conda_dependencies.conda_packages:
components = spec.split("=")
if len(components) == 2 and components[0] == "pytorch":
version = components[1]
for supported in supported_versions:
if version.startswith(supported) or supported.startswith(version):
return supported
return None
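# Illustrative sketch (not from the original module). The two-way prefix match above
# treats a spec such as "1.6.0" and a supported version such as "1.6" as compatible.
# A minimal standalone rendition of the same rule, with made-up version lists:
def _match_pytorch_version_example(spec_version, supported_versions):
    """Return the first supported version compatible with spec_version, or None."""
    for supported in supported_versions:
        if spec_version.startswith(supported) or supported.startswith(spec_version):
            return supported
    return None
# _match_pytorch_version_example("1.6.0", ["1.5", "1.6"]) returns "1.6".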
def create_estimator_from_configs(workspace: Workspace, azure_config: AzureConfig, source_config: SourceConfig,
estimator_inputs: List[DatasetConsumptionConfig]) -> PyTorch:
"""
    Create and return a PyTorch estimator from the provided configuration information.
:param workspace: workspace that should contain a datastore named "workspaceblobstore", for storing source
:param azure_config: Azure configuration, used to store various values for the job to be submitted
    :param source_config: source configuration, for other needed values
    :param estimator_inputs: value for the "inputs" field of the estimator.
    :return: The configured PyTorch estimator.
"""
# AzureML seems to sometimes expect the entry script path in Linux format, hence convert to posix path
entry_script_relative_path = Path(source_config.entry_script).relative_to(source_config.root_folder).as_posix()
logging.info(f"Entry script {entry_script_relative_path} ({source_config.entry_script} relative to "
f"source directory {source_config.root_folder})")
environment_variables = {
"AZUREML_OUTPUT_UPLOAD_TIMEOUT_SEC": str(source_config.upload_timeout_seconds),
"MKL_SERVICE_FORCE_INTEL": "1",
**(source_config.environment_variables or {})
}
# Merge the project-specific dependencies with the packages that InnerEye itself needs. This should not be
# necessary if the innereye package is installed. It is necessary when working with an outer project and
# InnerEye as a git submodule and submitting jobs from the local machine.
# In case of version conflicts, the package version in the outer project is given priority.
conda_dependencies = merge_conda_dependencies(source_config.conda_dependencies_files) # type: ignore
if azure_config.pip_extra_index_url:
# When an extra-index-url is supplied, swap the order in which packages are searched for.
# This is necessary if we need to consume packages from extra-index that clash with names of packages on
# pypi
conda_dependencies.set_pip_option(f"--index-url {azure_config.pip_extra_index_url}")
conda_dependencies.set_pip_option("--extra-index-url https://pypi.org/simple")
# create Estimator environment
framework_version = pytorch_version_from_conda_dependencies(conda_dependencies)
logging.info(f"PyTorch framework version: {framework_version}")
estimator = PyTorch(
source_directory=source_config.root_folder,
entry_script=entry_script_relative_path,
script_params=source_config.script_params,
compute_target=azure_config.cluster,
# Use blob storage for storing the source, rather than the FileShares section of the storage account.
source_directory_data_store=workspace.datastores.get(WORKSPACE_DEFAULT_BLOB_STORE_NAME),
inputs=estimator_inputs,
environment_variables=environment_variables,
shm_size=azure_config.docker_shm_size,
use_docker=True,
use_gpu=True,
framework_version=framework_version
)
estimator.run_config.environment.python.conda_dependencies = conda_dependencies
# We'd like to log the estimator config, but conversion to string fails when the Estimator has some inputs.
# logging.info(azure_util.estimator_to_string(estimator))
if azure_config.hyperdrive:
estimator = source_config.hyperdrive_config_func(estimator) # type: ignore
return estimator
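# Illustrative note (not from the original module): with a hypothetical
# azure_config.pip_extra_index_url of "https://my.feed/simple", the swap above makes pip
# resolve packages from that feed first and fall back to PyPI, i.e. the resulting options are
#   --index-url https://my.feed/simple
#   --extra-index-url https://pypi.org/simple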
def create_runner_parser(model_config_class: type = None) -> argparse.ArgumentParser:
"""
Creates a commandline parser, that understands all necessary arguments for running a script in Azure,
plus all arguments for the given class. The class must be a subclass of GenericConfig.
:param model_config_class: A class that contains the model-specific parameters.
:return: An instance of ArgumentParser.
"""
parser = AzureConfig.create_argparser()
ModelConfigLoader.add_args(parser)
if model_config_class is not None:
if not issubclass(model_config_class, GenericConfig):
raise ValueError(f"The given class must be a subclass of GenericConfig, but got: {model_config_class}")
model_config_class.add_args(parser)
return parser
def parse_args_and_add_yaml_variables(parser: ArgumentParser,
yaml_config_file: Optional[Path] = None,
fail_on_unknown_args: bool = False,
args: List[str] = None) -> ParserResult:
"""
Reads arguments from sys.argv, modifies them with secrets from local YAML files,
and parses them using the given argument parser.
:param parser: The parser to use.
:param yaml_config_file: The path to the YAML file that contains values to supply into sys.argv.
:param fail_on_unknown_args: If True, raise an exception if the parser encounters an argument that it does not
recognize. If False, unrecognized arguments will be ignored, and added to the "unknown" field of the parser result.
:param args: arguments to parse
:return: The parsed arguments, and overrides
"""
settings_from_yaml = read_variables_from_yaml(yaml_config_file)
return parse_arguments(parser,
settings_from_yaml=settings_from_yaml,
fail_on_unknown_args=fail_on_unknown_args,
args=args)
def _create_default_namespace(parser: ArgumentParser) -> Namespace:
"""
Creates an argparse Namespace with all parser-specific default values set.
:param parser: The parser to work with.
:return:
"""
# This is copy/pasted from parser.parse_known_args
namespace = Namespace()
for action in parser._actions:
if action.dest is not argparse.SUPPRESS:
if not hasattr(namespace, action.dest):
if action.default is not argparse.SUPPRESS:
setattr(namespace, action.dest, action.default)
for dest in parser._defaults:
if not hasattr(namespace, dest):
setattr(namespace, dest, parser._defaults[dest])
return namespace
def parse_arguments(parser: ArgumentParser,
settings_from_yaml: Optional[Dict[str, Any]] = None,
fail_on_unknown_args: bool = False,
args: List[str] = None) -> ParserResult:
"""
Parses a list of commandline arguments with a given parser, and adds additional information read
from YAML files. Returns results broken down into a full arguments dictionary, a dictionary of arguments
that were set to non-default values, and unknown arguments.
:param parser: The parser to use
:param settings_from_yaml: A dictionary of settings read from a YAML config file.
:param fail_on_unknown_args: If True, raise an exception if the parser encounters an argument that it does not
recognize. If False, unrecognized arguments will be ignored, and added to the "unknown" field of the parser result.
:param args: Arguments to parse. If not given, use those in sys.argv
:return: The parsed arguments, and overrides
"""
if args is None:
args = sys.argv[1:]
# The following code is a slightly modified version of what happens in parser.parse_known_args. This had to be
# copied here because otherwise we would not be able to achieve the priority order that we desire.
namespace = _create_default_namespace(parser)
known_settings_from_yaml = dict()
unknown_settings_from_yaml = dict()
if settings_from_yaml:
for key, setting_from_yaml in settings_from_yaml.items():
if hasattr(namespace, key):
known_settings_from_yaml[key] = setting_from_yaml
setattr(namespace, key, setting_from_yaml)
else:
unknown_settings_from_yaml[key] = setting_from_yaml
if len(unknown_settings_from_yaml) > 0 and fail_on_unknown_args:
raise ValueError(f'Unknown settings from YAML: {unknown_settings_from_yaml}')
try:
namespace, unknown = parser._parse_known_args(args, namespace)
if hasattr(namespace, argparse._UNRECOGNIZED_ARGS_ATTR):
unknown.extend(getattr(namespace, argparse._UNRECOGNIZED_ARGS_ATTR))
delattr(namespace, argparse._UNRECOGNIZED_ARGS_ATTR)
except ArgumentError:
parser.print_usage(sys.stderr)
err = sys.exc_info()[1]
parser._print_message(str(err), sys.stderr)
raise
# Parse the arguments a second time, without supplying defaults, to see which arguments actually differ
# from defaults.
namespace_without_defaults, _ = parser._parse_known_args(args, Namespace())
parsed_args = vars(namespace).copy()
overrides = vars(namespace_without_defaults).copy()
if len(unknown) > 0 and fail_on_unknown_args:
raise ValueError(f'Unknown arguments: {unknown}')
return ParserResult(
args=parsed_args,
unknown=unknown,
overrides=overrides,
known_settings_from_yaml=known_settings_from_yaml,
unknown_settings_from_yaml=unknown_settings_from_yaml
)
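# Illustrative sketch (not from the original module). The effective priority implemented
# above is: parser defaults < YAML settings < command line arguments. A hypothetical call
# (argument names are examples only) could look like:
#   parser = create_runner_parser()
#   result = parse_args_and_add_yaml_variables(parser,
#                                              yaml_config_file=Path("settings.yml"),
#                                              args=["--cluster", "my-gpu-cluster"])
#   result.args       # full dictionary: defaults, overlaid with YAML, overlaid with CLI
#   result.overrides  # only the values that differ from the parser defaults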
| 52.595349 | 119 | 0.709144 |
2f279078c7f66608512d448b7c97ec296a24bd1a | 5,478 | py | Python | tutorial/source/conf.py | ciguaran/pyro | 2dfa8da0dd400c3712768385d8306848e93dab9a | [
"Apache-2.0"
] | 2 | 2021-01-04T01:35:23.000Z | 2021-01-04T01:35:32.000Z | tutorial/source/conf.py | Ezecc/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f | [
"Apache-2.0"
] | 1 | 2020-05-12T16:26:21.000Z | 2020-05-12T17:23:13.000Z | tutorial/source/conf.py | Ezecc/pyro | 11a96cde05756def826c232d76f9cff66f6e6d4f | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import sphinx_rtd_theme
from pyro import __version__
# -*- coding: utf-8 -*-
#
# Pyro Tutorials documentation build configuration file, created by
# sphinx-quickstart on Tue Oct 31 11:33:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.githubpages',
'nbsphinx',
'sphinx.ext.autodoc'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.ipynb']
# do not execute cells
nbsphinx_execute = 'never'
# allow errors because not all tutorials build
nbsphinx_allow_errors = True
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pyro Tutorials'
copyright = u'2017-2018, Uber Technologies, Inc'
author = u'Uber AI Labs'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
version = __version__
# release version
release = version # eg pyro 0.1.2
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['.ipynb_checkpoints']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# extend timeout
nbsphinx_timeout = 120
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# logo
html_logo = '_static/img/pyro_logo_wide.png'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'logo_only': True
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_style = 'css/pyro.css'
html_favicon = '_static/img/favicon/favicon.ico'
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'PyroTutorialsdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'PyroTutorials.tex', u'Pyro Examples and Tutorials',
u'Uber AI Labs', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pyrotutorials', u'Pyro Examples and Tutorials',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'PyroTutorials', u'Pyro Examples and Tutorials',
author, 'PyroTutorials', 'One line description of project.',
'Miscellaneous'),
]
| 30.098901 | 79 | 0.684009 |
cfc29bd7ddaa990c6ffedbc058ef09372b15d870 | 320 | py | Python | ckan/tests/legacy/test_versions.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 12 | 2015-08-28T16:59:07.000Z | 2020-03-08T01:39:30.000Z | ckan/tests/legacy/test_versions.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 13 | 2019-05-02T21:01:28.000Z | 2020-10-20T23:34:48.000Z | ckan/tests/legacy/test_versions.py | florianm/ckan | 1cfd98d591ac70b4eb81048bcd227b6c1354b1bf | [
"Apache-2.0"
] | 10 | 2015-05-08T04:33:20.000Z | 2020-03-03T15:17:58.000Z | import subprocess
class TestVersions(object):
no_db = True
def test_pylons(self):
p = subprocess.Popen(
'pip freeze | grep Pylons', shell=True,
stdout=subprocess.PIPE)
pylons_version = p.communicate()[0].strip()
assert pylons_version == "Pylons==0.9.7"
| 24.615385 | 55 | 0.596875 |
875e2e165ad245a365c316f56f31d8cd925c555a | 1,110 | py | Python | Useful scripts/train_test_split.py | SathyaKrishnan1211/understanding_perceptron | 5dfb0ce278eea20f68799d26dca0ec77eec23e05 | [
"MIT"
] | null | null | null | Useful scripts/train_test_split.py | SathyaKrishnan1211/understanding_perceptron | 5dfb0ce278eea20f68799d26dca0ec77eec23e05 | [
"MIT"
] | null | null | null | Useful scripts/train_test_split.py | SathyaKrishnan1211/understanding_perceptron | 5dfb0ce278eea20f68799d26dca0ec77eec23e05 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
def train_test_split(X,y,test_size=0.2,random_state=42):
"""
Accepts only a dataframe or a numpy array as input.
:param X: input data X
:param y: input data y
:param test_size: specifies the size of the test dataset.
:param random_state: seed for shuffling the data
:return: X_train,X_test,y_train,y_test
"""
np.random.seed(random_state)
shuffled_index = np.random.permutation(len(X))
    split_point = int(len(X)*(1-test_size))
    # train gets the first (1 - test_size) fraction of the shuffled indices,
    # test gets the remainder, so the two splits cannot overlap
    train_indices = shuffled_index[:split_point]
    test_indices = shuffled_index[split_point:]
    if isinstance(X, pd.DataFrame):
X_train,X_test,y_train,y_test = X.iloc[train_indices],X.iloc[test_indices],y.iloc[train_indices],y.iloc[test_indices]
return X_train, X_test, y_train, y_test
    elif isinstance(X, np.ndarray):
X_train,X_test,y_train,y_test = X[train_indices],X[test_indices],y[train_indices],y[test_indices]
return X_train, X_test, y_train, y_test
else:
raise TypeError("Only dataframes and numpy arrays are accepted as input")
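# Illustrative usage (not from the original script), with made-up data; the train and
# test index sets produced above are disjoint.
if __name__ == "__main__":
    X_demo = pd.DataFrame({"a": range(10), "b": range(10)})
    y_demo = pd.Series(range(10))
    X_tr, X_te, y_tr, y_te = train_test_split(X_demo, y_demo, test_size=0.2)
    assert len(X_tr) == 8 and len(X_te) == 2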
| 39.642857 | 126 | 0.685586 |
81c30382c9e33b336d8fa4f6abcc3651aecea92a | 1,581 | py | Python | hw/ip/rv_plic/util/reg_rv_plic.py | ladmangesh805/opentitan | f4126a4a2eab29f6621aced2bcfd912149ebdf2d | [
"Apache-2.0"
] | 1 | 2019-12-24T02:10:12.000Z | 2019-12-24T02:10:12.000Z | hw/ip/rv_plic/util/reg_rv_plic.py | BharathS11/opentitan | bfaf88058776f23428b317aad90ae93b822d6747 | [
"Apache-2.0"
] | null | null | null | hw/ip/rv_plic/util/reg_rv_plic.py | BharathS11/opentitan | bfaf88058776f23428b317aad90ae93b822d6747 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
r"""Mako template to hjson register description
"""
import sys
import argparse
from io import StringIO
from mako.template import Template
def main():
parser = argparse.ArgumentParser(prog="reg_rv_plic")
parser.add_argument('input',
nargs='?',
metavar='file',
type=argparse.FileType('r'),
default=sys.stdin,
help='input template file')
parser.add_argument('--sources',
'-s',
type=int,
help='Number of Interrupt Sources')
parser.add_argument('--targets',
'-t',
type=int,
default=1,
help='Number of Interrupt Targets')
parser.add_argument('--priority',
'-p',
type=int,
default=7,
help='Max value of interrupt priorities')
args = parser.parse_args()
    # Determine output: the rendered template is always printed to stdout
out = StringIO()
reg_tpl = Template(args.input.read())
out.write(
reg_tpl.render(src=args.sources,
target=args.targets,
prio=args.priority))
print(out.getvalue())
out.close()
if __name__ == "__main__":
main()
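# Illustrative usage (not from the original script): the template is read from a file or
# stdin and the rendered description is printed, so a hypothetical invocation could be
#   ./reg_rv_plic.py rv_plic.tpl.hjson -s 32 -t 1 -p 7 > rv_plic.hjson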
| 28.745455 | 74 | 0.511069 |
518e0bbed21bcd0e2be6d07def2b396122c80458 | 17,329 | py | Python | matrix/plugin.video.youtube/resources/lib/youtube_plugin/youtube/helper/v3.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.video.youtube/resources/lib/youtube_plugin/youtube/helper/v3.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | matrix/plugin.video.youtube/resources/lib/youtube_plugin/youtube/helper/v3.py | nzmodbox/repo.modbox | 5a5d77089f94f2fdde755ccc2e5f93e81f54f261 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Copyright (C) 2014-2016 bromix (plugin.video.youtube)
Copyright (C) 2016-2018 plugin.video.youtube
SPDX-License-Identifier: GPL-2.0-only
See LICENSES/GPL-2.0-only for more information.
"""
from ...youtube.helper import yt_context_menu
from ... import kodion
from ...kodion import items
from . import utils
def _process_list_response(provider, context, json_data):
video_id_dict = {}
channel_id_dict = {}
playlist_id_dict = {}
playlist_item_id_dict = {}
subscription_id_dict = {}
result = []
is_upcoming = False
thumb_size = context.get_settings().use_thumbnail_size()
yt_items = json_data.get('items', [])
if len(yt_items) == 0:
        context.log_warning('List of search results is empty')
return result
incognito = str(context.get_param('incognito', False)).lower() == 'true'
addon_id = context.get_param('addon_id', '')
for yt_item in yt_items:
is_youtube, kind = _parse_kind(yt_item)
if not is_youtube or not kind:
context.log_debug('v3 response: Item discarded, is_youtube=False')
continue
if kind == 'video':
video_id = yt_item['id']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['play'], item_params)
video_item = items.VideoItem(title, item_uri, image=image)
video_item.video_id = video_id
if incognito:
video_item.set_play_count(0)
video_item.set_fanart(provider.get_fanart(context))
result.append(video_item)
video_id_dict[video_id] = video_item
elif kind == 'channel':
channel_id = yt_item['id']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['channel', channel_id], item_params)
channel_item = items.DirectoryItem(title, item_uri, image=image)
channel_item.set_fanart(provider.get_fanart(context))
# if logged in => provide subscribing to the channel
if provider.is_logged_in():
context_menu = []
yt_context_menu.append_subscribe_to_channel(context_menu, provider, context, channel_id)
channel_item.set_context_menu(context_menu)
result.append(channel_item)
channel_id_dict[channel_id] = channel_item
elif kind == 'guidecategory':
guide_id = yt_item['id']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
item_params = {'guide_id': guide_id}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['special', 'browse_channels'], item_params)
guide_item = items.DirectoryItem(title, item_uri)
guide_item.set_fanart(provider.get_fanart(context))
result.append(guide_item)
elif kind == 'subscription':
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
channel_id = snippet['resourceId']['channelId']
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['channel', channel_id], item_params)
channel_item = items.DirectoryItem(title, item_uri, image=image)
channel_item.set_fanart(provider.get_fanart(context))
# map channel id with subscription id - we need it for the unsubscription
subscription_id_dict[channel_id] = yt_item['id']
result.append(channel_item)
channel_id_dict[channel_id] = channel_item
elif kind == 'playlist':
playlist_id = yt_item['id']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
channel_id = snippet['channelId']
# if the path directs to a playlist of our own, we correct the channel id to 'mine'
if context.get_path() == '/channel/mine/playlists/':
channel_id = 'mine'
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['channel', channel_id, 'playlist', playlist_id], item_params)
playlist_item = items.DirectoryItem(title, item_uri, image=image)
playlist_item.set_fanart(provider.get_fanart(context))
result.append(playlist_item)
playlist_id_dict[playlist_id] = playlist_item
elif kind == 'playlistitem':
snippet = yt_item['snippet']
video_id = snippet['resourceId']['videoId']
# store the id of the playlistItem - for deleting this item we need this item
playlist_item_id_dict[video_id] = yt_item['id']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['play'], item_params)
video_item = items.VideoItem(title, item_uri, image=image)
video_item.video_id = video_id
if incognito:
video_item.set_play_count(0)
video_item.set_fanart(provider.get_fanart(context))
# Get Track-ID from Playlist
video_item.set_track_number(snippet['position'] + 1)
result.append(video_item)
video_id_dict[video_id] = video_item
elif kind == 'activity':
snippet = yt_item['snippet']
details = yt_item['contentDetails']
activity_type = snippet['type']
# recommendations
if activity_type == 'recommendation':
video_id = details['recommendation']['resourceId']['videoId']
elif activity_type == 'upload':
video_id = details['upload']['videoId']
else:
continue
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['play'], item_params)
video_item = items.VideoItem(title, item_uri, image=image)
video_item.video_id = video_id
if incognito:
video_item.set_play_count(0)
video_item.set_fanart(provider.get_fanart(context))
result.append(video_item)
video_id_dict[video_id] = video_item
elif kind == 'commentthread':
thread_snippet = yt_item['snippet']
total_replies = thread_snippet['totalReplyCount']
snippet = thread_snippet['topLevelComment']['snippet']
item_params = {'parent_id': yt_item['id']}
if total_replies:
item_uri = context.create_uri(['special', 'child_comments'], item_params)
else:
item_uri = ''
result.append(utils.make_comment_item(context, provider, snippet, item_uri, total_replies))
elif kind == 'comment':
result.append(utils.make_comment_item(context, provider, yt_item['snippet'], uri=''))
elif kind == 'searchresult':
_, kind = _parse_kind(yt_item.get('id', {}))
# video
if kind == 'video':
video_id = yt_item['id']['videoId']
snippet = yt_item.get('snippet', {})
is_upcoming = snippet.get('liveBroadcastContent', '').lower() == 'upcoming'
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {'video_id': video_id}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['play'], item_params)
video_item = items.VideoItem(title, item_uri, image=image)
video_item.video_id = video_id
if incognito:
video_item.set_play_count(0)
video_item.set_fanart(provider.get_fanart(context))
result.append(video_item)
video_id_dict[video_id] = video_item
# playlist
elif kind == 'playlist':
playlist_id = yt_item['id']['playlistId']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
channel_id = snippet['channelId']
# if the path directs to a playlist of our own, we correct the channel id to 'mine'
if context.get_path() == '/channel/mine/playlists/':
channel_id = 'mine'
# channel_name = snippet.get('channelTitle', '')
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['channel', channel_id, 'playlist', playlist_id], item_params)
playlist_item = items.DirectoryItem(title, item_uri, image=image)
playlist_item.set_fanart(provider.get_fanart(context))
result.append(playlist_item)
playlist_id_dict[playlist_id] = playlist_item
elif kind == 'channel':
channel_id = yt_item['id']['channelId']
snippet = yt_item['snippet']
title = snippet.get('title', context.localize(provider.LOCAL_MAP['youtube.untitled']))
image = utils.get_thumbnail(thumb_size, snippet.get('thumbnails', {}))
item_params = {}
if incognito:
item_params.update({'incognito': incognito})
if addon_id:
item_params.update({'addon_id': addon_id})
item_uri = context.create_uri(['channel', channel_id], item_params)
channel_item = items.DirectoryItem(title, item_uri, image=image)
channel_item.set_fanart(provider.get_fanart(context))
result.append(channel_item)
channel_id_dict[channel_id] = channel_item
else:
raise kodion.KodionException("Unknown kind '%s'" % kind)
else:
raise kodion.KodionException("Unknown kind '%s'" % kind)
use_play_data = not incognito and context.get_settings().use_playback_history()
# this will also update the channel_id_dict with the correct channel id for each video.
channel_items_dict = {}
utils.update_video_infos(provider, context, video_id_dict, playlist_item_id_dict, channel_items_dict,
live_details=is_upcoming, use_play_data=use_play_data)
utils.update_playlist_infos(provider, context, playlist_id_dict, channel_items_dict)
utils.update_channel_infos(provider, context, channel_id_dict, subscription_id_dict, channel_items_dict)
if video_id_dict or playlist_id_dict:
utils.update_fanarts(provider, context, channel_items_dict)
return result
def response_to_items(provider, context, json_data, sort=None, reverse_sort=False, process_next_page=True):
result = []
is_youtube, kind = _parse_kind(json_data)
if not is_youtube:
context.log_debug('v3 response: Response discarded, is_youtube=False')
return result
if kind in ['searchlistresponse', 'playlistitemlistresponse', 'playlistlistresponse',
'subscriptionlistresponse', 'guidecategorylistresponse', 'channellistresponse',
'videolistresponse', 'activitylistresponse', 'commentthreadlistresponse',
'commentlistresponse']:
result.extend(_process_list_response(provider, context, json_data))
else:
raise kodion.KodionException("Unknown kind '%s'" % kind)
if sort is not None:
result = sorted(result, key=sort, reverse=reverse_sort)
# no processing of next page item
if not process_next_page:
return result
# next page
"""
This will try to prevent the issue 7163 (https://code.google.com/p/gdata-issues/issues/detail?id=7163).
Somehow the APIv3 is missing the token for the next page. We implemented our own calculation for the token
into the YouTube client...this should work for up to ~2000 entries.
"""
yt_total_results = int(json_data.get('pageInfo', {}).get('totalResults', 0))
yt_results_per_page = int(json_data.get('pageInfo', {}).get('resultsPerPage', 0))
page = int(context.get_param('page', 1))
yt_next_page_token = json_data.get('nextPageToken', '')
if yt_next_page_token or (page * yt_results_per_page < yt_total_results):
if not yt_next_page_token:
client = provider.get_client(context)
yt_next_page_token = client.calculate_next_page_token(page + 1, yt_results_per_page)
new_params = {}
new_params.update(context.get_params())
new_params['page_token'] = yt_next_page_token
new_context = context.clone(new_params=new_params)
current_page = int(new_context.get_param('page', 1))
next_page_item = items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context))
result.append(next_page_item)
return result
def handle_error(provider, context, json_data):
if json_data and 'error' in json_data:
ok_dialog = False
message_timeout = 5000
message = kodion.utils.strip_html_from_text(json_data['error'].get('message', ''))
log_message = kodion.utils.strip_html_from_text(json_data['error'].get('message', ''))
reason = json_data['error']['errors'][0].get('reason', '')
title = '%s: %s' % (context.get_name(), reason)
context.log_error('Error reason: |%s| with message: |%s|' % (reason, log_message))
if reason == 'accessNotConfigured':
message = context.localize(provider.LOCAL_MAP['youtube.key.requirement.notification'])
ok_dialog = True
if reason == 'keyInvalid' and message == 'Bad Request':
message = context.localize(provider.LOCAL_MAP['youtube.api.key.incorrect'])
message_timeout = 7000
if reason == 'quotaExceeded' or reason == 'dailyLimitExceeded':
message_timeout = 7000
if ok_dialog:
context.get_ui().on_ok(title, message)
else:
context.get_ui().show_notification(message, title, time_milliseconds=message_timeout)
return False
return True
def _parse_kind(item):
kind = item.get('kind', '').split('#')
if len(kind) < 1:
return False, ''
if len(kind) < 2:
try:
_ = kind.index('youtube')
return True, ''
except ValueError:
return False, str(kind[0]).lower()
try:
idx = kind.index('youtube')
if idx == 0:
return True, str(kind[1]).lower()
except ValueError:
pass
return False, str(kind[1]).lower()
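# Illustrative examples (not from the original module) of how _parse_kind behaves:
#   {'kind': 'youtube#video'}        -> (True, 'video')
#   {'kind': 'youtube#searchResult'} -> (True, 'searchresult')
#   {'kind': 'drive#file'}           -> (False, 'file'), so callers discard the item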
| 44.777778 | 111 | 0.610133 |
ef0b4725df1fcffbe0e9d8d97116f5e4aa90161b | 7,843 | py | Python | skyscraper/utils/value_parser.py | amdor/skyscraper | c7f74168356b37dca78291b8eec390b9341dfa57 | [
"MIT"
] | 1 | 2020-02-23T16:22:43.000Z | 2020-02-23T16:22:43.000Z | skyscraper/utils/value_parser.py | amdor/skyscraper | c7f74168356b37dca78291b8eec390b9341dfa57 | [
"MIT"
] | null | null | null | skyscraper/utils/value_parser.py | amdor/skyscraper | c7f74168356b37dca78291b8eec390b9341dfa57 | [
"MIT"
] | null | null | null | import re
from datetime import date
from math import log10
from currency_converter import CurrencyConverter
from skyscraper.utils.constants import MASS_KEY, SPEEDOMETER_KEY, PRICE_KEY, AGE_KEY, ACCEPTED_CURRENCIES, \
ACCEPTED_CURRENCY_KEYS, CURRENCY_KEY
from skyscraper.utils.constants import POWER_KEY, CONDITION_KEY, Conditions, TRUNK_KEY
from skyscraper.utils.date_helper import is_string_year, is_string_month
class ValueParser:
def __init__(self, car_data):
self.car_data = car_data
@staticmethod
def get_first_number(string_value):
"""
Parses the first number of a string, that starts with a number.
:rtype: int
:param string_value: string to parse
        :return: the first number followed by any regarded delimiter. Space, dot, comma delimited numbers are regarded
"""
num_val = ValueParser.__remove_special_chars_from_number(string_value)
if num_val:
return int(num_val)
return 0
@staticmethod
def __remove_special_chars_from_number(string_value):
ret_val = string_value.replace('.', '').replace(',', '').replace(' ', '')
        ret_val = re.search(r'[^0-9]*(?P<num>\d*)', ret_val).group('num')
return ret_val
def get_power_value(self):
"""
        Parses the kilowatt value of the car from the HTML text
        representation and scales it down to a worth score.
        car_data[POWER_KEY]: the text representation e.g. '62 kW'
        :return: the parsed kilowatt value divided by 14, or 0 if power is not present
"""
return ValueParser.get_first_number(self.car_data.get(POWER_KEY, '0')) / 14
def get_condition_value(self):
"""
Parses the value for condition.
Enumerated conditions are viewed as good, and thus handled
as baseline. Anything else (including empty value) is added as penalty
:return: the value for condition
"""
condition_text = self.car_data.get(CONDITION_KEY, '')
if condition_text == Conditions.COMMON.value \
or condition_text == Conditions.EXCELLENT.value \
or condition_text == Conditions.UNDAMAGED.value \
or condition_text == Conditions.PRESERVED.value \
or condition_text == Conditions.NOVEL.value:
return 0
else:
return -20
def get_trunk_value(self):
"""
Presumably trunk space is in liter
:return: the value for trunk space
"""
trunk_space_text = self.car_data.get(TRUNK_KEY, '0')
trunk_space_value = ValueParser.get_first_number(trunk_space_text)
return round(trunk_space_value / 150)
def get_mass_value(self):
"""
Car's mass in kilograms converted to car worth
:return: the value for mass
"""
mass_text = self.car_data.get(MASS_KEY, '0')
mass_value = ValueParser.get_first_number(mass_text)
return round(mass_value / 500)
def get_speedometer_value(self):
"""
Speedometer value: first 100 000km is 0-10 proportionately,
        the part from 100 000 to 200 000 adds a further 1-5 penalty points similarly,
        from 200 000 it is 2.5 penalty points for every 100 000 (proportionately).
The more a car runs, the less it's worth
:return: value for speedometer
"""
speedometer_text = self.car_data.get(SPEEDOMETER_KEY, '-12')
speedometer_value = ValueParser.get_first_number(speedometer_text)
if 0 < speedometer_value:
speedometer_value = speedometer_value / 10000
else:
return speedometer_value
if 10.0 < speedometer_value < 20:
speedometer_value = 10 + (speedometer_value - 10) / 2
elif 20.0 < speedometer_value:
speedometer_value = 15 + (speedometer_value - 20) / 4
return round(speedometer_value) * -1
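    # Illustrative worked example (not from the original class): a reading of "50 000 km"
    # becomes 5.0 and the method returns -5; a reading of "250 000 km" becomes 25.0, then
    # 15 + (25 - 20) / 4 = 16.25, which rounds to 16 and is negated, giving -16.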
@staticmethod
def get_currency_iso_symbol(amount_in_original_currency):
from_currency = 'EUR'
for currency_symbol in ACCEPTED_CURRENCIES:
if currency_symbol in amount_in_original_currency:
from_currency = ACCEPTED_CURRENCY_KEYS[currency_symbol]
break
return from_currency
@staticmethod
def __get_in_huf(amount_in_original_currency, from_currency='EUR'):
"""
Converts any accepted currencies to HUF
:param amount_in_original_currency: amount string in original currency to be converted
:return: value in HUF
"""
if from_currency == 'HUF':
return int(amount_in_original_currency)
else:
converter = CurrencyConverter()
new_currency = converter.convert(amount_in_original_currency, from_currency, 'HUF')
return new_currency
def get_price_value(self):
"""
Price is calculated from the price and the power, if there is no problem
(like no power or price data), NOTE: max cap
:return: price to power ratio or 0 if there is no price or power
"""
power = ValueParser.get_first_number(self.car_data.get(POWER_KEY, '0'))
ratio = 5000 * power
price_text = self.car_data.get(PRICE_KEY, '0')
original_currency = self.car_data.get(CURRENCY_KEY, 'HUF')
price_value = ValueParser.__get_in_huf(price_text, original_currency)
if min(price_value, power) <= 0:
return 0
price_value = round(price_value / ratio)
return min(price_value, 10)
def get_age_value(self):
"""
The date of production is required for this feature in YYYY[/MM/*]? format
        The baseline for car worth loss was taken from
http://www.edmunds.com/car-buying/how-fast-does-my-new-car-lose-value-infographic.html
:return: the worth loss by the car's age
"""
prod_date = self.car_data.get(AGE_KEY, 0)
if prod_date == 0 or prod_date == '':
return 0
current_date = date.today()
# toInt
prod_date = prod_date.replace('.', '/')
prod_date_arr = list(map(int, prod_date.split('/')))
if len(prod_date_arr) < 2:
return 0
elif len(prod_date_arr) >= 2:
# 2005/03
if is_string_year(prod_date_arr[0]):
year_string = prod_date_arr[0]
if is_string_month(prod_date_arr[1]):
month_string = prod_date_arr[1]
else:
return 0
# 03/2005
elif is_string_year(prod_date_arr[1]):
year_string = prod_date_arr[1]
if is_string_month(prod_date_arr[0]):
month_string = prod_date_arr[0]
else:
return 0
# 25/03/2005
elif len(prod_date_arr) > 2 and is_string_year(prod_date_arr[2]):
year_string = prod_date_arr[2]
if is_string_month(prod_date_arr[1]):
month_string = prod_date_arr[1]
else:
return 0
else:
return 0
prod_date = date(year_string, month_string, 1)
months_old = current_date.month - prod_date.month
years_old = current_date.year - prod_date.year
if months_old < 0 < years_old:
months_old = 12 + months_old
years_old = years_old - 1
price_loss = -months_old
if years_old <= 0 and months_old <= 3:
return price_loss
elif years_old <= 5:
price_loss -= 11 * years_old + 19
elif years_old <= 30:
price_loss -= log10(years_old * 12 + months_old - 36) * 20 + 54
else:
price_loss -= log10(years_old * 12 + months_old - 36) * 20 + 193 - 0.001 * (years_old * 12) ** 2
return round(price_loss / 3)
| 39.812183 | 117 | 0.617876 |
34909f1924065e2ae99317434e3867ce96b0f93c | 1,299 | py | Python | deploy/hubserving/ocr_rec/params.py | qbcbyb/PaddleOCR | 02584696c96d6d799afe2dd4868b7aebecff8a64 | [
"Apache-2.0"
] | null | null | null | deploy/hubserving/ocr_rec/params.py | qbcbyb/PaddleOCR | 02584696c96d6d799afe2dd4868b7aebecff8a64 | [
"Apache-2.0"
] | null | null | null | deploy/hubserving/ocr_rec/params.py | qbcbyb/PaddleOCR | 02584696c96d6d799afe2dd4868b7aebecff8a64 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Config(object):
pass
def read_params():
cfg = Config()
# #params for text detector
# cfg.det_algorithm = "DB"
# cfg.det_model_dir = "./inference/ch_det_mv3_db/"
# cfg.det_max_side_len = 960
# #DB parmas
# cfg.det_db_thresh =0.3
# cfg.det_db_box_thresh =0.5
# cfg.det_db_unclip_ratio =2.0
# #EAST parmas
# cfg.det_east_score_thresh = 0.8
# cfg.det_east_cover_thresh = 0.1
# cfg.det_east_nms_thresh = 0.2
#params for text recognizer
cfg.rec_algorithm = "CRNN"
cfg.rec_model_dir = "./inference/ch_ppocr_mobile_v1.1_det_infer/"
cfg.rec_image_shape = "3, 32, 320"
cfg.rec_char_type = 'ch'
cfg.rec_batch_num = 30
cfg.max_text_length = 25
cfg.rec_char_dict_path = "./ppocr/utils/ppocr_keys_v1.txt"
cfg.use_space_char = True
#params for text classifier
cfg.use_angle_cls = True
cfg.cls_model_dir = "./inference/ch_ppocr_mobile_v1.1_cls_infer/"
cfg.cls_image_shape = "3, 48, 192"
cfg.label_list = ['0', '180']
cfg.cls_batch_num = 30
cfg.cls_thresh = 0.9
cfg.use_zero_copy_run = False
cfg.use_pdserving = False
return cfg
| 24.509434 | 69 | 0.676674 |
f13a038197e7fe0af3b0b111734d25e4b32ca3ce | 745 | py | Python | pacote-download/PycharmProjects/pyhtonexercicios/ex084.py | ThamirisAdriano/Curso-em-video-Python | 5715dfa0d558f7ebe41666a537cccf20fd79ce72 | [
"MIT"
] | 1 | 2020-09-05T16:02:56.000Z | 2020-09-05T16:02:56.000Z | pacote-download/PycharmProjects/pyhtonexercicios/ex084.py | ThamirisAdriano/Curso-em-video-Python | 5715dfa0d558f7ebe41666a537cccf20fd79ce72 | [
"MIT"
] | null | null | null | pacote-download/PycharmProjects/pyhtonexercicios/ex084.py | ThamirisAdriano/Curso-em-video-Python | 5715dfa0d558f7ebe41666a537cccf20fd79ce72 | [
"MIT"
] | null | null | null | temp = []
princ = []
mai = men = 0
while True:
temp.append(str(input('Nome:')))
temp.append(float(input('Peso: ')))
if len(princ) == 0:
mai = men = temp[1]
else:
if temp[1] > mai:
mai = temp[1]
elif temp [1] < men:
men = temp [1]
princ.append(temp[:])
temp.clear()
resp = str(input('Quer continuar? : S/N'))
if resp in 'Nn':
break
print('-='*30)
print(f'Ao todo você cadastrou {len(princ)} pessoas.')
print(f'O maior peso foi de {mai} Kg, peso de', end=' ')
for p in princ:
if p[1] == mai:
print(f'{p[0]}', end=' ')
print(f'\nO menor peso foi {men}Kg, peso de', end=' ')
for p in princ:
if p[1] == men:
print(f'{p[0]}', end= ' ')
| 23.28125 | 56 | 0.503356 |
f9f5eb5a3f572f5405585d3e14a205ac138a9fcc | 1,027 | py | Python | count_kmers.py | nikopech/Snakemake-k-mer | bb12bb241e15eafa390e2b87fee36ff801ae6b1b | [
"MIT"
] | null | null | null | count_kmers.py | nikopech/Snakemake-k-mer | bb12bb241e15eafa390e2b87fee36ff801ae6b1b | [
"MIT"
] | null | null | null | count_kmers.py | nikopech/Snakemake-k-mer | bb12bb241e15eafa390e2b87fee36ff801ae6b1b | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pylab as plt
import pandas as pd
def count_kmers(read, k, counts):
"""Count kmer occurrences in a given read.
Parameters
----------
read : string
A single DNA sequence.
k : int
The value of k for which to count kmers.
Returns
-------
counts : dictionary, {'string': int}
A dictionary of counts keyed by their individual kmers (strings
of length k).
Examples
--------
>>> count_kmers("GATGAT", 3)
{'ATG': 1, 'GAT': 2, 'TGA': 1}
"""
# Calculate how many kmers of length k there are
num_kmers = len(read) - k + 1
# Loop over the kmer start positions
for i in range(num_kmers):
# Slice the string to get the kmer
kmer = read[i:i+k]
# Add the kmer to the dictionary if it's not there
if kmer not in counts:
counts[kmer] = 0
# Increment the count for this kmer
counts[kmer] += 1
# Return the final counts
return counts
| 25.675 | 71 | 0.582278 |
e3c8d198d368f2881051aa50d72c83c0422f574a | 468 | py | Python | classes/migrations/0011_classinstance_syllabus.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | classes/migrations/0011_classinstance_syllabus.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | classes/migrations/0011_classinstance_syllabus.py | ericrobskyhuntley/vialab.mit.edu | 1318d03b8eeb106c1662052e1caa53290e206ae7 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.4 on 2020-12-17 19:20
import classes.models
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('classes', '0010_auto_20201217_1138'),
]
operations = [
migrations.AddField(
model_name='classinstance',
name='syllabus',
field=models.FileField(blank=True, default='', upload_to=classes.models.syllabus_filename),
),
]
| 23.4 | 103 | 0.641026 |
c8591dcf58ea245693de9dd345250b81d1d9c04a | 18,288 | py | Python | examples/language_model/bert/create_pretraining_data.py | tangzhiyi11/PaddleNLP | c58a1c6c90e146178276cccb122fae8fdafa0143 | [
"Apache-2.0"
] | 2 | 2021-06-23T08:22:09.000Z | 2021-10-31T04:28:23.000Z | examples/language_model/bert/create_pretraining_data.py | xiemoyuan/PaddleNLP | e73a02d09dbc249dcd3aa721c507a81cc905210c | [
"Apache-2.0"
] | 1 | 2021-11-22T08:11:08.000Z | 2021-11-22T08:11:08.000Z | examples/language_model/bert/create_pretraining_data.py | xiemoyuan/PaddleNLP | e73a02d09dbc249dcd3aa721c507a81cc905210c | [
"Apache-2.0"
] | 1 | 2021-04-28T09:01:37.000Z | 2021-04-28T09:01:37.000Z | # coding=utf-8
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
# Copyright (c) 2019 NVIDIA CORPORATION. All rights reserved.
# Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm TF examples for BERT."""
from __future__ import absolute_import, division, print_function, unicode_literals
import argparse
import logging
import os
import random
from io import open
import h5py
import numpy as np
from tqdm import tqdm
from paddlenlp.transformers import BertTokenizer
from paddlenlp.transformers.tokenizer_utils import convert_to_unicode
import collections
class TrainingInstance(object):
"""A single training instance (sentence pair)."""
def __init__(self, tokens, segment_ids, masked_lm_positions,
masked_lm_labels, is_random_next):
self.tokens = tokens
self.segment_ids = segment_ids
self.is_random_next = is_random_next
self.masked_lm_positions = masked_lm_positions
self.masked_lm_labels = masked_lm_labels
def write_instance_to_example_file(instances, tokenizer, max_seq_length,
max_predictions_per_seq, output_file):
"""Create TF example files from `TrainingInstance`s."""
total_written = 0
features = collections.OrderedDict()
num_instances = len(instances)
features["input_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["input_mask"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["segment_ids"] = np.zeros(
[num_instances, max_seq_length], dtype="int32")
features["masked_lm_positions"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["masked_lm_ids"] = np.zeros(
[num_instances, max_predictions_per_seq], dtype="int32")
features["next_sentence_labels"] = np.zeros(num_instances, dtype="int32")
for inst_index, instance in enumerate(tqdm(instances)):
input_ids = tokenizer.convert_tokens_to_ids(instance.tokens)
input_mask = [1] * len(input_ids)
segment_ids = list(instance.segment_ids)
assert len(input_ids) <= max_seq_length
while len(input_ids) < max_seq_length:
input_ids.append(0)
input_mask.append(0)
segment_ids.append(0)
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
masked_lm_positions = list(instance.masked_lm_positions)
masked_lm_ids = tokenizer.convert_tokens_to_ids(
instance.masked_lm_labels)
masked_lm_weights = [1.0] * len(masked_lm_ids)
while len(masked_lm_positions) < max_predictions_per_seq:
masked_lm_positions.append(0)
masked_lm_ids.append(0)
masked_lm_weights.append(0.0)
next_sentence_label = 1 if instance.is_random_next else 0
features["input_ids"][inst_index] = input_ids
features["input_mask"][inst_index] = input_mask
features["segment_ids"][inst_index] = segment_ids
features["masked_lm_positions"][inst_index] = masked_lm_positions
features["masked_lm_ids"][inst_index] = masked_lm_ids
features["next_sentence_labels"][inst_index] = next_sentence_label
total_written += 1
print("saving data")
f = h5py.File(output_file, 'w')
f.create_dataset(
"input_ids", data=features["input_ids"], dtype='i4', compression='gzip')
f.create_dataset(
"input_mask",
data=features["input_mask"],
dtype='i1',
compression='gzip')
f.create_dataset(
"segment_ids",
data=features["segment_ids"],
dtype='i1',
compression='gzip')
f.create_dataset(
"masked_lm_positions",
data=features["masked_lm_positions"],
dtype='i4',
compression='gzip')
f.create_dataset(
"masked_lm_ids",
data=features["masked_lm_ids"],
dtype='i4',
compression='gzip')
f.create_dataset(
"next_sentence_labels",
data=features["next_sentence_labels"],
dtype='i1',
compression='gzip')
f.flush()
f.close()
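# Illustrative sketch (not from the original script): the HDF5 file written above can be
# read back with h5py, with shapes following the layout used for the datasets:
#   with h5py.File("training_data.hdf5", "r") as f:          # hypothetical file name
#       input_ids = f["input_ids"][:]                         # (num_instances, max_seq_length)
#       masked_lm_positions = f["masked_lm_positions"][:]     # (num_instances, max_predictions_per_seq)
#       next_sentence_labels = f["next_sentence_labels"][:]   # (num_instances,)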
def create_training_instances(input_files, tokenizer, max_seq_length,
dupe_factor, short_seq_prob, masked_lm_prob,
max_predictions_per_seq, rng):
"""Create `TrainingInstance`s from raw text."""
all_documents = [[]]
# Input file format:
# (1) One sentence per line. These should ideally be actual sentences, not
# entire paragraphs or arbitrary spans of text. (Because we use the
# sentence boundaries for the "next sentence prediction" task).
# (2) Blank lines between documents. Document boundaries are needed so
# that the "next sentence prediction" task doesn't span between documents.
for input_file in input_files:
print("creating instance from {}".format(input_file))
with open(input_file, "r") as reader:
while True:
line = convert_to_unicode(reader.readline())
if not line:
break
line = line.strip()
# Empty lines are used as document delimiters
if not line:
all_documents.append([])
tokens = tokenizer.tokenize(line)
if tokens:
all_documents[-1].append(tokens)
# Remove empty documents
all_documents = [x for x in all_documents if x]
rng.shuffle(all_documents)
# vocab_words = list(tokenizer.vocab.keys())
vocab_words = list(tokenizer.vocab.token_to_idx.keys())
instances = []
for _ in range(dupe_factor):
for document_index in range(len(all_documents)):
instances.extend(
create_instances_from_document(
all_documents, document_index, max_seq_length,
short_seq_prob, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng))
rng.shuffle(instances)
return instances
def create_instances_from_document(
all_documents, document_index, max_seq_length, short_seq_prob,
masked_lm_prob, max_predictions_per_seq, vocab_words, rng):
"""Creates `TrainingInstance`s for a single document."""
document = all_documents[document_index]
# Account for [CLS], [SEP], [SEP]
max_num_tokens = max_seq_length - 3
# We *usually* want to fill up the entire sequence since we are padding
# to `max_seq_length` anyways, so short sequences are generally wasted
# computation. However, we *sometimes*
# (i.e., short_seq_prob == 0.1 == 10% of the time) want to use shorter
# sequences to minimize the mismatch between pre-training and fine-tuning.
# The `target_seq_length` is just a rough target however, whereas
# `max_seq_length` is a hard limit.
target_seq_length = max_num_tokens
if rng.random() < short_seq_prob:
target_seq_length = rng.randint(2, max_num_tokens)
# We DON'T just concatenate all of the tokens from a document into a long
# sequence and choose an arbitrary split point because this would make the
# next sentence prediction task too easy. Instead, we split the input into
# segments "A" and "B" based on the actual "sentences" provided by the user
# input.
instances = []
current_chunk = []
current_length = 0
i = 0
while i < len(document):
segment = document[i]
current_chunk.append(segment)
current_length += len(segment)
if i == len(document) - 1 or current_length >= target_seq_length:
if current_chunk:
# `a_end` is how many segments from `current_chunk` go into the `A`
# (first) sentence.
a_end = 1
if len(current_chunk) >= 2:
a_end = rng.randint(1, len(current_chunk) - 1)
tokens_a = []
for j in range(a_end):
tokens_a.extend(current_chunk[j])
tokens_b = []
# Random next
is_random_next = False
if len(current_chunk) == 1 or rng.random() < 0.5:
is_random_next = True
target_b_length = target_seq_length - len(tokens_a)
# This should rarely go for more than one iteration for large
# corpora. However, just to be careful, we try to make sure that
# the random document is not the same as the document
# we're processing.
for _ in range(10):
random_document_index = rng.randint(
0, len(all_documents) - 1)
if random_document_index != document_index:
break
#If picked random document is the same as the current document
if random_document_index == document_index:
is_random_next = False
random_document = all_documents[random_document_index]
random_start = rng.randint(0, len(random_document) - 1)
for j in range(random_start, len(random_document)):
tokens_b.extend(random_document[j])
if len(tokens_b) >= target_b_length:
break
# We didn't actually use these segments so we "put them back" so
# they don't go to waste.
num_unused_segments = len(current_chunk) - a_end
i -= num_unused_segments
# Actual next
else:
is_random_next = False
for j in range(a_end, len(current_chunk)):
tokens_b.extend(current_chunk[j])
truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng)
assert len(tokens_a) >= 1
assert len(tokens_b) >= 1
tokens = []
segment_ids = []
tokens.append("[CLS]")
segment_ids.append(0)
for token in tokens_a:
tokens.append(token)
segment_ids.append(0)
tokens.append("[SEP]")
segment_ids.append(0)
for token in tokens_b:
tokens.append(token)
segment_ids.append(1)
tokens.append("[SEP]")
segment_ids.append(1)
(tokens, masked_lm_positions,
masked_lm_labels) = create_masked_lm_predictions(
tokens, masked_lm_prob, max_predictions_per_seq,
vocab_words, rng)
instance = TrainingInstance(
tokens=tokens,
segment_ids=segment_ids,
is_random_next=is_random_next,
masked_lm_positions=masked_lm_positions,
masked_lm_labels=masked_lm_labels)
instances.append(instance)
current_chunk = []
current_length = 0
i += 1
return instances
MaskedLmInstance = collections.namedtuple("MaskedLmInstance",
["index", "label"])
def create_masked_lm_predictions(tokens, masked_lm_prob,
max_predictions_per_seq, vocab_words, rng):
"""Creates the predictions for the masked LM objective."""
cand_indexes = []
for (i, token) in enumerate(tokens):
if token == "[CLS]" or token == "[SEP]":
continue
cand_indexes.append(i)
rng.shuffle(cand_indexes)
output_tokens = list(tokens)
num_to_predict = min(max_predictions_per_seq,
max(1, int(round(len(tokens) * masked_lm_prob))))
masked_lms = []
covered_indexes = set()
for index in cand_indexes:
if len(masked_lms) >= num_to_predict:
break
if index in covered_indexes:
continue
covered_indexes.add(index)
masked_token = None
# 80% of the time, replace with [MASK]
if rng.random() < 0.8:
masked_token = "[MASK]"
else:
# 10% of the time, keep original
if rng.random() < 0.5:
masked_token = tokens[index]
# 10% of the time, replace with random word
else:
masked_token = vocab_words[rng.randint(0, len(vocab_words) - 1)]
output_tokens[index] = masked_token
masked_lms.append(MaskedLmInstance(index=index, label=tokens[index]))
masked_lms = sorted(masked_lms, key=lambda x: x.index)
masked_lm_positions = []
masked_lm_labels = []
for p in masked_lms:
masked_lm_positions.append(p.index)
masked_lm_labels.append(p.label)
return (output_tokens, masked_lm_positions, masked_lm_labels)
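# Added illustration (not part of the original script): a tiny, self-contained
# sketch of how create_masked_lm_predictions behaves on toy tokens. The helper
# name and the toy vocabulary below are assumptions for demonstration only; the
# function is never called by the pipeline.
def _demo_create_masked_lm_predictions():
    demo_rng = random.Random(12345)
    demo_tokens = ["[CLS]", "the", "dog", "is", "cute", "[SEP]"]
    out_tokens, positions, labels = create_masked_lm_predictions(
        demo_tokens, masked_lm_prob=0.15, max_predictions_per_seq=2,
        vocab_words=["the", "dog", "cat", "cute"], rng=demo_rng)
    # `positions` holds the indices chosen for prediction and `labels` the
    # original tokens at those positions; in `out_tokens` they are replaced by
    # [MASK], kept, or swapped for a random vocab word with 80/10/10 probability.
    return out_tokens, positions, labels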
def truncate_seq_pair(tokens_a, tokens_b, max_num_tokens, rng):
"""Truncates a pair of sequences to a maximum sequence length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_num_tokens:
break
trunc_tokens = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
assert len(trunc_tokens) >= 1
# We want to sometimes truncate from the front and sometimes from the
# back to add more randomness and avoid biases.
if rng.random() < 0.5:
del trunc_tokens[0]
else:
trunc_tokens.pop()
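# Added illustration (not part of the original script): truncate_seq_pair trims
# the longer of the two token lists in place, randomly from the front or the
# back, until the pair fits in max_num_tokens. Everything below is a
# hypothetical demo helper that the pipeline never calls.
def _demo_truncate_seq_pair():
    demo_rng = random.Random(0)
    tokens_a = list("abcdefgh")
    tokens_b = list("xyz")
    truncate_seq_pair(tokens_a, tokens_b, max_num_tokens=6, rng=demo_rng)
    # After the call, len(tokens_a) + len(tokens_b) <= 6, with the longer list
    # (tokens_a here) shortened in place.
    return tokens_a, tokens_b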
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
default=None,
type=str,
required=True,
help="The input train corpus. can be directory with .txt files or a path to a single file"
)
parser.add_argument(
"--output_file",
default=None,
type=str,
required=True,
help="The output file where created hdf5 formatted data will be written."
)
parser.add_argument(
"--vocab_file",
default=None,
type=str,
required=False,
help="The vocabulary the BERT model will train on. "
"Use bert_model argument would ignore this. "
"The bert_model argument is recommended.")
parser.add_argument(
"--do_lower_case",
action='store_true',
default=True,
help="Whether to lower case the input text. True for uncased models, False for cased models. "
"Use bert_model argument would ignore this. The bert_model argument is recommended."
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
required=False,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
"If provided, use the pre-trained model used tokenizer to create data "
"and ignore vocab_file and do_lower_case.")
## Other parameters
#int
parser.add_argument(
"--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument(
"--dupe_factor",
default=10,
type=int,
help="Number of times to duplicate the input data (with different masks)."
)
parser.add_argument(
"--max_predictions_per_seq",
default=20,
type=int,
help="Maximum number of masked LM predictions per sequence.")
# floats
parser.add_argument(
"--masked_lm_prob",
default=0.15,
type=float,
help="Masked LM probability.")
parser.add_argument(
"--short_seq_prob",
default=0.1,
type=float,
help="Probability to create a sequence shorter than maximum sequence length"
)
parser.add_argument(
'--random_seed',
type=int,
default=12345,
help="random seed for initialization")
args = parser.parse_args()
print(args)
if args.bert_model:
tokenizer = BertTokenizer.from_pretrained(args.bert_model)
else:
assert args.vocab_file, (
"vocab_file must be set If bert_model is not provided.")
tokenizer = BertTokenizer(
args.vocab_file, do_lower_case=args.do_lower_case)
input_files = []
if os.path.isfile(args.input_file):
input_files.append(args.input_file)
elif os.path.isdir(args.input_file):
input_files = [
os.path.join(args.input_file, f)
for f in os.listdir(args.input_file)
if (os.path.isfile(os.path.join(args.input_file, f)) and f.endswith(
'.txt'))
]
else:
raise ValueError("{} is not a valid path".format(args.input_file))
rng = random.Random(args.random_seed)
instances = create_training_instances(
input_files, tokenizer, args.max_seq_length, args.dupe_factor,
args.short_seq_prob, args.masked_lm_prob, args.max_predictions_per_seq,
rng)
output_file = args.output_file
write_instance_to_example_file(instances, tokenizer, args.max_seq_length,
args.max_predictions_per_seq, output_file)
if __name__ == "__main__":
main()
| 36.576 | 102 | 0.615923 |
bd91ffc23ddd01df578b30083be7a945ff202e39 | 18,836 | py | Python | tms_ur/tms_ur_listener/script/tms_ur_listener_server.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | tms_ur/tms_ur_listener/script/tms_ur_listener_server.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | tms_ur/tms_ur_listener/script/tms_ur_listener_server.py | SigmaHayashi/ros_tms_for_smart_previewed_reality | 4ace908bd3da0519246b3c45d0230cbd02e49da0 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
import rospy
from tms_ur_listener.msg import julius_msg
from tms_ur_listener.srv import gSpeech_msg
from tms_ur_speaker.srv import *
from std_msgs.msg import Bool
from std_msgs.msg import String
from std_msgs.msg import Int32
from janome.tokenizer import Tokenizer
from tms_msg_db.msg import Tmsdb
from tms_msg_db.srv import TmsdbGetData
from tms_msg_ts.srv import ts_req
import requests
import time
import subprocess
import shlex
import json
import datetime
import threading
import urllib
trigger = ['ROS-TMS']
error_msg0 = "すみません。聞き取れませんでした。"
error_msg1 = "すみません。よくわかりませんでした。"
error_msg2 = "エラーが発生したため、処理を中断します"
sid = 100000
class TmsUrListener():
def __init__(self):
rospy.init_node("tms_ur_listener")
rospy.on_shutdown(self.shutdown)
rospy.Subscriber("/pi0/julius_msg",julius_msg,self.callback, callback_args=0)
rospy.Subscriber("/pi1/julius_msg",julius_msg,self.callback, callback_args=1)
rospy.Subscriber("/pi2/julius_msg",julius_msg,self.callback, callback_args=2)
rospy.Subscriber("/pi3/julius_msg",julius_msg,self.callback, callback_args=3)
rospy.Subscriber("/pi4/julius_msg",julius_msg,self.callback, callback_args=4)
rospy.Subscriber("/pi5/julius_msg",julius_msg,self.callback, callback_args=5)
rospy.Subscriber("/watch_msg",String,self.callback, callback_args=100)
self.gSpeech_launched = False
self.julius_flag = True
self.timer = threading.Timer(1,self.alarm)
self.power_pub = rospy.Publisher("julius_power",Bool,queue_size=10)
self.speaker_pub = rospy.Publisher("speaker",String,queue_size=10)
self.bed_pub = rospy.Publisher("rc_bed",Int32,queue_size=10)
self.tok = Tokenizer()
f = open('/home/rts/apikey','r')
for line in f:
self.apikey = line.replace('\n','')
f.close()
print 'tms_ur_listener_server ready...'
def alarm(self):
while True:
print "alarm"
self.speaker('\sound4')
time.sleep(1.5)
temp_dbdata=Tmsdb()
temp_dbdata.id = 1100
temp_dbdata.state = 1
target = self.db_reader(temp_dbdata)
if target is None:
self.announce(error_msg2)
return
print 'rp:'+str(target.rp)
if target.rp>-0.2:
break
def julius_power(self,data,t=0):
if self.julius_flag != data:
msg = Bool()
msg.data = data
time.sleep(float(t))
self.power_pub.publish(msg)
self.julius_flag = data
if data == True:
time.sleep(1.5)
self.speaker('\sound3')
def launch_gSpeech(self,id):
servicename = '/pi' + str(id) + '/gSpeech'
rospy.wait_for_service(servicename)
try:
gspeech = rospy.ServiceProxy(servicename,gSpeech_msg)
response = gspeech()
print response
return response
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def speaker(self,data):
speak = String()
speak.data = data
self.speaker_pub.publish(speak)
def announce(self,data):
print data
rospy.wait_for_service('speaker_srv', timeout=1.0)
tim = 0
try:
speak = rospy.ServiceProxy('speaker_srv',speaker_srv)
tim = speak(data)
except rospy.ServiceException, e:
print "Service call failed: %s" % e
return tim
def db_reader(self,data):
rospy.wait_for_service('tms_db_reader')
try:
tms_db_reader = rospy.ServiceProxy('tms_db_reader', TmsdbGetData)
res = tms_db_reader(data)
return res
except rospy.ServiceException as e:
print "Service call failed: %s" % e
return None
def tag_reader(self,data):
temp_dbdata = Tmsdb()
temp_dbdata.tag='「'+data+'」'
target = self.db_reader(temp_dbdata)
return target
def callback(self, data, id):
rospy.loginfo(str(id))
rospy.loginfo(data)
if id < 100:
if data.data not in trigger:
return
if self.gSpeech_launched == True:
return
self.gSpeech_launched = True
rospy.loginfo("call trigger on raspi:%d",id)
rospy.loginfo("kill julius!!")
self.julius_power(False)
self.speaker("\sound1")
time.sleep(0.5)
data = self.launch_gSpeech(id)
self.gSpeech_launched = False
if data.data == "":
tim = self.announce(error_msg0)
self.julius_power(True,tim.sec)
return
rospy.loginfo("get command!")
tokens = self.tok.tokenize(data.data.decode('utf-8'))
words = []
verb = ''
for token in tokens:
print token
if token.part_of_speech.split(',')[0] == u'動詞':
verb += token.base_form.encode('utf-8')
elif token.part_of_speech.split(',')[0] == u'名詞':
if token.base_form.encode('utf-8') != "*":
words.append(token.base_form.encode('utf-8'))
else:
words.append(token.surface.encode('utf-8'))
if verb != '':
words.append(verb)
if "言う" in words: #「〇〇に行って」が「〇〇に言って」と認識される
words.append("行く")
if "入る" in words: #同上
words.append("行く")
print str(words).decode('string-escape')
task_id = 0
robot_id = 0
object_id = 0
user_id = 1100
place_id = 0
announce = ""
robot_name = ""
object_name = ""
user_name = "太郎さん"
place_name = ""
task_dic = {}
robot_dic = {}
object_dic = {}
user_dic = {1100:"太郎さん"}
place_dic = {}
other_words = []
for word in words:
res = self.tag_reader(word)
if res is None:
tim = self.announce(error_msg2)
self.julius_power(True,tim.sec)
return
for target in res.tmsdb:
if target.type == 'task':
task_dic[target.id] = target.announce
elif target.type == 'robot':
robot_dic[target.id] = target.announce
elif target.type == 'object':
object_dic[target.id] = target.announce
elif target.type == 'person':
user_dic[target.id] = target.announce
elif target.type == 'furniture':
place_dic[target.id] = target.announce
else:
other_words.append(word)
print "task:" + str(task_dic)
print "robot:" + str(robot_dic)
print "object:" + str(object_dic)
print "user:" + str(user_dic)
print "place:" + str(place_dic)
if len(task_dic) == 1:
task_id = task_dic.keys()[0]
announce = task_dic[task_id]
elif len(task_dic) > 1:
print "len(task_dic) > 1"
# not implemented yet
task_id = task_dic.keys()[0]
announce = task_dic[task_id]
if task_id == 0:
print 'ask docomo Q&A api'
print data.data
urlenc = urllib.quote(data.data)
args = "curl -s 'https://api.apigw.smt.docomo.ne.jp/knowledgeQA/v1/ask?APIKEY=" + self.apikey + "&q=" + urlenc + "'"
ret = subprocess.check_output(shlex.split(args))
json_dict = json.loads(ret,"utf-8")
announce = "すみません、わかりませんでした。"
if "message" in json_dict:
print json_dict["message"]["textForDisplay"]
announce = json_dict["message"]["textForSpeech"]
tim = self.announce(announce)
self.julius_power(True,tim.sec)
return
elif task_id == 8100: #search_object
if len(object_dic) == 1:
object_id = object_dic.keys()[0]
object_name = object_dic[object_id]
elif len(object_dic) > 1:
print "len(object_dic) > 1"
# not implemented yet
place_id = 0
place_name = ""
temp_dbdata = Tmsdb()
temp_dbdata.id = object_id
temp_dbdata.state = 1
target = self.db_reader(temp_dbdata)
if target is None:
tim = self.announce(error_msg2)
self.julius_power(True,tim.sec)
return
place_id = target.place
temp_dbdata = Tmsdb()
temp_dbdata.id = place_id + sid
target = self.db_reader(temp_dbdata)
if target is None:
tim = self.announce(error_msg2)
self.julius_power(True,tim.sec)
return
place_name = target.announce
if object_name == "" or place_name == "":
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "object":
announce += object_name
elif anc == "place":
announce += place_name
else:
announce += anc
tim = self.announce(announce)
self.julius_power(True,tim.sec)
elif task_id == 8101: #weather_forecast
place = "福岡市"
date = ""
weather = ""
for word in other_words:
if word in ['今日','明日','明後日','あさって']:
date = word
if date == "":
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
args = "curl -s http://weather.livedoor.com/forecast/webservice/json/v1\?city\=400010"
ret = subprocess.check_output(shlex.split(args))
json_dict = json.loads(ret,"utf-8")
if "forecasts" in json_dict:
if date == '今日':
weather = json_dict["forecasts"][0]["telop"].encode('utf-8')
elif date == '明日':
weather = json_dict["forecasts"][1]["telop"].encode('utf-8')
elif date == '明後日' or date == 'あさって':
weather = json_dict["forecasts"][2]["telop"].encode('utf-8')
if weather == "":
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "place":
announce += place
elif anc == "date":
announce += date
elif anc == "weather":
announce += weather
else:
announce += anc
tim = self.announce(announce)
self.julius_power(True,tim.sec)
elif task_id == 8102: #set_alarm
today = datetime.datetime.today()
print 'now:' + today.strftime("%Y/%m/%d %H:%M:%S")
if today.hour < 6:
date = 0
else:
date = 1
hour = -1
minute = 0
for i,word in enumerate(other_words):
if word == "今日":
date = 0
elif word == "明日" and today.hour > 6:
date = 1
elif word in ["時","時半"] and i>0:
if words[i-1].isdigit():
hour = int(words[i-1])
if word == "時半":
minute = 30
if i>1 and words[i-2] == "午後" and hour <=12:
hour += 12
elif i>1 and words[i-2] == "夜" and hour <=12 and hour>=6:
hour += 12
elif word == "分":
if words[i-1].isdigit():
minute = int(words[i-1])
print "d:"+str(date)+" h:"+str(hour)+" m:"+str(minute)
if hour == -1:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
tgt_time = datetime.datetime(today.year,today.month,today.day,hour,minute,0,0)
tgt_time += datetime.timedelta(date)
print 'tgt_time:' + tgt_time.strftime("%Y/%m/%d %H:%M:%S")
offset = tgt_time - today
print 'offset_sec:' + str(offset.seconds)
if offset.seconds < 0:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
self.timer = threading.Timer(15,self.alarm)#(offset.seconds,self.alarm)
self.timer.start()
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "date":
announce += str(tgt_time.month)+"月"+str(tgt_time.day)+"日"
elif anc == "time":
announce += str(tgt_time.hour)+"時"+str(tgt_time.minute)+"分"
else:
announce += anc
tim = self.announce(announce)
self.julius_power(True,tim.sec)
elif task_id == 8103:
url = "http://192.168.100.101/codemari_kyudai/CodemariServlet?deviceID=9999&locale=ja&cmd=%251CFLP%"
onoff = ""
if "つける" in other_words:
print "light on"
onoff = "付け"
url += "2003"
elif "消す" in other_words:
print "light off"
onoff = "消し"
url += "2005"
else:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "onoff":
announce += onoff
else:
announce += anc
tim = self.announce(announce)
self.julius_power(True,tim.sec)
res = requests.get(url)
print res.text
elif task_id == 8104:
msg = Int32()
cmd = ""
if "起こす" in words:
msg.data = 1
cmd = "を起こし"
elif "寝かせる" in words:
msg.data = 2
cmd = "を寝かせ"
elif "立てる" in words:
msg.data = 3
cmd = "を立て"
elif "倒す" in words:
msg.data = 4
cmd = "を倒し"
elif "上げる" in words:
msg.data = 7
cmd = "の高さを上げ"
elif "下げる" in words:
msg.data = 8
cmd = "の高さを下げ"
else:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "cmd":
announce += cmd
else:
announce += anc
tim = self.announce(announce)
self.julius_power(True,tim.sec)
self.bed_pub.publish(msg)
else: #robot_task
anc_list = announce.split("$")
announce = ""
for anc in anc_list:
if anc == "robot":
if len(robot_dic) == 1:
robot_id = robot_dic.keys()[0]
robot_name = robot_dic[robot_id]
elif len(robot_dic) > 1:
print "len(robot_dic) > 1"
# not implemented yet
if robot_id==0:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
announce += robot_name
elif anc == "object":
if len(object_dic) == 1:
object_id = object_dic.keys()[0]
object_name = object_dic[object_id]
elif len(object_dic) > 1:
print "len(object_dic) > 1"
# not implemented yet
if object_id==0:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
announce += object_name
elif anc == "user":
if len(user_dic) == 1:
user_id = user_dic.keys()[0]
user_name = user_dic[user_id]
elif len(user_dic) > 1:
print "len(user_dic) > 1"
# not implemented yet
if user_id==0:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
announce += user_name
elif anc == "place":
if len(place_dic) == 1:
place_id = place_dic.keys()[0]
place_name = place_dic[place_id]
elif len(place_dic) > 1:
print "len(place_dic) > 1"
# not implemented yet
if place_id==0:
tim = self.announce(error_msg1)
self.julius_power(True,tim.sec)
return
announce += place_name
else:
announce += anc
print 'send command'
try:
rospy.wait_for_service('tms_ts_master', timeout=1.0)
except rospy.ROSException:
print "tms_ts_master is not work"
try:
tms_ts_master = rospy.ServiceProxy('tms_ts_master',ts_req)
res = tms_ts_master(0,task_id,robot_id,object_id,user_id,place_id,0)
print res
except rospy.ServiceException as e:
print "Service call failed: %s" % e
tim = self.announce(announce)
self.julius_power(True,tim.sec)
def shutdown(self):
rospy.loginfo("Stopping the node")
if __name__ == '__main__':
try:
TmsUrListener()
rospy.spin()
except rospy.ROSInterruptException:
rospy.loginfo("tms_ur_listener node terminated.")
| 35.809886 | 128 | 0.486515 |
9e7e8b5208fc8f47c8f5fb89a694c68a70a1e7b5 | 4,729 | py | Python | app/eg018_envelope_custom_field_data.py | OleksiiSemko/eg-03-python-auth-code-grant | ecd534b3d9ba7da981bf19705883d44b34909011 | [
"MIT"
] | null | null | null | app/eg018_envelope_custom_field_data.py | OleksiiSemko/eg-03-python-auth-code-grant | ecd534b3d9ba7da981bf19705883d44b34909011 | [
"MIT"
] | null | null | null | app/eg018_envelope_custom_field_data.py | OleksiiSemko/eg-03-python-auth-code-grant | ecd534b3d9ba7da981bf19705883d44b34909011 | [
"MIT"
] | null | null | null | """018: Get an envelope's custom field data"""
from flask import render_template, url_for, redirect, session, flash, request
from os import path
import json
from app import app, ds_config, views
from docusign_esign import *
from docusign_esign.client.api_exception import ApiException
eg = "eg018" # reference (and URL) for this example
def controller():
"""Controller router using the HTTP method"""
if request.method == "GET":
return get_controller()
elif request.method == "POST":
return create_controller()
else:
return render_template("404.html"), 404
def create_controller():
"""
1. Check the token
2. Call the worker method
3. Show results
"""
minimum_buffer_min = 3
token_ok = views.ds_token_ok(minimum_buffer_min)
if token_ok and "envelope_id" in session:
# 2. Call the worker method
args = {
"account_id": session["ds_account_id"],
"envelope_id": session["envelope_id"],
"base_path": session["ds_base_path"],
"ds_access_token": session["ds_access_token"],
}
try:
results = worker(args)
except ApiException as err:
error_body_json = err and hasattr(err, "body") and err.body
# We can pull the DocuSign error code and message from the response body
error_body = json.loads(error_body_json)
error_code = error_body and "errorCode" in error_body and error_body["errorCode"]
error_message = error_body and "message" in error_body and error_body["message"]
# In production, you may want to provide customized error messages and
# remediation advice to the user.
return render_template("error.html",
err=err,
error_code=error_code,
error_message=error_message
)
return render_template("example_done.html",
title="Get custom field data",
h1="Envelope custom field data",
message="Results from the EnvelopeCustomFields::list method:",
json=json.dumps(json.dumps(results.to_dict()))
)
elif not token_ok:
flash("Sorry, you need to re-authenticate.")
# We could store the parameters of the requested operation so it could be restarted
# automatically. But since it should be rare to have a token issue here,
# we'll make the user re-enter the form data after authentication.
session["eg"] = url_for(eg)
return redirect(url_for("ds_must_authenticate"))
elif not "envelope_id" in session:
return render_template("eg018_envelope_custom_field_data.html",
title="Envelope Custom Field Data",
envelope_ok=False,
source_file=path.basename(__file__),
source_url=ds_config.DS_CONFIG["github_example_url"] + path.basename(__file__),
documentation=ds_config.DS_CONFIG["documentation"] + eg,
show_doc=ds_config.DS_CONFIG["documentation"],
)
# ***DS.snippet.0.start
def worker(args):
"""
1. Call the envelope get method
"""
# Exceptions will be caught by the calling function
api_client = ApiClient()
api_client.host = args["base_path"]
api_client.set_default_header("Authorization", "Bearer " + args["ds_access_token"])
envelopes_api = EnvelopesApi(api_client)
results = envelopes_api.list_custom_fields(args["account_id"], args["envelope_id"])
return results
# ***DS.snippet.0.end
def get_controller():
"""responds with the form for the example"""
if views.ds_token_ok():
return render_template("eg015_envelope_tab_data.html",
title="Envelope information",
envelope_ok="envelope_id" in session,
source_file=path.basename(__file__),
source_url=ds_config.DS_CONFIG["github_example_url"] + path.basename(__file__),
documentation=ds_config.DS_CONFIG["documentation"] + eg,
show_doc=ds_config.DS_CONFIG["documentation"],
)
else:
# Save the current operation so it will be resumed after authentication
session["eg"] = url_for(eg)
return redirect(url_for("ds_must_authenticate"))
| 43.787037 | 110 | 0.589765 |
3f6e21e9c472a4a6a3f0bc5113b97621cfcb6b7d | 5,359 | py | Python | malaya/__init__.py | illaiza115/malaya | 002a885db0681ebf056f53b3b1b11fd99687eef1 | [
"MIT"
] | 1 | 2021-01-06T07:15:41.000Z | 2021-01-06T07:15:41.000Z | malaya/__init__.py | illaiza115/malaya | 002a885db0681ebf056f53b3b1b11fd99687eef1 | [
"MIT"
] | null | null | null | malaya/__init__.py | illaiza115/malaya | 002a885db0681ebf056f53b3b1b11fd99687eef1 | [
"MIT"
] | null | null | null | # Malaya Natural Language Toolkit
#
# Copyright (C) 2019 Malaya Project
# Licensed under the MIT License
# Author: huseinzol05 <husein.zol05@gmail.com>
# URL: <https://malaya.readthedocs.io/>
# For license information, see https://github.com/huseinzol05/Malaya/blob/master/LICENSE
import os
from shutil import rmtree
from pathlib import Path
import logging
home = os.path.join(str(Path.home()), 'Malaya')
version = '4.0'
bump_version = '4.0.9.1'
version_path = os.path.join(home, 'version')
__version__ = bump_version
path = os.path.dirname(__file__)
def available_gpu():
"""
Get list of GPUs from `nvidia-smi`.
Returns
-------
result : List[str]
"""
percent = []
try:
ns = os.popen('nvidia-smi')
lines_ns = ns.readlines()
for line in lines_ns:
if line.find('%') != -1:
percent.append(int(line.split('%')[-2][-3:]))
percent = [f'/device:GPU:{i}' for i in range(len(percent))]
except:
pass
return percent
def check_malaya_gpu():
import pkg_resources
return 'malaya-gpu' in [p.project_name for p in pkg_resources.working_set]
if check_malaya_gpu():
__gpu__ = available_gpu()
else:
__gpu__ = []
def gpu_available():
"""
Check Malaya is GPU version.
Returns
-------
result : bool
"""
return len(__gpu__) > 0
def _delete_folder(folder):
for root, dirs, files in os.walk(folder):
for file in files:
os.remove(os.path.join(root, file))
def _delete_macos():
macos = os.path.join(home, '__MACOSX')
if os.path.exists(macos):
rmtree(macos)
try:
if not os.path.exists(home):
os.makedirs(home)
except:
raise Exception(
'Malaya cannot make directory for caching. Please check your '
+ str(Path.home())
)
_delete_macos()
if not os.path.isfile(version_path):
_delete_folder(home)
with open(version_path, 'w') as fopen:
fopen.write(version)
else:
with open(version_path, 'r') as fopen:
cached_version = fopen.read()
try:
if float(cached_version) < 1:
_delete_folder(home)
with open(version_path, 'w') as fopen:
fopen.write(version)
except:
_delete_folder(home)
with open(version_path, 'w') as fopen:
fopen.write(version)
def print_cache(location = None):
"""
Print cached data; prints the entire cache folder if location is None.
Parameters
----------
location : str, (default=None)
if location is None, will print entire cache directory.
"""
from malaya.function import DisplayablePath
path = os.path.join(home, location) if location else home
paths = DisplayablePath.make_tree(Path(path))
for path in paths:
print(path.displayable())
def clear_all_cache():
"""
Remove cached data; this deletes the entire cache folder.
"""
_delete_macos()
try:
logging.info('clearing cached models..')
_delete_folder(home)
with open(version_path, 'w') as fopen:
fopen.write(version)
return True
except:
raise Exception(
f'failed to clear cached models. Please make sure {home} is able to overwrite from Malaya'
)
def clear_cache(location):
"""
Remove selected cached data, please run malaya.print_cache() to get path.
Parameters
----------
location : str
Returns
-------
result : boolean
"""
if not isinstance(location, str):
raise ValueError('location must be a string')
location = os.path.join(home, location)
if not os.path.exists(location):
raise Exception(
'folder not exist, please check path from `malaya.print_cache()`'
)
if not os.path.isdir(location):
raise Exception(
'Please use parent directory, please check path from `malaya.print_cache()`'
)
_delete_folder(location)
return True
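# Hedged usage sketch (added; not part of the package): how the cache helpers
# above are typically combined. The 'sentiment' folder name is purely
# illustrative and may not exist in a given cache.
def _demo_cache_helpers():
    print_cache()  # list everything cached under ~/Malaya
    # clear_cache('sentiment')  # remove a single cached model folder, if present
    # clear_all_cache()  # wipe the whole cache folder and recreate it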
def clear_session(model):
"""
Clear session from a model to prevent any out-of-memory or segmentation fault issues.
Parameters
----------
model : malaya object.
Returns
-------
result : boolean
"""
success = False
try:
if hasattr(model, 'sess'):
model.sess.close()
success = True
elif hasattr(model, '_sess'):
model._sess.close()
success = True
except Exception as e:
logging.warning(e)
return success
from . import augmentation
from . import cluster
from . import constituency
from . import dependency
from . import emotion
from . import entity
from . import generator
from . import keyword_extraction
from . import language_detection
from . import lexicon
from . import normalize
from . import nsfw
from . import num2word
from . import paraphrase
from . import pos
from . import preprocessing
from . import relevancy
from . import segmentation
from . import sentiment
from . import similarity
from . import spell
from . import stack
from . import stem
from . import subjectivity
from . import summarization
from . import topic_model
from . import toxicity
from . import transformer
from . import true_case
from . import translation
from . import word2num
from . import wordvector
from . import zero_shot
| 23.199134 | 102 | 0.638365 |
2fd10dc82f26c3bcb247d865753a10c2e13e62cb | 9,189 | py | Python | crossword_generator.py | Axelwickm/CLI-Crossword-Generator | a5ae45425817528f262e8f94163973704da1be4b | [
"MIT"
] | 1 | 2020-08-15T08:37:16.000Z | 2020-08-15T08:37:16.000Z | crossword_generator.py | Axelwickm/CLI-Crossword-Generator | a5ae45425817528f262e8f94163973704da1be4b | [
"MIT"
] | null | null | null | crossword_generator.py | Axelwickm/CLI-Crossword-Generator | a5ae45425817528f262e8f94163973704da1be4b | [
"MIT"
] | null | null | null | import curses
from curses import wrapper
import os
import time
os.environ.setdefault('ESCDELAY', '25')
dict_dict = {}
# What dictionary to use
dictionary_path = "dictionaries/sv_wikidict.txt"
def load_dict(path):
# Load dictionary from file
_words = []
with open(path, "r") as file:
for line in file:
line = line.split(",")
word = line[0].strip()
_words.append(word)
dict_dict[word] = (float(line[1]), int(line[2]))
return _words
# Load at once
words = load_dict(dictionary_path)
def suggest_word(query, after):
# Returns a list of words matching the query. The after parameter also matches words with up to that many additional characters appended
global words
counter = 0
after += 1
suggested = []
start_time = time.time()
while counter < after:
known = query + [None] * counter
counter += 1
for word in words:
if len(word) == len(known):
for i, l in enumerate(word):
if known[i] is None:
pass
elif known[i].lower() == l.lower():
pass
else:
break
else:
suggested.append(word)
return suggested, (time.time()-start_time)*1e3
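# Added usage sketch (not in the original script): None entries act as wildcards,
# so the call below asks for 4-letter dictionary words whose first letter is "k"
# and third letter is "t". Actual matches depend on the loaded dictionary file,
# and this helper is never called by the program itself.
def _demo_suggest_word():
    matches, elapsed_ms = suggest_word(["k", None, "t", None], after=0)
    return matches, elapsed_ms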
# Ask user for dimensions
width = int(input("Width: "))
height = int(input("Height: "))
# Start curses
stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
stdscr.keypad(True)
def main(stdscr):
# Clear screen
stdscr.clear()
stdscr.refresh()
# Initiate window
win = curses.newwin(height+2, width+2, 0, 0)
win.box()
win.move(1, 1)
win.keypad(True)
win.refresh()
# Initiate suggestion color pair
curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_CYAN)
def _print(s, p, attr=0):
# Prints s at given position
op = win.getyx()
win.move(p[0], p[1])
try:
win.addstr(str(s), attr)
except curses.error:
pass # This happens when trying to print off-screen
win.move(op[0], op[1])
def select(o):
if type(o) == tuple:
o = [o]
orgpos = win.getyx()
for pt in o:
selected.append(pt)
for pt in selected:
if pt != orgpos:
win.chgat(pt[0], pt[1], 1, curses.A_STANDOUT)
win.move(orgpos[0], orgpos[1])
def deselect(o):
if type(o) == tuple:
o = [o]
op = win.getyx()
for pt in o[:]:
win.chgat(pt[0], pt[1], 1, curses.A_NORMAL)
selected.remove(pt)
win.refresh()
win.move(op[0], op[1])
k = None
selected = []
select_typing = False
while True:
k = win.getch() # Get user action
if k in [curses.KEY_RIGHT, curses.KEY_LEFT, curses.KEY_DOWN, curses.KEY_UP]:
deselect(selected) # Deselect all
# Move cursor if within bounds:
if k == curses.KEY_RIGHT:
if win.getyx()[1] != width:
win.move(win.getyx()[0], win.getyx()[1]+1)
select_typing = False
elif k == curses.KEY_LEFT:
if win.getyx()[1] != 1:
win.move(win.getyx()[0], win.getyx()[1]-1)
select_typing = False
elif k == curses.KEY_DOWN:
if win.getyx()[0] != height:
win.move(win.getyx()[0]+1, win.getyx()[1])
select_typing = False
elif k == curses.KEY_UP:
if win.getyx()[0] != 1:
win.move(win.getyx()[0]-1, win.getyx()[1])
select_typing = False
elif k == 27: # Escape key to deselect or quit program
if selected:
deselect(selected)
else:
break
elif k == 10:
pass
elif k in [560, 545, 525, 566]: # Ctrl + right, left, bottom, up, to select
select_typing = False
pos = win.getyx()
if not selected:
select(pos)
if k == 560 and pos[1] != width:
win.move(pos[0], pos[1] + 1)
elif k == 545 and pos[1] != 1:
win.move(pos[0], pos[1] - 1)
elif k == 525 and pos[0] != height:
win.move(pos[0]+1, pos[1])
elif k == 566 and pos[0] != 1:
win.move(pos[0]-1, pos[1])
new_pos = win.getyx()
if new_pos in selected:
deselect(pos)
else:
select(new_pos)
elif k == ord('\t'): # Start suggesting words for active selection
if selected:
# Build query
chars = []
for pc in selected:
char = chr(win.inch(pc[0], pc[1]) & 0xFF)
if char == ' ' or char == ' ':
char = None
chars.append(char)
if None not in chars:
continue
# Query words
suggested, exec_time = suggest_word(chars, 0)
# Without alphabetical sort, will be order in file (commonality)
# suggested.sort()
_print(str(exec_time)+"ms", (0, 0))
# If found no suggestions
if not suggested:
win.box()
_print("NO SUGGESTION", (height + 1, 1), curses.A_BOLD)
continue
iterated_words = 0
keep_word = False
while True: # Iterate through found words
win.box()
suggested_word = suggested[iterated_words]
# Print on screen in right color, maybe can be done with _print
for cp, ch in enumerate(suggested_word):
win.move(selected[cp][0], selected[cp][1])
if chars[cp] is None:
if cp == 0:
win.addstr(ch, curses.color_pair(1))
else:
win.addstr(ch, curses.color_pair(1) | curses.A_STANDOUT)
win.move(selected[0][0], selected[0][1])
# Print extra info in lower left edge
word_rate = int(1/(10**dict_dict[suggested_word][0]))
_print(suggested_word+", word rate: 1/"+str(word_rate), (height+1, 1))
win.refresh()
# Get user key
k = win.getch()
if k == 10: # Enter key to break loop and keep word
keep_word = True
elif k == ord('\t'): # Tab to get next word
iterated_words = (iterated_words+1) % len(suggested)
continue
elif k == curses.KEY_BTAB: # Shift+Tab (back-tab) to go back
iterated_words = (len(suggested) + iterated_words - 1) % len(suggested)
continue
break
if keep_word:
deselect(selected)
else: # Reset to old characters
win.box()
for cp, ch in enumerate(chars):
win.move(selected[cp][0], selected[cp][1])
if ch is None:
win.addch(" ", curses.A_STANDOUT)
else:
win.addch(ch, curses.A_STANDOUT)
elif k == curses.KEY_BACKSPACE or k == curses.KEY_DC: # Delete all selected
if selected:
for cp in selected:
win.addch(cp[0], cp[1], " ")
else:
win.addstr(" ")
win.move(win.getyx()[0], win.getyx()[1] - 1)
elif k is not None: # Add manually by typing
try:
if chr(k).isalnum():
if selected: # Add rolling within selection
if not select_typing:
select_typing = True
win.move(selected[0][0], selected[0][1])
if win.getyx() == selected[-1]:
select_typing = False
win.addstr(chr(k).upper(), curses.A_STANDOUT)
win.move(selected[-1][0], selected[-1][1]) # Stay on last if just type last
else:
win.addstr(chr(k).upper(), curses.A_STANDOUT)
win.move(win.getyx()[0], win.getyx()[1]-1)
new_pos = selected[selected.index(win.getyx())+1]
win.move(new_pos[0], new_pos[1])
else:
win.addstr(chr(k).upper())
win.move(win.getyx()[0], win.getyx()[1]-1)
except ValueError as e:
pass
win.refresh()
stdscr.refresh()
# Start program
wrapper(main)
# Quit program
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin() | 34.41573 | 104 | 0.46708 |
1137dbea15095c4bc88057be4ba08ee99c8b4139 | 512 | py | Python | var/spack/repos/builtin/packages/perl-time-piece/package.py | whitfin/spack | aabd2be31a511d0e00c1017f7311a421659319d9 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 3 | 2019-06-27T13:26:50.000Z | 2019-07-01T16:24:54.000Z | var/spack/repos/builtin/packages/perl-time-piece/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 75 | 2016-07-27T11:43:00.000Z | 2020-12-08T15:56:53.000Z | var/spack/repos/builtin/packages/perl-time-piece/package.py | openbiox/spack | bb6ec7fb40c14b37e094a860e3625af53f633174 | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 8 | 2015-10-16T13:51:49.000Z | 2021-10-18T13:58:03.000Z | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PerlTimePiece(PerlPackage):
"""Object Oriented time objects"""
homepage = "http://search.cpan.org/~esaym/Time-Piece/Piece.pm"
url = "http://search.cpan.org/CPAN/authors/id/E/ES/ESAYM/Time-Piece-1.3203.tar.gz"
version('1.3203', '515c1306f123a00116a95335cf543501')
| 32 | 91 | 0.728516 |
711daf7fe415d69753639ffde98597d0b2504556 | 55,657 | py | Python | pandas/tests/groupby/test_categorical.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 1 | 2020-08-18T16:49:16.000Z | 2020-08-18T16:49:16.000Z | pandas/tests/groupby/test_categorical.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | null | null | null | pandas/tests/groupby/test_categorical.py | gabriellm1/pandas | 020040b3b92516b445ddd8daba3b9818340e82d4 | [
"BSD-3-Clause"
] | 2 | 2021-07-17T19:28:31.000Z | 2021-11-28T17:14:58.000Z | from datetime import datetime
import numpy as np
import pytest
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
Index,
MultiIndex,
Series,
qcut,
)
import pandas._testing as tm
def cartesian_product_for_groupers(result, args, names, fill_value=np.NaN):
"""Reindex to a cartesian production for the groupers,
preserving the nature (Categorical) of each grouper
"""
def f(a):
if isinstance(a, (CategoricalIndex, Categorical)):
categories = a.categories
a = Categorical.from_codes(
np.arange(len(categories)), categories=categories, ordered=a.ordered
)
return a
index = MultiIndex.from_product(map(f, args), names=names)
return result.reindex(index, fill_value=fill_value).sort_index()
_results_for_groupbys_with_missing_categories = dict(
# This maps the builtin groupby functions to their expected outputs for
# missing categories when they are called on a categorical grouper with
# observed=False. Some functions are expected to return NaN, some zero.
# These expected values can be used across several tests (i.e. they are
# the same for SeriesGroupBy and DataFrameGroupBy) but they should only be
# hardcoded in one place.
[
("all", np.NaN),
("any", np.NaN),
("count", 0),
("corrwith", np.NaN),
("first", np.NaN),
("idxmax", np.NaN),
("idxmin", np.NaN),
("last", np.NaN),
("mad", np.NaN),
("max", np.NaN),
("mean", np.NaN),
("median", np.NaN),
("min", np.NaN),
("nth", np.NaN),
("nunique", 0),
("prod", np.NaN),
("quantile", np.NaN),
("sem", np.NaN),
("size", 0),
("skew", np.NaN),
("std", np.NaN),
("sum", 0),
("var", np.NaN),
]
)
def test_apply_use_categorical_name(df):
cats = qcut(df.C, 4)
def get_stats(group):
return {
"min": group.min(),
"max": group.max(),
"count": group.count(),
"mean": group.mean(),
}
result = df.groupby(cats, observed=False).D.apply(get_stats)
assert result.index.names[0] == "C"
def test_basic():
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
exp_index = CategoricalIndex(list("abcd"), name="b", ordered=True)
expected = DataFrame({"a": [1, 2, 4, np.nan]}, index=exp_index)
result = data.groupby("b", observed=False).mean()
tm.assert_frame_equal(result, expected)
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
# single grouper
gb = df.groupby("A", observed=False)
exp_idx = CategoricalIndex(["a", "b", "z"], name="A", ordered=True)
expected = DataFrame({"values": Series([3, 7, 0], index=exp_idx)})
result = gb.sum()
tm.assert_frame_equal(result, expected)
# GH 8623
x = DataFrame(
[[1, "John P. Doe"], [2, "Jane Dove"], [1, "John P. Doe"]],
columns=["person_id", "person_name"],
)
x["person_name"] = Categorical(x.person_name)
g = x.groupby(["person_id"], observed=False)
result = g.transform(lambda x: x)
tm.assert_frame_equal(result, x[["person_name"]])
result = x.drop_duplicates("person_name")
expected = x.iloc[[0, 1]]
tm.assert_frame_equal(result, expected)
def f(x):
return x.drop_duplicates("person_name").iloc[0]
result = g.apply(f)
expected = x.iloc[[0, 1]].copy()
expected.index = Index([1, 2], name="person_id")
expected["person_name"] = expected["person_name"].astype("object")
tm.assert_frame_equal(result, expected)
# GH 9921
# Monotonic
df = DataFrame({"a": [5, 15, 25]})
c = pd.cut(df.a, bins=[0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.max(xs)), df[["a"]]
)
# Filter
tm.assert_series_equal(df.a.groupby(c, observed=False).filter(np.all), df["a"])
tm.assert_frame_equal(df.groupby(c, observed=False).filter(np.all), df)
# Non-monotonic
df = DataFrame({"a": [5, 15, 25, -5]})
c = pd.cut(df.a, bins=[-10, 0, 10, 20, 30, 40])
result = df.a.groupby(c, observed=False).transform(sum)
tm.assert_series_equal(result, df["a"])
tm.assert_series_equal(
df.a.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df["a"]
)
tm.assert_frame_equal(df.groupby(c, observed=False).transform(sum), df[["a"]])
tm.assert_frame_equal(
df.groupby(c, observed=False).transform(lambda xs: np.sum(xs)), df[["a"]]
)
# GH 9603
df = DataFrame({"a": [1, 0, 0, 0]})
c = pd.cut(df.a, [0, 1, 2, 3, 4], labels=Categorical(list("abcd")))
result = df.groupby(c, observed=False).apply(len)
exp_index = CategoricalIndex(c.values.categories, ordered=c.values.ordered)
expected = Series([1, 0, 0, 0], index=exp_index)
expected.index.name = "a"
tm.assert_series_equal(result, expected)
# more basic
levels = ["foo", "bar", "baz", "qux"]
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
exp_idx = CategoricalIndex(levels, categories=cats.categories, ordered=True)
expected = expected.reindex(exp_idx)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = np.asarray(cats).take(idx)
ord_data = data.take(idx)
exp_cats = Categorical(
ord_labels, ordered=True, categories=["foo", "bar", "baz", "qux"]
)
expected = ord_data.groupby(exp_cats, sort=False, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_level_get_group(observed):
# GH15155
df = DataFrame(
data=np.arange(2, 22, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(10)],
codes=[[0] * 5 + [1] * 5, range(10)],
names=["Index1", "Index2"],
),
)
g = df.groupby(level=["Index1"], observed=observed)
# expected should equal test.loc[["a"]]
# GH15166
expected = DataFrame(
data=np.arange(2, 12, 2),
index=MultiIndex(
levels=[CategoricalIndex(["a", "b"]), range(5)],
codes=[[0] * 5, range(5)],
names=["Index1", "Index2"],
),
)
result = g.get_group("a")
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
def test_apply(ordered):
# GH 10138
dense = Categorical(list("abc"), ordered=ordered)
# 'b' is in the categories but not in the list
missing = Categorical(list("aaa"), categories=["a", "b"], ordered=ordered)
values = np.arange(len(dense))
df = DataFrame({"missing": missing, "dense": dense, "values": values})
grouped = df.groupby(["missing", "dense"], observed=True)
# missing category 'b' should still exist in the output index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = DataFrame([0, 1, 2.0], index=idx, columns=["values"])
# GH#21636 tracking down the xfail, in some builds np.mean(df.loc[[0]])
# is coming back as Series([0., 1., 0.], index=["missing", "dense", "values"])
# when we expect Series(0., index=["values"])
result = grouped.apply(lambda x: np.mean(x))
tm.assert_frame_equal(result, expected)
# we coerce back to ints
expected = expected.astype("int")
result = grouped.mean()
tm.assert_frame_equal(result, expected)
result = grouped.agg(np.mean)
tm.assert_frame_equal(result, expected)
# but for transform we should still get back the original index
idx = MultiIndex.from_arrays([missing, dense], names=["missing", "dense"])
expected = Series(1, index=idx)
result = grouped.apply(lambda x: 1)
tm.assert_series_equal(result, expected)
def test_observed(observed):
# multiple groupers, don't re-expand the output space
# of the grouper
# gh-14942 (implement)
# gh-10132 (back-compat)
# gh-8138 (back-compat)
# gh-8869
cat1 = Categorical(["a", "a", "b", "b"], categories=["a", "b", "z"], ordered=True)
cat2 = Categorical(["c", "d", "c", "d"], categories=["c", "d", "y"], ordered=True)
df = DataFrame({"A": cat1, "B": cat2, "values": [1, 2, 3, 4]})
df["C"] = ["foo", "bar"] * 2
# multiple groupers with a non-cat
gb = df.groupby(["A", "B", "C"], observed=observed)
exp_index = MultiIndex.from_arrays(
[cat1, cat2, ["foo", "bar"] * 2], names=["A", "B", "C"]
)
expected = DataFrame({"values": Series([1, 2, 3, 4], index=exp_index)}).sort_index()
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2, ["foo", "bar"]], list("ABC"), fill_value=0
)
tm.assert_frame_equal(result, expected)
gb = df.groupby(["A", "B"], observed=observed)
exp_index = MultiIndex.from_arrays([cat1, cat2], names=["A", "B"])
expected = DataFrame({"values": [1, 2, 3, 4]}, index=exp_index)
result = gb.sum()
if not observed:
expected = cartesian_product_for_groupers(
expected, [cat1, cat2], list("AB"), fill_value=0
)
tm.assert_frame_equal(result, expected)
# https://github.com/pandas-dev/pandas/issues/8138
d = {
"cat": Categorical(
["a", "b", "a", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 1, 2, 2],
"val": [10, 20, 30, 40],
}
df = DataFrame(d)
# Grouping on a single column
groups_single_key = df.groupby("cat", observed=observed)
result = groups_single_key.mean()
exp_index = CategoricalIndex(
list("ab"), name="cat", categories=list("abc"), ordered=True
)
expected = DataFrame({"ints": [1.5, 1.5], "val": [20.0, 30]}, index=exp_index)
if not observed:
index = CategoricalIndex(
list("abc"), name="cat", categories=list("abc"), ordered=True
)
expected = expected.reindex(index)
tm.assert_frame_equal(result, expected)
# Grouping on two columns
groups_double_key = df.groupby(["cat", "ints"], observed=observed)
result = groups_double_key.agg("mean")
expected = DataFrame(
{
"val": [10, 30, 20, 40],
"cat": Categorical(
["a", "a", "b", "b"], categories=["a", "b", "c"], ordered=True
),
"ints": [1, 2, 1, 2],
}
).set_index(["cat", "ints"])
if not observed:
expected = cartesian_product_for_groupers(
expected, [df.cat.values, [1, 2]], ["cat", "ints"]
)
tm.assert_frame_equal(result, expected)
# GH 10132
for key in [("a", 1), ("b", 2), ("b", 1), ("a", 2)]:
c, i = key
result = groups_double_key.get_group(key)
expected = df[(df.cat == c) & (df.ints == i)]
tm.assert_frame_equal(result, expected)
# gh-8869
# with as_index
d = {
"foo": [10, 8, 4, 8, 4, 1, 1],
"bar": [10, 20, 30, 40, 50, 60, 70],
"baz": ["d", "c", "e", "a", "a", "d", "c"],
}
df = DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 10, 3))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=False, observed=observed)
result = groups.agg("mean")
groups2 = df.groupby(["range", "baz"], as_index=True, observed=observed)
expected = groups2.agg("mean").reset_index()
tm.assert_frame_equal(result, expected)
def test_observed_codes_remap(observed):
d = {"C1": [3, 3, 4, 5], "C2": [1, 2, 3, 4], "C3": [10, 100, 200, 34]}
df = DataFrame(d)
values = pd.cut(df["C1"], [1, 2, 3, 6])
values.name = "cat"
groups_double_key = df.groupby([values, "C2"], observed=observed)
idx = MultiIndex.from_arrays([values, [1, 2, 3, 4]], names=["cat", "C2"])
expected = DataFrame({"C1": [3, 3, 4, 5], "C3": [10, 100, 200, 34]}, index=idx)
if not observed:
expected = cartesian_product_for_groupers(
expected, [values.values, [1, 2, 3, 4]], ["cat", "C2"]
)
result = groups_double_key.agg("mean")
tm.assert_frame_equal(result, expected)
def test_observed_perf():
# we create a cartesian product, so this is
# non-performant if we don't use observed values
# gh-14942
df = DataFrame(
{
"cat": np.random.randint(0, 255, size=30000),
"int_id": np.random.randint(0, 255, size=30000),
"other_id": np.random.randint(0, 10000, size=30000),
"foo": 0,
}
)
df["cat"] = df.cat.astype(str).astype("category")
grouped = df.groupby(["cat", "int_id", "other_id"], observed=True)
result = grouped.count()
assert result.index.levels[0].nunique() == df.cat.nunique()
assert result.index.levels[1].nunique() == df.int_id.nunique()
assert result.index.levels[2].nunique() == df.other_id.nunique()
def test_observed_groups(observed):
# gh-20583
# test that we have the appropriate groups
cat = Categorical(["a", "c", "a"], categories=["a", "b", "c"])
df = DataFrame({"cat": cat, "vals": [1, 2, 3]})
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64"), "c": Index([1], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"c": Index([1], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_groups_with_nan(observed):
# GH 24740
df = DataFrame(
{
"cat": Categorical(["a", np.nan, "a"], categories=["a", "b", "d"]),
"vals": [1, 2, 3],
}
)
g = df.groupby("cat", observed=observed)
result = g.groups
if observed:
expected = {"a": Index([0, 2], dtype="int64")}
else:
expected = {
"a": Index([0, 2], dtype="int64"),
"b": Index([], dtype="int64"),
"d": Index([], dtype="int64"),
}
tm.assert_dict_equal(result, expected)
def test_observed_nth():
# GH 26385
cat = pd.Categorical(["a", np.nan, np.nan], categories=["a", "b", "c"])
ser = pd.Series([1, 2, 3])
df = pd.DataFrame({"cat": cat, "ser": ser})
result = df.groupby("cat", observed=False)["ser"].nth(0)
index = pd.Categorical(["a", "b", "c"], categories=["a", "b", "c"])
expected = pd.Series([1, np.nan, np.nan], index=index, name="ser")
expected.index.name = "cat"
tm.assert_series_equal(result, expected)
def test_dataframe_categorical_with_nan(observed):
# GH 21151
s1 = Categorical([np.nan, "a", np.nan, "a"], categories=["a", "b", "c"])
s2 = Series([1, 2, 3, 4])
df = DataFrame({"s1": s1, "s2": s2})
result = df.groupby("s1", observed=observed).first().reset_index()
if observed:
expected = DataFrame(
{"s1": Categorical(["a"], categories=["a", "b", "c"]), "s2": [2]}
)
else:
expected = DataFrame(
{
"s1": Categorical(["a", "b", "c"], categories=["a", "b", "c"]),
"s2": [2, np.nan, np.nan],
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("ordered", [True, False])
@pytest.mark.parametrize("observed", [True, False])
@pytest.mark.parametrize("sort", [True, False])
def test_dataframe_categorical_ordered_observed_sort(ordered, observed, sort):
# GH 25871: Fix groupby sorting on ordered Categoricals
# GH 25167: Groupby with observed=True doesn't sort
# Build a dataframe with cat having one unobserved category ('missing'),
# and a Series with identical values
label = Categorical(
["d", "a", "b", "a", "d", "b"],
categories=["a", "b", "missing", "d"],
ordered=ordered,
)
val = Series(["d", "a", "b", "a", "d", "b"])
df = DataFrame({"label": label, "val": val})
# aggregate on the Categorical
result = df.groupby("label", observed=observed, sort=sort)["val"].aggregate("first")
# If ordering works, we expect index labels equal to aggregation results,
# except for 'observed=False': label 'missing' has aggregation None
label = Series(result.index.array, dtype="object")
aggr = Series(result.array)
if not observed:
aggr[aggr.isna()] = "missing"
if not all(label == aggr):
msg = (
"Labels and aggregation results not consistently sorted\n"
f"for (ordered={ordered}, observed={observed}, sort={sort})\n"
f"Result:\n{result}"
)
assert False, msg
def test_datetime():
# GH9049: ensure backward compatibility
levels = pd.date_range("2014-01-01", periods=4)
codes = np.random.randint(0, 4, size=100)
cats = Categorical.from_codes(codes, levels, ordered=True)
data = DataFrame(np.random.randn(100, 4))
result = data.groupby(cats, observed=False).mean()
expected = data.groupby(np.asarray(cats), observed=False).mean()
expected = expected.reindex(levels)
expected.index = CategoricalIndex(
expected.index, categories=expected.index, ordered=True
)
tm.assert_frame_equal(result, expected)
grouped = data.groupby(cats, observed=False)
desc_result = grouped.describe()
idx = cats.codes.argsort()
ord_labels = cats.take(idx)
ord_data = data.take(idx)
expected = ord_data.groupby(ord_labels, observed=False).describe()
tm.assert_frame_equal(desc_result, expected)
tm.assert_index_equal(desc_result.index, expected.index)
tm.assert_index_equal(
desc_result.index.get_level_values(0), expected.index.get_level_values(0)
)
# GH 10460
expc = Categorical.from_codes(np.arange(4).repeat(8), levels, ordered=True)
exp = CategoricalIndex(expc)
tm.assert_index_equal((desc_result.stack().index.get_level_values(0)), exp)
exp = Index(["count", "mean", "std", "min", "25%", "50%", "75%", "max"] * 4)
tm.assert_index_equal((desc_result.stack().index.get_level_values(1)), exp)
def test_categorical_index():
s = np.random.RandomState(12345)
levels = ["foo", "bar", "baz", "qux"]
codes = s.randint(0, 4, size=20)
cats = Categorical.from_codes(codes, levels, ordered=True)
df = DataFrame(np.repeat(np.arange(20), 4).reshape(-1, 4), columns=list("abcd"))
df["cats"] = cats
# with a cat index
result = df.set_index("cats").groupby(level=0, observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
# with a cat column, should produce a cat index
result = df.groupby("cats", observed=False).sum()
expected = df[list("abcd")].groupby(cats.codes, observed=False).sum()
expected.index = CategoricalIndex(
Categorical.from_codes([0, 1, 2, 3], levels, ordered=True), name="cats"
)
tm.assert_frame_equal(result, expected)
def test_describe_categorical_columns():
# GH 11558
cats = CategoricalIndex(
["qux", "foo", "baz", "bar"],
categories=["foo", "bar", "baz", "qux"],
ordered=True,
)
df = DataFrame(np.random.randn(20, 4), columns=cats)
result = df.groupby([1, 2, 3, 4] * 5).describe()
tm.assert_index_equal(result.stack().columns, cats)
tm.assert_categorical_equal(result.stack().columns.values, cats.values)
def test_unstack_categorical():
# GH11558 (example is taken from the original issue)
df = DataFrame(
{"a": range(10), "medium": ["A", "B"] * 5, "artist": list("XYXXY") * 2}
)
df["medium"] = df["medium"].astype("category")
gcat = df.groupby(["artist", "medium"], observed=False)["a"].count().unstack()
result = gcat.describe()
exp_columns = CategoricalIndex(["A", "B"], ordered=False, name="medium")
tm.assert_index_equal(result.columns, exp_columns)
tm.assert_categorical_equal(result.columns.values, exp_columns.values)
result = gcat["A"] + gcat["B"]
expected = Series([6, 4], index=Index(["X", "Y"], name="artist"))
tm.assert_series_equal(result, expected)
def test_bins_unequal_len():
# GH3011
series = Series([np.nan, np.nan, 1, 1, 2, 2, 3, 3, 4, 4])
bins = pd.cut(series.dropna().values, 4)
# len(bins) != len(series) here
msg = r"Length of grouper \(8\) and axis \(10\) must be same length"
with pytest.raises(ValueError, match=msg):
series.groupby(bins).mean()
def test_as_index():
# GH13204
df = DataFrame(
{
"cat": Categorical([1, 2, 2], [1, 2, 3]),
"A": [10, 11, 11],
"B": [101, 102, 103],
}
)
result = df.groupby(["cat", "A"], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# function grouper
f = lambda r: df.loc[r, "A"]
result = df.groupby(["cat", f], as_index=False, observed=True).sum()
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 22],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
tm.assert_frame_equal(result, expected)
# another not in-axis grouper (conflicting names in index)
s = Series(["a", "b", "b"], name="cat")
result = df.groupby(["cat", s], as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
# is original index dropped?
group_columns = ["cat", "A"]
expected = DataFrame(
{
"cat": Categorical([1, 2], categories=df.cat.cat.categories),
"A": [10, 11],
"B": [101, 205],
},
columns=["cat", "A", "B"],
)
for name in [None, "X", "B"]:
df.index = Index(list("abc"), name=name)
result = df.groupby(group_columns, as_index=False, observed=True).sum()
tm.assert_frame_equal(result, expected)
def test_preserve_categories():
# GH-13179
categories = list("abc")
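# sort=True should order the result index by the declared categories; for an
# unordered categorical with sort=False the index follows order of appearance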
# ordered=True
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=True)})
index = CategoricalIndex(categories, categories, ordered=True, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, index
)
# ordered=False
df = DataFrame({"A": Categorical(list("ba"), categories=categories, ordered=False)})
sort_index = CategoricalIndex(categories, categories, ordered=False, name="A")
nosort_index = CategoricalIndex(list("bac"), list("bac"), ordered=False, name="A")
tm.assert_index_equal(
df.groupby("A", sort=True, observed=False).first().index, sort_index
)
tm.assert_index_equal(
df.groupby("A", sort=False, observed=False).first().index, nosort_index
)
def test_preserve_categorical_dtype():
# GH13743, GH13854
df = DataFrame(
{
"A": [1, 2, 1, 1, 2],
"B": [10, 16, 22, 28, 34],
"C1": Categorical(list("abaab"), categories=list("bac"), ordered=False),
"C2": Categorical(list("abaab"), categories=list("bac"), ordered=True),
}
)
# single grouper
exp_full = DataFrame(
{
"A": [2.0, 1.0, np.nan],
"B": [25.0, 20.0, np.nan],
"C1": Categorical(list("bac"), categories=list("bac"), ordered=False),
"C2": Categorical(list("bac"), categories=list("bac"), ordered=True),
}
)
for col in ["C1", "C2"]:
result1 = df.groupby(by=col, as_index=False, observed=False).mean()
result2 = df.groupby(by=col, as_index=True, observed=False).mean().reset_index()
expected = exp_full.reindex(columns=result1.columns)
tm.assert_frame_equal(result1, expected)
tm.assert_frame_equal(result2, expected)
@pytest.mark.parametrize(
"func, values",
[
("first", ["second", "first"]),
("last", ["fourth", "third"]),
("min", ["fourth", "first"]),
("max", ["second", "third"]),
],
)
def test_preserve_on_ordered_ops(func, values):
# gh-18502
# preserve the categoricals on ops
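# first/last/min/max on an ordered categorical column should keep the same
# ordered CategoricalDtype in the result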
c = pd.Categorical(["first", "second", "third", "fourth"], ordered=True)
df = pd.DataFrame({"payload": [-1, -2, -1, -2], "col": c})
g = df.groupby("payload")
result = getattr(g, func)()
expected = pd.DataFrame(
{"payload": [-2, -1], "col": pd.Series(values, dtype=c.dtype)}
).set_index("payload")
tm.assert_frame_equal(result, expected)
def test_categorical_no_compress():
data = Series(np.random.randn(9))
codes = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2])
cats = Categorical.from_codes(codes, [0, 1, 2], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean()
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
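# now use codes with an unobserved category (2 never appears): the result is
# reindexed over all categories, so the missing group surfaces as NaN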
codes = np.array([0, 0, 0, 1, 1, 1, 3, 3, 3])
cats = Categorical.from_codes(codes, [0, 1, 2, 3], ordered=True)
result = data.groupby(cats, observed=False).mean()
exp = data.groupby(codes, observed=False).mean().reindex(cats.categories)
exp.index = CategoricalIndex(
exp.index, categories=cats.categories, ordered=cats.ordered
)
tm.assert_series_equal(result, exp)
cats = Categorical(
["a", "a", "a", "b", "b", "b", "c", "c", "c"],
categories=["a", "b", "c", "d"],
ordered=True,
)
data = DataFrame({"a": [1, 1, 1, 2, 2, 2, 3, 4, 5], "b": cats})
result = data.groupby("b", observed=False).mean()
result = result["a"].values
exp = np.array([1, 2, 4, np.nan])
tm.assert_numpy_array_equal(result, exp)
def test_groupby_empty_with_category():
# GH-9614
# test fix for the case where grouping by an all-None column resulted in
# coercion of dtype categorical -> float
df = pd.DataFrame(
{"A": [None] * 3, "B": pd.Categorical(["train", "train", "test"])}
)
result = df.groupby("A").first()["B"]
expected = pd.Series(
pd.Categorical([], categories=["test", "train"]),
index=pd.Series([], dtype="object", name="A"),
name="B",
)
tm.assert_series_equal(result, expected)
def test_sort():
# https://stackoverflow.com/questions/23814368/sorting-pandas-
# categorical-labels-after-groupby
# This should result in a properly sorted Series so that the plot
# has a sorted x axis
# self.cat.groupby(['value_group'])['value_group'].count().plot(kind='bar')
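# values are binned into 500-wide intervals with string labels; counting per
# bin should come back already ordered by the categorical label order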
df = DataFrame({"value": np.random.randint(0, 10000, 100)})
labels = [f"{i} - {i+499}" for i in range(0, 10000, 500)]
cat_labels = Categorical(labels, labels)
df = df.sort_values(by=["value"], ascending=True)
df["value_group"] = pd.cut(
df.value, range(0, 10500, 500), right=False, labels=cat_labels
)
res = df.groupby(["value_group"], observed=False)["value_group"].count()
exp = res[sorted(res.index, key=lambda x: float(x.split()[0]))]
exp.index = CategoricalIndex(exp.index, name=exp.index.name)
tm.assert_series_equal(res, exp)
def test_sort2():
# dataframe groupby sort was being ignored # GH 8868
df = DataFrame(
[
["(7.5, 10]", 10, 10],
["(7.5, 10]", 8, 20],
["(2.5, 5]", 5, 30],
["(5, 7.5]", 6, 40],
["(2.5, 5]", 4, 50],
["(0, 2.5]", 1, 60],
["(5, 7.5]", 7, 70],
],
columns=["range", "foo", "bar"],
)
df["range"] = Categorical(df["range"], ordered=True)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range", ordered=True
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
col = "range"
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
# when the categorical is ordered, groups are ordered by the category order
expected_sort = result_sort
result_sort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
df["range"] = Categorical(df["range"], ordered=False)
index = CategoricalIndex(
["(0, 2.5]", "(2.5, 5]", "(5, 7.5]", "(7.5, 10]"], name="range"
)
expected_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"], index=index
)
index = CategoricalIndex(
["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
categories=["(7.5, 10]", "(2.5, 5]", "(5, 7.5]", "(0, 2.5]"],
name="range",
)
expected_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], index=index, columns=["foo", "bar"]
)
col = "range"
# this is an unordered categorical, but we allow this
result_sort = df.groupby(col, sort=True, observed=False).first()
tm.assert_frame_equal(result_sort, expected_sort)
result_nosort = df.groupby(col, sort=False, observed=False).first()
tm.assert_frame_equal(result_nosort, expected_nosort)
def test_sort_datetimelike():
# GH10505
# use the same data as test_groupby_sort_categorical, whose categories
# correspond to datetime.month
df = DataFrame(
{
"dt": [
datetime(2011, 7, 1),
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 2, 1),
datetime(2011, 1, 1),
datetime(2011, 5, 1),
],
"foo": [10, 8, 5, 6, 4, 1, 7],
"bar": [10, 20, 30, 40, 50, 60, 70],
},
columns=["dt", "foo", "bar"],
)
# ordered=True
df["dt"] = Categorical(df["dt"], ordered=True)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt", ordered=True)
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(
index, categories=index, name="dt", ordered=True
)
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
# when the categorical is ordered, groups are ordered by the category order
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=False, observed=False).first()
)
# ordered = False
df["dt"] = Categorical(df["dt"], ordered=False)
index = [
datetime(2011, 1, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 7, 1),
]
result_sort = DataFrame(
[[1, 60], [5, 30], [6, 40], [10, 10]], columns=["foo", "bar"]
)
result_sort.index = CategoricalIndex(index, name="dt")
index = [
datetime(2011, 7, 1),
datetime(2011, 2, 1),
datetime(2011, 5, 1),
datetime(2011, 1, 1),
]
result_nosort = DataFrame(
[[10, 10], [5, 30], [6, 40], [1, 60]], columns=["foo", "bar"]
)
result_nosort.index = CategoricalIndex(index, categories=index, name="dt")
col = "dt"
tm.assert_frame_equal(
result_sort, df.groupby(col, sort=True, observed=False).first()
)
tm.assert_frame_equal(
result_nosort, df.groupby(col, sort=False, observed=False).first()
)
def test_empty_sum():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
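# category "c" has no rows; the sum of an empty group is 0 unless min_count
# requires more values, in which case the result is NaN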
# 0 by default
result = df.groupby("A", observed=False).B.sum()
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.sum(min_count=0)
expected = Series([3, 1, 0], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.sum(min_count=1)
expected = Series([3, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count>1
result = df.groupby("A", observed=False).B.sum(min_count=2)
expected = Series([3, np.nan, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_empty_prod():
# https://github.com/pandas-dev/pandas/issues/18678
df = DataFrame(
{"A": Categorical(["a", "a", "b"], categories=["a", "b", "c"]), "B": [1, 2, 1]}
)
expected_idx = CategoricalIndex(["a", "b", "c"], name="A")
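# category "c" has no rows; the prod of an empty group is 1 (the
# multiplicative identity) unless min_count requires at least one value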
# 1 by default
result = df.groupby("A", observed=False).B.prod()
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=0
result = df.groupby("A", observed=False).B.prod(min_count=0)
expected = Series([2, 1, 1], expected_idx, name="B")
tm.assert_series_equal(result, expected)
# min_count=1
result = df.groupby("A", observed=False).B.prod(min_count=1)
expected = Series([2, 1, np.nan], expected_idx, name="B")
tm.assert_series_equal(result, expected)
def test_groupby_multiindex_categorical_datetime():
# https://github.com/pandas-dev/pandas/issues/21390
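# with two categorical keys the default (observed=False) result covers the
# full 3 x 3 product of categories, so unobserved pairs appear as NaN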
df = DataFrame(
{
"key1": Categorical(list("abcbabcba")),
"key2": Categorical(
list(pd.date_range("2018-06-01 00", freq="1T", periods=3)) * 3
),
"values": np.arange(9),
}
)
result = df.groupby(["key1", "key2"]).mean()
idx = MultiIndex.from_product(
[
Categorical(["a", "b", "c"]),
Categorical(pd.date_range("2018-06-01 00", freq="1T", periods=3)),
],
names=["key1", "key2"],
)
expected = DataFrame({"values": [0, 4, 8, 3, 4, 5, 6, np.nan, 2]}, index=idx)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"as_index, expected",
[
(
True,
Series(
index=MultiIndex.from_arrays(
[Series([1, 1, 2], dtype="category"), [1, 2, 2]], names=["a", "b"]
),
data=[1, 2, 3],
name="x",
),
),
(
False,
DataFrame(
{
"a": Series([1, 1, 2], dtype="category"),
"b": [1, 2, 2],
"x": [1, 2, 3],
}
),
),
],
)
def test_groupby_agg_observed_true_single_column(as_index, expected):
# GH-23970
df = DataFrame(
{"a": Series([1, 1, 2], dtype="category"), "b": [1, 2, 2], "x": [1, 2, 3]}
)
result = df.groupby(["a", "b"], as_index=as_index, observed=True)["x"].sum()
tm.assert_equal(result, expected)
@pytest.mark.parametrize("fill_value", [None, np.nan, pd.NaT])
def test_shift(fill_value):
ct = Categorical(
["a", "b", "c", "d"], categories=["a", "b", "c", "d"], ordered=False
)
expected = Categorical(
[None, "a", "b", "c"], categories=["a", "b", "c", "d"], ordered=False
)
res = ct.shift(1, fill_value=fill_value)
tm.assert_equal(res, expected)
@pytest.fixture
def df_cat(df):
"""
DataFrame with multiple categorical columns and a column of integers.
Shortened so as not to contain all possible combinations of categories.
Useful for testing `observed` kwarg functionality on GroupBy objects.
Parameters
----------
df: DataFrame
Non-categorical, longer DataFrame from another fixture, used to derive
this one
Returns
-------
df_cat: DataFrame
"""
df_cat = df.copy()[:4] # leave out some groups
df_cat["A"] = df_cat["A"].astype("category")
df_cat["B"] = df_cat["B"].astype("category")
df_cat["C"] = Series([1, 2, 3, 4])
df_cat = df_cat.drop(["D"], axis=1)
return df_cat
@pytest.mark.parametrize(
"operation, kwargs", [("agg", dict(dtype="category")), ("apply", dict())]
)
def test_seriesgroupby_observed_true(df_cat, operation, kwargs):
# GH 24880
index = MultiIndex.from_frame(
DataFrame(
{"A": ["foo", "foo", "bar", "bar"], "B": ["one", "two", "one", "three"]},
**kwargs,
)
)
expected = Series(data=[1, 3, 2, 4], index=index, name="C")
grouped = df_cat.groupby(["A", "B"], observed=True)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("operation", ["agg", "apply"])
@pytest.mark.parametrize("observed", [False, None])
def test_seriesgroupby_observed_false_or_none(df_cat, observed, operation):
# GH 24880
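# with observed=False/None the index is the full cartesian product of the
# categories, so combinations absent from df_cat show up as NaN
# (agg fills them with 0, apply leaves them as NaN)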
index, _ = MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
],
names=["A", "B"],
).sortlevel()
expected = Series(data=[2, 4, np.nan, 1, np.nan, 3], index=index, name="C")
if operation == "agg":
expected = expected.fillna(0, downcast="infer")
grouped = df_cat.groupby(["A", "B"], observed=observed)["C"]
result = getattr(grouped, operation)(sum)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"observed, index, data",
[
(
True,
MultiIndex.from_tuples(
[
("foo", "one", "min"),
("foo", "one", "max"),
("foo", "two", "min"),
("foo", "two", "max"),
("bar", "one", "min"),
("bar", "one", "max"),
("bar", "three", "min"),
("bar", "three", "max"),
],
names=["A", "B", None],
),
[1, 1, 3, 3, 2, 2, 4, 4],
),
(
False,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
(
None,
MultiIndex.from_product(
[
CategoricalIndex(["bar", "foo"], ordered=False),
CategoricalIndex(["one", "three", "two"], ordered=False),
Index(["min", "max"]),
],
names=["A", "B", None],
),
[2, 2, 4, 4, np.nan, np.nan, 1, 1, np.nan, np.nan, 3, 3],
),
],
)
def test_seriesgroupby_observed_apply_dict(df_cat, observed, index, data):
# GH 24880
expected = Series(data=data, index=index, name="C")
result = df_cat.groupby(["A", "B"], observed=observed)["C"].apply(
lambda x: {"min": x.min(), "max": x.max()}
)
tm.assert_series_equal(result, expected)
def test_groupby_categorical_series_dataframe_consistent(df_cat):
# GH 20416
expected = df_cat.groupby(["A", "B"])["C"].mean()
result = df_cat.groupby(["A", "B"]).mean()["C"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("code", [([1, 0, 0]), ([0, 0, 0])])
def test_groupby_categorical_axis_1(code):
# GH 13420
df = DataFrame({"a": [1, 2, 3, 4], "b": [-1, -2, -3, -4], "c": [5, 6, 7, 8]})
cat = pd.Categorical.from_codes(code, categories=list("abc"))
result = df.groupby(cat, axis=1).mean()
expected = df.T.groupby(cat, axis=0).mean().T
tm.assert_frame_equal(result, expected)
def test_groupby_cat_preserves_structure(observed, ordered):
# GH 28787
df = DataFrame(
{"Name": Categorical(["Bob", "Greg"], ordered=ordered), "Item": [1, 2]},
columns=["Name", "Item"],
)
expected = df.copy()
result = (
df.groupby("Name", observed=observed)
.agg(pd.DataFrame.sum, skipna=True)
.reset_index()
)
tm.assert_frame_equal(result, expected)
def test_get_nonexistent_category():
# Accessing a Category that is not in the dataframe
df = pd.DataFrame({"var": ["a", "a", "b", "b"], "val": range(4)})
with pytest.raises(KeyError, match="'vau'"):
df.groupby("var").apply(
lambda rows: pd.DataFrame(
{"var": [rows.iloc[-1]["var"]], "val": [rows.iloc[-1]["vau"]]}
)
)
def test_series_groupby_on_2_categoricals_unobserved(reduction_func, observed, request):
# GH 17605
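# with two 4-category groupers, observed=True yields only the 4 observed
# pairs while observed=False expands to the full 4 x 4 = 16 product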
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = pd.DataFrame(
{
"cat_1": pd.Categorical(list("AABB"), categories=list("ABCD")),
"cat_2": pd.Categorical(list("AB") * 2, categories=list("ABCD")),
"value": [0.1] * 4,
}
)
args = {"nth": [0]}.get(reduction_func, [])
expected_length = 4 if observed else 16
series_groupby = df.groupby(["cat_1", "cat_2"], observed=observed)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
assert len(result) == expected_length
def test_series_groupby_on_2_categoricals_unobserved_zeroes_or_nans(
reduction_func, request
):
# GH 17605
# Tests whether the unobserved categories in the result contain 0 or NaN
if reduction_func == "ngroup":
pytest.skip("ngroup is not truly a reduction")
if reduction_func == "corrwith": # GH 32293
mark = pytest.mark.xfail(
reason="TODO: implemented SeriesGroupBy.corrwith. See GH 32293"
)
request.node.add_marker(mark)
df = pd.DataFrame(
{
"cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
"cat_2": pd.Categorical(list("AB") * 2, categories=list("ABC")),
"value": [0.1] * 4,
}
)
unobserved = [tuple("AC"), tuple("BC"), tuple("CA"), tuple("CB"), tuple("CC")]
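# _results_for_groupbys_with_missing_categories maps each reduction to the
# fill value (0 or NaN) expected for these unobserved category pairs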
args = {"nth": [0]}.get(reduction_func, [])
series_groupby = df.groupby(["cat_1", "cat_2"], observed=False)["value"]
agg = getattr(series_groupby, reduction_func)
result = agg(*args)
zero_or_nan = _results_for_groupbys_with_missing_categories[reduction_func]
for idx in unobserved:
val = result.loc[idx]
assert (pd.isna(zero_or_nan) and pd.isna(val)) or (val == zero_or_nan)
# If we expect unobserved values to be zero, we also expect the dtype to be int.
# Except for .sum(). If the observed categories sum to dtype=float (i.e. their
# sums have decimals), then the zeros for the missing categories should also be
# floats.
if zero_or_nan == 0 and reduction_func != "sum":
assert np.issubdtype(result.dtype, np.integer)
def test_dataframe_groupby_on_2_categoricals_when_observed_is_true(reduction_func):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two pd.Categorical variables,
# does not return the categories that are not in df when observed=True
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = pd.DataFrame(
{
"cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
"cat_2": pd.Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=True)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
for cat in unobserved_cats:
assert cat not in res.index
@pytest.mark.parametrize("observed", [False, None])
def test_dataframe_groupby_on_2_categoricals_when_observed_is_false(
reduction_func, observed, request
):
# GH 23865
# GH 27075
# Ensure that df.groupby, when 'by' is two pd.Categorical variables,
# returns the categories that are not in df when observed=False/None
if reduction_func == "ngroup":
pytest.skip("ngroup does not return the Categories on the index")
df = pd.DataFrame(
{
"cat_1": pd.Categorical(list("AABB"), categories=list("ABC")),
"cat_2": pd.Categorical(list("1111"), categories=list("12")),
"value": [0.1, 0.1, 0.1, 0.1],
}
)
unobserved_cats = [("A", "2"), ("B", "2"), ("C", "1"), ("C", "2")]
df_grp = df.groupby(["cat_1", "cat_2"], observed=observed)
args = {"nth": [0], "corrwith": [df]}.get(reduction_func, [])
res = getattr(df_grp, reduction_func)(*args)
expected = _results_for_groupbys_with_missing_categories[reduction_func]
if expected is np.nan:
assert res.loc[unobserved_cats].isnull().all().all()
else:
assert (res.loc[unobserved_cats] == expected).all().all()
def test_series_groupby_categorical_aggregation_getitem():
# GH 8870
d = {"foo": [10, 8, 4, 1], "bar": [10, 20, 30, 40], "baz": ["d", "c", "d", "c"]}
df = pd.DataFrame(d)
cat = pd.cut(df["foo"], np.linspace(0, 20, 5))
df["range"] = cat
groups = df.groupby(["range", "baz"], as_index=True, sort=True)
result = groups["foo"].agg("mean")
expected = groups.agg("mean")["foo"]
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
"func, expected_values",
[(pd.Series.nunique, [1, 1, 2]), (pd.Series.count, [1, 2, 2])],
)
def test_groupby_agg_categorical_columns(func, expected_values):
# 31256
df = pd.DataFrame(
{
"id": [0, 1, 2, 3, 4],
"groups": [0, 1, 1, 2, 2],
"value": pd.Categorical([0, 0, 0, 0, 1]),
}
).set_index("id")
result = df.groupby("groups").agg(func)
expected = pd.DataFrame(
{"value": expected_values}, index=pd.Index([0, 1, 2], name="groups")
)
tm.assert_frame_equal(result, expected)
def test_groupby_agg_non_numeric():
df = pd.DataFrame(
{"A": pd.Categorical(["a", "a", "b"], categories=["a", "b", "c"])}
)
expected = pd.DataFrame({"A": [2, 1]}, index=[1, 2])
result = df.groupby([1, 2, 1]).agg(pd.Series.nunique)
tm.assert_frame_equal(result, expected)
result = df.groupby([1, 2, 1]).nunique()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_groupy_first_returned_categorical_instead_of_dataframe(func):
# GH 28641: groupby drops index when grouping over a categorical column with
# first/last. Previously a Categorical was returned instead of a DataFrame.
df = pd.DataFrame(
{"A": [1997], "B": pd.Series(["b"], dtype="category").cat.as_ordered()}
)
df_grouped = df.groupby("A")["B"]
result = getattr(df_grouped, func)()
expected = pd.Series(["b"], index=pd.Index([1997], name="A"), name="B")
tm.assert_series_equal(result, expected)
def test_read_only_category_no_sort():
# GH33410
cats = np.array([1, 2])
cats.flags.writeable = False
df = DataFrame(
{"a": [1, 3, 5, 7], "b": Categorical([1, 1, 2, 2], categories=Index(cats))}
)
expected = DataFrame(data={"a": [2, 6]}, index=CategoricalIndex([1, 2], name="b"))
result = df.groupby("b", sort=False).mean()
tm.assert_frame_equal(result, expected)
def test_sorted_missing_category_values():
# GH 28597
df = pd.DataFrame(
{
"foo": [
"small",
"large",
"large",
"large",
"medium",
"large",
"large",
"medium",
],
"bar": ["C", "A", "A", "C", "A", "C", "A", "C"],
}
)
df["foo"] = (
df["foo"]
.astype("category")
.cat.set_categories(["tiny", "small", "medium", "large"], ordered=True)
)
expected = pd.DataFrame(
{
"tiny": {"A": 0, "C": 0},
"small": {"A": 0, "C": 1},
"medium": {"A": 1, "C": 1},
"large": {"A": 3, "C": 2},
}
)
expected = expected.rename_axis("bar", axis="index")
expected.columns = pd.CategoricalIndex(
["tiny", "small", "medium", "large"],
categories=["tiny", "small", "medium", "large"],
ordered=True,
name="foo",
dtype="category",
)
result = df.groupby(["bar", "foo"]).size().unstack()
tm.assert_frame_equal(result, expected)
def test_agg_cython_category_not_implemented_fallback():
# https://github.com/pandas-dev/pandas/issues/31450
df = pd.DataFrame({"col_num": [1, 1, 2, 3]})
df["col_cat"] = df["col_num"].astype("category")
result = df.groupby("col_num").col_cat.first()
expected = pd.Series(
[1, 2, 3], index=pd.Index([1, 2, 3], name="col_num"), name="col_cat"
)
tm.assert_series_equal(result, expected)
result = df.groupby("col_num").agg({"col_cat": "first"})
expected = expected.to_frame()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["min", "max"])
def test_aggregate_categorical_lost_index(func: str):
# GH 28641: groupby drops index when grouping over a categorical column with min/max
ds = pd.Series(["b"], dtype="category").cat.as_ordered()
df = pd.DataFrame({"A": [1997], "B": ds})
result = df.groupby("A").agg({"B": func})
expected = pd.DataFrame({"B": ["b"]}, index=pd.Index([1997], name="A"))
tm.assert_frame_equal(result, expected)
def test_aggregate_categorical_with_isnan():
# GH 29837
df = pd.DataFrame(
{
"A": [1, 1, 1, 1],
"B": [1, 2, 1, 2],
"numerical_col": [0.1, 0.2, np.nan, 0.3],
"object_col": ["foo", "bar", "foo", "fee"],
"categorical_col": ["foo", "bar", "foo", "fee"],
}
)
df = df.astype({"categorical_col": "category"})
result = df.groupby(["A", "B"]).agg(lambda df: df.isna().sum())
index = pd.MultiIndex.from_arrays([[1, 1], [1, 2]], names=("A", "B"))
expected = pd.DataFrame(
data={
"numerical_col": [1.0, 0.0],
"object_col": [0, 0],
"categorical_col": [0, 0],
},
index=index,
)
tm.assert_frame_equal(result, expected)
def test_categorical_transform():
# GH 29037
df = pd.DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
}
)
delivery_status_type = pd.CategoricalDtype(
categories=["Waiting", "OnTheWay", "Delivered"], ordered=True
)
df["status"] = df["status"].astype(delivery_status_type)
df["last_status"] = df.groupby("package_id")["status"].transform(max)
result = df.copy()
expected = pd.DataFrame(
{
"package_id": [1, 1, 1, 2, 2, 3],
"status": [
"Waiting",
"OnTheWay",
"Delivered",
"Waiting",
"OnTheWay",
"Waiting",
],
"last_status": [
"Delivered",
"Delivered",
"Delivered",
"OnTheWay",
"OnTheWay",
"Waiting",
],
}
)
expected["status"] = expected["status"].astype(delivery_status_type)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_series_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = pd.Categorical([0, 0, 1, 1])
val = [0, 1, 1, 0]
df = pd.DataFrame({"a": cat, "b": cat, "c": val})
idx = pd.Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
"first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
"last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func]
if observed:
expected = expected.dropna().astype(np.int64)
srs_grp = df.groupby(["a", "b"], observed=observed)["c"]
result = getattr(srs_grp, func)()
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("func", ["first", "last"])
def test_df_groupby_first_on_categorical_col_grouped_on_2_categoricals(
func: str, observed: bool
):
# GH 34951
cat = pd.Categorical([0, 0, 1, 1])
val = [0, 1, 1, 0]
df = pd.DataFrame({"a": cat, "b": cat, "c": val})
idx = pd.Categorical([0, 1])
idx = pd.MultiIndex.from_product([idx, idx], names=["a", "b"])
expected_dict = {
"first": pd.Series([0, np.NaN, np.NaN, 1], idx, name="c"),
"last": pd.Series([1, np.NaN, np.NaN, 0], idx, name="c"),
}
expected = expected_dict[func].to_frame()
if observed:
expected = expected.dropna().astype(np.int64)
df_grp = df.groupby(["a", "b"], observed=observed)
result = getattr(df_grp, func)()
tm.assert_frame_equal(result, expected)
| 32.913661 | 88 | 0.570728 |
d4610d0fa7dfedcba127ffab2a3d2b93a6fe8de9 | 261 | py | Python | setup.py | contmp/django-dark | 0e7eb46d720eda3ab2806448971e492049ebb312 | [
"MIT"
] | 4 | 2020-10-21T20:20:58.000Z | 2021-07-20T16:27:48.000Z | setup.py | contmp/django-vacancies | 73f63e18937b9484a2eaf4f7832af653a125cc88 | [
"BSD-3-Clause"
] | 2 | 2020-07-02T00:12:59.000Z | 2020-11-03T04:06:32.000Z | setup.py | contmp/django-vacancies | 73f63e18937b9484a2eaf4f7832af653a125cc88 | [
"BSD-3-Clause"
] | 1 | 2020-10-18T04:03:43.000Z | 2020-10-18T04:03:43.000Z | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
author="Bastian Probian",
author_email="contmp@me.com",
long_description=long_description,
long_description_content_type="text/markdown"
)
| 21.75 | 49 | 0.724138 |
70854d7fd776b04fc9d4098effd96a474e19b5af | 1,723 | py | Python | netengine/backends/snmp/airos.py | technouppercut/NetEngine | 5bf9b9ca44c59f8b48ffaf0480315bfff4053645 | [
"X11"
] | null | null | null | netengine/backends/snmp/airos.py | technouppercut/NetEngine | 5bf9b9ca44c59f8b48ffaf0480315bfff4053645 | [
"X11"
] | null | null | null | netengine/backends/snmp/airos.py | technouppercut/NetEngine | 5bf9b9ca44c59f8b48ffaf0480315bfff4053645 | [
"X11"
] | null | null | null | """
NetEngine SNMP Ubiquiti Air OS backend
"""
__all__ = ['AirOS']
from datetime import timedelta
from netengine.backends.snmp import SNMP
class AirOS(SNMP):
"""
Ubiquiti AirOS SNMP backend
"""
def __str__(self):
""" print a human readable object description """
return u"<SNMP (Ubiquity AirOS): %s>" % self.host
def validate(self):
"""
raises NetEngineError exception if anything is wrong with the connection
for example: wrong host, invalid community
"""
self.name
@property
def os(self):
"""
returns (os_name, os_version)
"""
os_name = 'AirOS'
os_version = self.get_value('1.2.840.10036.3.1.2.1.4.8')
return os_name, os_version
@property
def name(self):
"""
returns a string containing the device name
"""
return self.get_value('1.3.6.1.2.1.1.5.0')
@property
def model(self):
"""
returns a string containing the device model
"""
return self.get_value('1.2.840.10036.3.1.2.1.3.8')
@property
def ssid(self):
"""
returns a string containing the wireless ssid
"""
return self.get_value('1.2.840.10036.1.1.1.9.8')
@property
def uptime(self):
"""
returns an integer representing the number of seconds of uptime
"""
return int(self.get_value('1.3.6.1.2.1.1.3.0')) / 100
@property
def uptime_tuple(self):
"""
returns (days, hours, minutes)
"""
td = timedelta(seconds=self.uptime)
return td.days, td.seconds//3600, (td.seconds//60)%60 | 23.60274 | 80 | 0.556007 |
5cadeb33b22cfbdb2065180732dad9ef799d3fd7 | 3,110 | py | Python | tf_example.py | ligongzzz/TNModel | 2609c184a67aac968248bb9d41000b4d647720c3 | [
"Apache-2.0"
] | 1 | 2021-06-11T11:34:33.000Z | 2021-06-11T11:34:33.000Z | tf_example.py | ligongzzz/TNModel | 2609c184a67aac968248bb9d41000b4d647720c3 | [
"Apache-2.0"
] | null | null | null | tf_example.py | ligongzzz/TNModel | 2609c184a67aac968248bb9d41000b4d647720c3 | [
"Apache-2.0"
] | 1 | 2021-06-11T11:34:37.000Z | 2021-06-11T11:34:37.000Z | from tensorflow.python.framework.ops import disable_eager_execution
from TNModel.utils import *
import TNModel.tf_model as tf_model
import tensornetwork as tn
import tensorflow as tf
import numpy as np
import os
os.environ['TF_DISABLE_MLC'] = '1'
# If you want to run in eager mode, just comment those two lines.
disable_eager_execution()
tn.set_default_backend('tensorflow')
# HyperParams
hyper_params = {
'rank': 28*28,
'phys_dim': 10,
'bond_dim': 2,
'labels': 10,
'string_cnt': 4, # for 1d-sbs only
'sbs_op': 'mean', # mean or prod, alternative choice for 1d-sbs contraction
'model': 'peps', # mps (finished), 1d-sbs (half-working) or peps
# vectorized_map is only supported in part of the machines.
'vectorized': True,
'batch_size': 16,
'max_singular_values': 64
}
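# NOTE: 'model' picks which TNModel layer is instantiated below (mps, 1d-sbs
# or peps); 'vectorized' and 'max_singular_values' are presumably consumed
# inside the TNModel layers, as they are only passed through via hyper_params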
if __name__ == '__main__':
# Datasets
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if not hyper_params['model'] == 'peps':
x_train, x_test = x_train / 255.0, x_test / 255.0
train_cos = (1.0 - x_train).reshape(-1, 28*28, 1)
train_sin = x_train.reshape(-1, 28*28, 1)
test_cos = (1.0 - x_test).reshape(-1, 28*28, 1)
test_sin = x_test.reshape(-1, 28*28, 1)
x_train = np.concatenate((train_cos, train_sin), axis=2)
x_test = np.concatenate((test_cos, test_sin), axis=2)
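# for mps/1d-sbs each pixel x in [0, 1] is embedded as the pair (1 - x, x),
# so every site carries a two-component local feature vector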
else:
train_mean, train_std = np.mean(x_train), np.std(x_train)
test_mean, test_std = np.mean(x_test), np.std(x_test)
x_train = (x_train - train_mean) / train_std
x_test = (x_test - test_mean) / test_std
x_train = x_train.reshape([-1, 1, 28, 28])
x_test = x_test.reshape([-1, 1, 28, 28])
# Model
print('Building model...')
if hyper_params['model'] == 'mps':
model = tf.keras.models.Sequential([
tf_model.MPSLayer(hyper_params=hyper_params),
tf.keras.layers.Softmax()
])
elif hyper_params['model'] == '1d-sbs':
model = tf.keras.models.Sequential([
tf_model.SBS1dLayer(hyper_params=hyper_params),
tf.keras.layers.Softmax()
])
elif hyper_params['model'] == 'peps':
model = tf.keras.models.Sequential([
tf_model.PEPSCNNLayer(hyper_params=hyper_params),
tf.keras.layers.Softmax()
])
else:
raise NotImplementedError()
print('Compiling model...')
model.compile(
optimizer='rmsprop',
loss='sparse_categorical_crossentropy',
metrics=['accuracy']
)
if hyper_params['model'] == 'peps':
print('Building model...')
model.build((None, 1, 28, 28))
model.summary()
print('Finished!')
hist = LossHistory()
print('Start training...')
model.fit(x_train, y_train, epochs=5, verbose=1,
batch_size=hyper_params['batch_size'], callbacks=[hist])
hist.loss_plot(loss_type='batch')
result = model.evaluate(
x_test, y_test, batch_size=hyper_params['batch_size'], verbose=1)
print('Evaluate Results:', result)
| 30.490196 | 81 | 0.62508 |
3e0cd7bc0f90fde4f875110d9daf40c55bd812fc | 516 | py | Python | run/migrations/0010_auto_20200223_1859.py | SharifAIChallenge/AIC-GameRunner | 025c2987b8c288b6e136e6621b99190a7f2ed21d | [
"MIT"
] | null | null | null | run/migrations/0010_auto_20200223_1859.py | SharifAIChallenge/AIC-GameRunner | 025c2987b8c288b6e136e6621b99190a7f2ed21d | [
"MIT"
] | 18 | 2017-01-13T14:27:46.000Z | 2018-02-25T00:29:34.000Z | run/migrations/0010_auto_20200223_1859.py | SharifAIChallenge/AIC-GameRunner | 025c2987b8c288b6e136e6621b99190a7f2ed21d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.10.8 on 2020-02-23 18:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('run', '0009_run_count_tries'),
]
operations = [
migrations.AlterField(
model_name='run',
name='response',
field=models.SmallIntegerField(choices=[(0, 'Wating'), (1, 'Sending'), (2, 'Sent'), (3, 'Failed')], default=0),
),
]
| 24.571429 | 123 | 0.596899 |
e63a18de7304da9f56628efb80991218489eec08 | 81,512 | py | Python | ironic/tests/drivers/test_ipmitool.py | overcastcloud/ironic | c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7 | [
"Apache-2.0"
] | null | null | null | ironic/tests/drivers/test_ipmitool.py | overcastcloud/ironic | c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7 | [
"Apache-2.0"
] | null | null | null | ironic/tests/drivers/test_ipmitool.py | overcastcloud/ironic | c6608e97af33f8d7f3fe2f7deeb78f52196f2cc7 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test class for IPMITool driver module."""
import os
import stat
import subprocess
import tempfile
import time
import types
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import uuidutils
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import console_utils
from ironic.drivers.modules import ipmitool as ipmi
from ironic.tests import base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
CONF = cfg.CONF
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
INFO_DICT = db_utils.get_test_ipmi_info()
# BRIDGE_INFO_DICT will have all the bridging parameters appended
BRIDGE_INFO_DICT = INFO_DICT.copy()
BRIDGE_INFO_DICT.update(db_utils.get_test_ipmi_bridging_parameters())
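# BRIDGE_INFO_DICT extends INFO_DICT with the ipmi_bridging, local/transit/
# target address and channel fields exercised by the bridging tests below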
class IPMIToolCheckInitTestCase(base.TestCase):
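# Each interface constructor probes ipmitool option support and validates the
# temporary directory; TMP_DIR_CHECKED caches the directory check so it only
# happens once per process, which is what these tests assert.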
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIPower()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_1(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter(
[exception.PathNotFound(dir="foo_dir")])
self.assertRaises(exception.PathNotFound, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_2(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter(
[exception.DirectoryNotWritable(dir="foo_dir")])
self.assertRaises(exception.DirectoryNotWritable, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_3(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter([exception.InsufficientDiskSpace(
path="foo_dir", required=1, actual=0)])
self.assertRaises(exception.InsufficientDiskSpace, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.IPMIPower()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_management_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIManagement()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_management_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = False
ipmi.IPMIManagement()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_vendor_passthru_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.VendorPassthru()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_vendor_passthru_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.VendorPassthru()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_console_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIShellinaboxConsole()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_console_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.IPMIShellinaboxConsole()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(subprocess, 'check_call', autospec=True)
class IPMIToolCheckOptionSupportedTestCase(base.TestCase):
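# Pattern for these tests: subprocess.check_call succeeding marks the option
# as supported, CalledProcessError marks it unsupported, and OSError
# (ipmitool missing) is propagated to the caller.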
def test_check_timing_pass(self, mock_chkcall, mock_support):
mock_chkcall.return_value = (None, None)
mock_support.return_value = None
expected = [mock.call('timing'),
mock.call('timing', True)]
ipmi._check_option_support(['timing'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_timing_fail(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter(
[subprocess.CalledProcessError(1, 'ipmitool')])
mock_support.return_value = None
expected = [mock.call('timing'),
mock.call('timing', False)]
ipmi._check_option_support(['timing'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_timing_no_ipmitool(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter([OSError()])
mock_support.return_value = None
expected = [mock.call('timing')]
self.assertRaises(OSError, ipmi._check_option_support, ['timing'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_single_bridge_pass(self, mock_chkcall, mock_support):
mock_chkcall.return_value = (None, None)
mock_support.return_value = None
expected = [mock.call('single_bridge'),
mock.call('single_bridge', True)]
ipmi._check_option_support(['single_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_single_bridge_fail(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter(
[subprocess.CalledProcessError(1, 'ipmitool')])
mock_support.return_value = None
expected = [mock.call('single_bridge'),
mock.call('single_bridge', False)]
ipmi._check_option_support(['single_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_single_bridge_no_ipmitool(self, mock_chkcall,
mock_support):
mock_chkcall.side_effect = iter([OSError()])
mock_support.return_value = None
expected = [mock.call('single_bridge')]
self.assertRaises(OSError, ipmi._check_option_support,
['single_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_dual_bridge_pass(self, mock_chkcall, mock_support):
mock_chkcall.return_value = (None, None)
mock_support.return_value = None
expected = [mock.call('dual_bridge'),
mock.call('dual_bridge', True)]
ipmi._check_option_support(['dual_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_dual_bridge_fail(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter(
[subprocess.CalledProcessError(1, 'ipmitool')])
mock_support.return_value = None
expected = [mock.call('dual_bridge'),
mock.call('dual_bridge', False)]
ipmi._check_option_support(['dual_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_dual_bridge_no_ipmitool(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter([OSError()])
mock_support.return_value = None
expected = [mock.call('dual_bridge')]
self.assertRaises(OSError, ipmi._check_option_support,
['dual_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_all_options_pass(self, mock_chkcall, mock_support):
mock_chkcall.return_value = (None, None)
mock_support.return_value = None
expected = [
mock.call('timing'), mock.call('timing', True),
mock.call('single_bridge'),
mock.call('single_bridge', True),
mock.call('dual_bridge'), mock.call('dual_bridge', True)]
ipmi._check_option_support(['timing', 'single_bridge', 'dual_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_all_options_fail(self, mock_chkcall, mock_support):
options = ['timing', 'single_bridge', 'dual_bridge']
mock_chkcall.side_effect = iter(
[subprocess.CalledProcessError(1, 'ipmitool')] * len(options))
mock_support.return_value = None
expected = [
mock.call('timing'), mock.call('timing', False),
mock.call('single_bridge'),
mock.call('single_bridge', False),
mock.call('dual_bridge'),
mock.call('dual_bridge', False)]
ipmi._check_option_support(options)
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_all_options_no_ipmitool(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter([OSError()])
mock_support.return_value = None
# exception is raised once ipmitool is not found for a command
expected = [mock.call('timing')]
self.assertRaises(OSError, ipmi._check_option_support,
['timing', 'single_bridge', 'dual_bridge'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
@mock.patch.object(time, 'sleep', autospec=True)
class IPMIToolPrivateMethodTestCase(db_base.DbTestCase):
def setUp(self):
super(IPMIToolPrivateMethodTestCase, self).setUp()
self.node = obj_utils.get_test_node(
self.context,
driver='fake_ipmitool',
driver_info=INFO_DICT)
self.info = ipmi._parse_driver_info(self.node)
def _test__make_password_file(self, mock_sleep, input_password,
exception_to_raise=None):
pw_file = None
try:
with ipmi._make_password_file(input_password) as pw_file:
if exception_to_raise is not None:
raise exception_to_raise
self.assertTrue(os.path.isfile(pw_file))
self.assertEqual(0o600, os.stat(pw_file)[stat.ST_MODE] & 0o777)
with open(pw_file, "r") as f:
password = f.read()
self.assertEqual(str(input_password), password)
finally:
if pw_file is not None:
self.assertFalse(os.path.isfile(pw_file))
def test__make_password_file_str_password(self, mock_sleep):
self._test__make_password_file(mock_sleep, self.info.get('password'))
def test__make_password_file_with_numeric_password(self, mock_sleep):
self._test__make_password_file(mock_sleep, 12345)
def test__make_password_file_caller_exception(self, mock_sleep):
# Test caller raising exception
result = self.assertRaises(
ValueError,
self._test__make_password_file,
mock_sleep, 12345, ValueError('we should fail'))
self.assertEqual('we should fail', result.args[0])
@mock.patch.object(tempfile, 'NamedTemporaryFile',
new=mock.MagicMock(side_effect=OSError('Test Error')))
def test__make_password_file_tempfile_known_exception(self, mock_sleep):
# Test OSError exception in _make_password_file for
# tempfile.NamedTemporaryFile
self.assertRaises(
exception.PasswordFileFailedToCreate,
self._test__make_password_file, mock_sleep, 12345)
@mock.patch.object(
tempfile, 'NamedTemporaryFile',
new=mock.MagicMock(side_effect=OverflowError('Test Error')))
def test__make_password_file_tempfile_unknown_exception(self, mock_sleep):
# Test exception in _make_password_file for tempfile.NamedTemporaryFile
result = self.assertRaises(
OverflowError,
self._test__make_password_file, mock_sleep, 12345)
self.assertEqual('Test Error', result.args[0])
def test__make_password_file_write_exception(self, mock_sleep):
# Test exception in _make_password_file for write()
mock_namedtemp = mock.mock_open(mock.MagicMock(name='JLV'))
with mock.patch('tempfile.NamedTemporaryFile', mock_namedtemp):
mock_filehandle = mock_namedtemp.return_value
mock_write = mock_filehandle.write
mock_write.side_effect = OSError('Test 2 Error')
self.assertRaises(
exception.PasswordFileFailedToCreate,
self._test__make_password_file, mock_sleep, 12345)
def test__parse_driver_info(self, mock_sleep):
# make sure we get back the expected things
_OPTIONS = ['address', 'username', 'password', 'uuid']
for option in _OPTIONS:
self.assertIsNotNone(self.info.get(option))
info = dict(INFO_DICT)
# test the default value for 'priv_level'
node = obj_utils.get_test_node(self.context, driver_info=info)
ret = ipmi._parse_driver_info(node)
self.assertEqual('ADMINISTRATOR', ret['priv_level'])
# ipmi_username / ipmi_password are not mandatory
del info['ipmi_username']
node = obj_utils.get_test_node(self.context, driver_info=info)
ipmi._parse_driver_info(node)
del info['ipmi_password']
node = obj_utils.get_test_node(self.context, driver_info=info)
ipmi._parse_driver_info(node)
# make sure error is raised when ipmi_address is missing
info = dict(INFO_DICT)
del info['ipmi_address']
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ipmi._parse_driver_info,
node)
# test the invalid priv_level value
info = dict(INFO_DICT)
info['ipmi_priv_level'] = 'ABCD'
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
ipmi._parse_driver_info,
node)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_invalid_bridging_type(
self, mock_support, mock_sleep):
info = BRIDGE_INFO_DICT.copy()
# make sure error is raised when ipmi_bridging has unexpected value
info['ipmi_bridging'] = 'junk'
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
ipmi._parse_driver_info,
node)
self.assertFalse(mock_support.called)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_no_bridging(
self, mock_support, mock_sleep):
_OPTIONS = ['address', 'username', 'password', 'uuid']
_BRIDGING_OPTIONS = ['local_address', 'transit_channel',
'transit_address',
'target_channel', 'target_address']
info = BRIDGE_INFO_DICT.copy()
info['ipmi_bridging'] = 'no'
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=info)
ret = ipmi._parse_driver_info(node)
# ensure that _is_option_supported was not called
self.assertFalse(mock_support.called)
# check if we got all the required options
for option in _OPTIONS:
self.assertIsNotNone(ret[option])
# test the default value for 'priv_level'
self.assertEqual('ADMINISTRATOR', ret['priv_level'])
# check if bridging parameters were set to None
for option in _BRIDGING_OPTIONS:
self.assertIsNone(ret[option])
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_dual_bridging_pass(
self, mock_support, mock_sleep):
_OPTIONS = ['address', 'username', 'password', 'uuid',
'local_address', 'transit_channel', 'transit_address',
'target_channel', 'target_address']
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=BRIDGE_INFO_DICT)
expected = [mock.call('dual_bridge')]
# test double bridging and make sure we get back expected result
mock_support.return_value = True
ret = ipmi._parse_driver_info(node)
self.assertEqual(expected, mock_support.call_args_list)
for option in _OPTIONS:
self.assertIsNotNone(ret[option])
# test the default value for 'priv_level'
self.assertEqual('ADMINISTRATOR', ret['priv_level'])
info = BRIDGE_INFO_DICT.copy()
# ipmi_local_address / ipmi_username / ipmi_password are not mandatory
for optional_arg in ['ipmi_local_address', 'ipmi_username',
'ipmi_password']:
del info[optional_arg]
node = obj_utils.get_test_node(self.context, driver_info=info)
ipmi._parse_driver_info(node)
self.assertEqual(mock.call('dual_bridge'), mock_support.call_args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_dual_bridging_not_supported(
self, mock_support, mock_sleep):
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=BRIDGE_INFO_DICT)
# if dual bridge is not supported then check if error is raised
mock_support.return_value = False
self.assertRaises(exception.InvalidParameterValue,
ipmi._parse_driver_info, node)
mock_support.assert_called_once_with('dual_bridge')
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_dual_bridging_missing_parameters(
self, mock_support, mock_sleep):
info = BRIDGE_INFO_DICT.copy()
mock_support.return_value = True
# make sure error is raised when dual bridging is selected and the
# required parameters for dual bridging are not provided
for param in ['ipmi_transit_channel', 'ipmi_target_address',
'ipmi_transit_address', 'ipmi_target_channel']:
del info[param]
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ipmi._parse_driver_info, node)
self.assertEqual(mock.call('dual_bridge'),
mock_support.call_args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_single_bridging_pass(
self, mock_support, mock_sleep):
_OPTIONS = ['address', 'username', 'password', 'uuid',
'local_address', 'target_channel', 'target_address']
info = BRIDGE_INFO_DICT.copy()
info['ipmi_bridging'] = 'single'
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=info)
expected = [mock.call('single_bridge')]
# test single bridging and make sure we get back expected things
mock_support.return_value = True
ret = ipmi._parse_driver_info(node)
self.assertEqual(expected, mock_support.call_args_list)
for option in _OPTIONS:
self.assertIsNotNone(ret[option])
# test the default value for 'priv_level'
self.assertEqual('ADMINISTRATOR', ret['priv_level'])
# check if dual bridge params are set to None
self.assertIsNone(ret['transit_channel'])
self.assertIsNone(ret['transit_address'])
# ipmi_local_address / ipmi_username / ipmi_password are not mandatory
for optional_arg in ['ipmi_local_address', 'ipmi_username',
'ipmi_password']:
del info[optional_arg]
node = obj_utils.get_test_node(self.context, driver_info=info)
ipmi._parse_driver_info(node)
self.assertEqual(mock.call('single_bridge'),
mock_support.call_args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_single_bridging_not_supported(
self, mock_support, mock_sleep):
info = BRIDGE_INFO_DICT.copy()
info['ipmi_bridging'] = 'single'
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=info)
# if single bridge is not supported then check if error is raised
mock_support.return_value = False
self.assertRaises(exception.InvalidParameterValue,
ipmi._parse_driver_info, node)
mock_support.assert_called_once_with('single_bridge')
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
def test__parse_driver_info_with_single_bridging_missing_parameters(
self, mock_support, mock_sleep):
info = dict(BRIDGE_INFO_DICT)
info['ipmi_bridging'] = 'single'
mock_support.return_value = True
# make sure error is raised when single bridging is selected and the
# required parameters for single bridging are not provided
for param in ['ipmi_target_channel', 'ipmi_target_address']:
del info[param]
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.MissingParameterValue,
ipmi._parse_driver_info,
node)
self.assertEqual(mock.call('single_bridge'),
mock_support.call_args)
def test__parse_driver_info_ipmi_prot_version_1_5(self, mock_sleep):
info = dict(INFO_DICT)
info['ipmi_protocol_version'] = '1.5'
node = obj_utils.get_test_node(self.context, driver_info=info)
ret = ipmi._parse_driver_info(node)
self.assertEqual('1.5', ret['protocol_version'])
def test__parse_driver_info_invalid_ipmi_prot_version(self, mock_sleep):
info = dict(INFO_DICT)
info['ipmi_protocol_version'] = '9000'
node = obj_utils.get_test_node(self.context, driver_info=info)
self.assertRaises(exception.InvalidParameterValue,
ipmi._parse_driver_info, node)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_first_call_to_address(self, mock_exec, mock_pwf,
mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
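# LAST_CMD_TIME tracks the last ipmitool call per BMC address; clearing it
# guarantees the first call does not trigger the min_command_interval sleep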
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle,
'A', 'B', 'C',
]
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
mock_pwf.assert_called_once_with(self.info['password'])
mock_exec.assert_called_once_with(*args)
self.assertFalse(mock_sleep.called)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_second_call_to_address_sleep(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
pw_file_handle1 = tempfile.NamedTemporaryFile()
pw_file1 = pw_file_handle1.name
file_handle1 = open(pw_file1, "w")
pw_file_handle2 = tempfile.NamedTemporaryFile()
pw_file2 = pw_file_handle2.name
file_handle2 = open(pw_file2, "w")
args = [[
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle1,
'A', 'B', 'C',
], [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle2,
'D', 'E', 'F',
]]
expected = [mock.call('timing'),
mock.call('timing')]
mock_support.return_value = False
mock_pwf.side_effect = iter([file_handle1, file_handle2])
mock_exec.side_effect = iter([(None, None), (None, None)])
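        # two back-to-back commands to the same BMC address: the second call
        # should be throttled with a sleep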
ipmi._exec_ipmitool(self.info, 'A B C')
mock_exec.assert_called_with(*args[0])
ipmi._exec_ipmitool(self.info, 'D E F')
self.assertTrue(mock_sleep.called)
self.assertEqual(expected, mock_support.call_args_list)
mock_exec.assert_called_with(*args[1])
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_second_call_to_address_no_sleep(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
pw_file_handle1 = tempfile.NamedTemporaryFile()
pw_file1 = pw_file_handle1.name
file_handle1 = open(pw_file1, "w")
pw_file_handle2 = tempfile.NamedTemporaryFile()
pw_file2 = pw_file_handle2.name
file_handle2 = open(pw_file2, "w")
args = [[
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle1,
'A', 'B', 'C',
], [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle2,
'D', 'E', 'F',
]]
expected = [mock.call('timing'),
mock.call('timing')]
mock_support.return_value = False
mock_pwf.side_effect = iter([file_handle1, file_handle2])
mock_exec.side_effect = iter([(None, None), (None, None)])
ipmi._exec_ipmitool(self.info, 'A B C')
mock_exec.assert_called_with(*args[0])
# act like enough time has passed
ipmi.LAST_CMD_TIME[self.info['address']] = (
time.time() - CONF.ipmi.min_command_interval)
ipmi._exec_ipmitool(self.info, 'D E F')
self.assertFalse(mock_sleep.called)
self.assertEqual(expected, mock_support.call_args_list)
mock_exec.assert_called_with(*args[1])
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_two_calls_to_diff_address(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
pw_file_handle1 = tempfile.NamedTemporaryFile()
pw_file1 = pw_file_handle1.name
file_handle1 = open(pw_file1, "w")
pw_file_handle2 = tempfile.NamedTemporaryFile()
pw_file2 = pw_file_handle2.name
file_handle2 = open(pw_file2, "w")
args = [[
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle1,
'A', 'B', 'C',
], [
'ipmitool',
'-I', 'lanplus',
'-H', '127.127.127.127',
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle2,
'D', 'E', 'F',
]]
expected = [mock.call('timing'),
mock.call('timing')]
mock_support.return_value = False
mock_pwf.side_effect = iter([file_handle1, file_handle2])
mock_exec.side_effect = iter([(None, None), (None, None)])
ipmi._exec_ipmitool(self.info, 'A B C')
mock_exec.assert_called_with(*args[0])
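        # switch to a different BMC address; command spacing is tracked per
        # address, so no throttling sleep is expected here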
self.info['address'] = '127.127.127.127'
ipmi._exec_ipmitool(self.info, 'D E F')
self.assertFalse(mock_sleep.called)
self.assertEqual(expected, mock_support.call_args_list)
mock_exec.assert_called_with(*args[1])
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_without_timing(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle,
'A', 'B', 'C',
]
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
mock_pwf.assert_called_once_with(self.info['password'])
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_with_timing(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
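        # with the timing option supported, the command line should include the
        # retry count (-R) and interval (-N) options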
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-R', '12',
'-N', '5',
'-f', file_handle,
'A', 'B', 'C',
]
mock_support.return_value = True
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
mock_pwf.assert_called_once_with(self.info['password'])
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_without_username(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
self.info['username'] = None
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-f', file_handle,
'A', 'B', 'C',
]
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
self.assertTrue(mock_pwf.called)
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_with_dual_bridging(self,
mock_exec, mock_pwf,
mock_support,
mock_sleep):
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=BRIDGE_INFO_DICT)
        # report the dual bridge command as supported
mock_support.return_value = True
info = ipmi._parse_driver_info(node)
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', info['address'],
'-L', info['priv_level'],
'-U', info['username'],
'-m', info['local_address'],
'-B', info['transit_channel'],
'-T', info['transit_address'],
'-b', info['target_channel'],
'-t', info['target_address'],
'-f', file_handle,
'A', 'B', 'C',
]
expected = [mock.call('dual_bridge'),
mock.call('timing')]
        # report the timing command as unsupported
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(info, 'A B C')
self.assertEqual(expected, mock_support.call_args_list)
self.assertTrue(mock_pwf.called)
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_with_single_bridging(self,
mock_exec, mock_pwf,
mock_support,
mock_sleep):
single_bridge_info = dict(BRIDGE_INFO_DICT)
single_bridge_info['ipmi_bridging'] = 'single'
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=single_bridge_info)
        # report the single bridge command as supported
mock_support.return_value = True
info = ipmi._parse_driver_info(node)
info['transit_channel'] = info['transit_address'] = None
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', info['address'],
'-L', info['priv_level'],
'-U', info['username'],
'-m', info['local_address'],
'-b', info['target_channel'],
'-t', info['target_address'],
'-f', file_handle,
'A', 'B', 'C',
]
expected = [mock.call('single_bridge'),
mock.call('timing')]
        # report the timing command as unsupported
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(info, 'A B C')
self.assertEqual(expected, mock_support.call_args_list)
self.assertTrue(mock_pwf.called)
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_exception(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
pw_file_handle = tempfile.NamedTemporaryFile()
pw_file = pw_file_handle.name
file_handle = open(pw_file, "w")
args = [
'ipmitool',
'-I', 'lanplus',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', file_handle,
'A', 'B', 'C',
]
mock_support.return_value = False
mock_pwf.return_value = file_handle
mock_exec.side_effect = iter([processutils.ProcessExecutionError("x")])
self.assertRaises(processutils.ProcessExecutionError,
ipmi._exec_ipmitool,
self.info, 'A B C')
mock_support.assert_called_once_with('timing')
mock_pwf.assert_called_once_with(self.info['password'])
mock_exec.assert_called_once_with(*args)
self.assertEqual(1, mock_exec.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_exception_retry(
self, mock_exec, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
mock_support.return_value = False
mock_exec.side_effect = iter([
processutils.ProcessExecutionError(
stderr="insufficient resources for session"
),
(None, None)
])
        # Directly set the configuration values such that
        # _exec_ipmitool makes two attempts in total (one retry).
self.config(min_command_interval=1, group='ipmi')
self.config(retry_timeout=2, group='ipmi')
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
self.assertEqual(2, mock_exec.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_exception_retries_exceeded(
self, mock_exec, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
mock_support.return_value = False
mock_exec.side_effect = iter([processutils.ProcessExecutionError(
stderr="insufficient resources for session"
)])
# Directly set the configuration values such that
# the logic will cause _exec_ipmitool to timeout.
self.config(min_command_interval=1, group='ipmi')
self.config(retry_timeout=1, group='ipmi')
self.assertRaises(processutils.ProcessExecutionError,
ipmi._exec_ipmitool,
self.info, 'A B C')
mock_support.assert_called_once_with('timing')
self.assertEqual(1, mock_exec.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_exception_non_retryable_failure(
self, mock_exec, mock_support, mock_sleep):
ipmi.LAST_CMD_TIME = {}
mock_support.return_value = False
        # Return a retryable error, then an error that cannot be retried,
        # resulting in a single retry attempt by _exec_ipmitool.
mock_exec.side_effect = iter([
processutils.ProcessExecutionError(
stderr="insufficient resources for session"
),
processutils.ProcessExecutionError(
stderr="Unknown"
),
])
# Directly set the configuration values such that
# the logic will cause _exec_ipmitool to retry up
# to 3 times.
self.config(min_command_interval=1, group='ipmi')
self.config(retry_timeout=3, group='ipmi')
self.assertRaises(processutils.ProcessExecutionError,
ipmi._exec_ipmitool,
self.info, 'A B C')
mock_support.assert_called_once_with('timing')
self.assertEqual(2, mock_exec.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(ipmi, '_make_password_file', autospec=True)
@mock.patch.object(utils, 'execute', autospec=True)
def test__exec_ipmitool_IPMI_version_1_5(
self, mock_exec, mock_pwf, mock_support, mock_sleep):
self.info['protocol_version'] = '1.5'
# Assert it uses "-I lan" (1.5) instead of "-I lanplus" (2.0)
args = [
'ipmitool',
'-I', 'lan',
'-H', self.info['address'],
'-L', self.info['priv_level'],
'-U', self.info['username'],
'-f', mock.ANY,
'A', 'B', 'C',
]
mock_support.return_value = False
mock_exec.return_value = (None, None)
ipmi._exec_ipmitool(self.info, 'A B C')
mock_support.assert_called_once_with('timing')
self.assertTrue(mock_pwf.called)
mock_exec.assert_called_once_with(*args)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__power_status_on(self, mock_exec, mock_sleep):
mock_exec.return_value = ["Chassis Power is on\n", None]
state = ipmi._power_status(self.info)
mock_exec.assert_called_once_with(self.info, "power status")
self.assertEqual(states.POWER_ON, state)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__power_status_off(self, mock_exec, mock_sleep):
mock_exec.return_value = ["Chassis Power is off\n", None]
state = ipmi._power_status(self.info)
mock_exec.assert_called_once_with(self.info, "power status")
self.assertEqual(states.POWER_OFF, state)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__power_status_error(self, mock_exec, mock_sleep):
mock_exec.return_value = ["Chassis Power is badstate\n", None]
state = ipmi._power_status(self.info)
mock_exec.assert_called_once_with(self.info, "power status")
self.assertEqual(states.ERROR, state)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__power_status_exception(self, mock_exec, mock_sleep):
mock_exec.side_effect = iter(
[processutils.ProcessExecutionError("error")])
self.assertRaises(exception.IPMIFailure,
ipmi._power_status,
self.info)
mock_exec.assert_called_once_with(self.info, "power status")
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
@mock.patch('eventlet.greenthread.sleep', autospec=True)
def test__power_on_max_retries(self, sleep_mock, mock_exec, mock_sleep):
self.config(retry_timeout=2, group='ipmi')
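        # power status keeps reporting "off", so _power_on should exhaust its
        # retries and return ERROR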
def side_effect(driver_info, command):
resp_dict = {"power status": ["Chassis Power is off\n", None],
"power on": [None, None]}
return resp_dict.get(command, ["Bad\n", None])
mock_exec.side_effect = side_effect
expected = [mock.call(self.info, "power on"),
mock.call(self.info, "power status"),
mock.call(self.info, "power status")]
state = ipmi._power_on(self.info)
self.assertEqual(mock_exec.call_args_list, expected)
self.assertEqual(states.ERROR, state)
class IPMIToolDriverTestCase(db_base.DbTestCase):
def setUp(self):
super(IPMIToolDriverTestCase, self).setUp()
mgr_utils.mock_the_extension_manager(driver="fake_ipmitool")
self.driver = driver_factory.get_driver("fake_ipmitool")
self.node = obj_utils.create_test_node(self.context,
driver='fake_ipmitool',
driver_info=INFO_DICT)
self.info = ipmi._parse_driver_info(self.node)
@mock.patch.object(ipmi, "_parse_driver_info", autospec=True)
def test_power_validate(self, mock_parse):
node = obj_utils.get_test_node(self.context, driver='fake_ipmitool',
driver_info=INFO_DICT)
mock_parse.return_value = {}
with task_manager.acquire(self.context, node.uuid) as task:
task.driver.power.validate(task)
mock_parse.assert_called_once_with(mock.ANY)
def test_get_properties(self):
expected = ipmi.COMMON_PROPERTIES
self.assertEqual(expected, self.driver.power.get_properties())
expected = list(ipmi.COMMON_PROPERTIES) + list(ipmi.CONSOLE_PROPERTIES)
self.assertEqual(sorted(expected),
sorted(self.driver.console.get_properties().keys()))
self.assertEqual(sorted(expected),
sorted(self.driver.get_properties().keys()))
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_get_power_state(self, mock_exec):
returns = iter([["Chassis Power is off\n", None],
["Chassis Power is on\n", None],
["\n", None]])
expected = [mock.call(self.info, "power status"),
mock.call(self.info, "power status"),
mock.call(self.info, "power status")]
mock_exec.side_effect = returns
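        # three consecutive status queries: off, on, then unparseable output
        # that should map to ERROR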
with task_manager.acquire(self.context, self.node.uuid) as task:
pstate = self.driver.power.get_power_state(task)
self.assertEqual(states.POWER_OFF, pstate)
pstate = self.driver.power.get_power_state(task)
self.assertEqual(states.POWER_ON, pstate)
pstate = self.driver.power.get_power_state(task)
self.assertEqual(states.ERROR, pstate)
self.assertEqual(mock_exec.call_args_list, expected)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_get_power_state_exception(self, mock_exec):
mock_exec.side_effect = iter(
[processutils.ProcessExecutionError("error")])
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.IPMIFailure,
self.driver.power.get_power_state,
task)
mock_exec.assert_called_once_with(self.info, "power status")
@mock.patch.object(ipmi, '_power_on', autospec=True)
@mock.patch.object(ipmi, '_power_off', autospec=True)
def test_set_power_on_ok(self, mock_off, mock_on):
self.config(retry_timeout=0, group='ipmi')
mock_on.return_value = states.POWER_ON
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.power.set_power_state(task,
states.POWER_ON)
mock_on.assert_called_once_with(self.info)
self.assertFalse(mock_off.called)
@mock.patch.object(ipmi, '_power_on', autospec=True)
@mock.patch.object(ipmi, '_power_off', autospec=True)
def test_set_power_off_ok(self, mock_off, mock_on):
self.config(retry_timeout=0, group='ipmi')
mock_off.return_value = states.POWER_OFF
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.power.set_power_state(task,
states.POWER_OFF)
mock_off.assert_called_once_with(self.info)
self.assertFalse(mock_on.called)
@mock.patch.object(ipmi, '_power_on', autospec=True)
@mock.patch.object(ipmi, '_power_off', autospec=True)
def test_set_power_on_fail(self, mock_off, mock_on):
self.config(retry_timeout=0, group='ipmi')
mock_on.return_value = states.ERROR
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.set_power_state,
task,
states.POWER_ON)
mock_on.assert_called_once_with(self.info)
self.assertFalse(mock_off.called)
def test_set_power_invalid_state(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.power.set_power_state,
task,
"fake state")
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_send_raw_bytes_ok(self, mock_exec):
mock_exec.return_value = [None, None]
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.vendor.send_raw(task, http_method='POST',
raw_bytes='0x00 0x01')
mock_exec.assert_called_once_with(self.info, 'raw 0x00 0x01')
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_send_raw_bytes_fail(self, mock_exec):
mock_exec.side_effect = iter(
[exception.PasswordFileFailedToCreate('error')])
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.assertRaises(exception.IPMIFailure,
self.driver.vendor.send_raw,
task,
http_method='POST',
raw_bytes='0x00 0x01')
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__bmc_reset_ok(self, mock_exec):
mock_exec.return_value = [None, None]
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.vendor.bmc_reset(task, 'POST')
mock_exec.assert_called_once_with(self.info, 'bmc reset warm')
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__bmc_reset_cold(self, mock_exec):
mock_exec.return_value = [None, None]
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.vendor.bmc_reset(task, 'POST', warm=False)
mock_exec.assert_called_once_with(self.info, 'bmc reset cold')
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test__bmc_reset_fail(self, mock_exec):
mock_exec.side_effect = iter([processutils.ProcessExecutionError()])
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.assertRaises(exception.IPMIFailure,
self.driver.vendor.bmc_reset,
task, 'POST')
@mock.patch.object(ipmi, '_power_off', spec_set=types.FunctionType)
@mock.patch.object(ipmi, '_power_on', spec_set=types.FunctionType)
def test_reboot_ok(self, mock_on, mock_off):
manager = mock.MagicMock()
# NOTE(rloo): if autospec is True, then manager.mock_calls is empty
mock_on.return_value = states.POWER_ON
manager.attach_mock(mock_off, 'power_off')
manager.attach_mock(mock_on, 'power_on')
expected = [mock.call.power_off(self.info),
mock.call.power_on(self.info)]
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.power.reboot(task)
self.assertEqual(manager.mock_calls, expected)
@mock.patch.object(ipmi, '_power_off', spec_set=types.FunctionType)
@mock.patch.object(ipmi, '_power_on', spec_set=types.FunctionType)
def test_reboot_fail(self, mock_on, mock_off):
manager = mock.MagicMock()
# NOTE(rloo): if autospec is True, then manager.mock_calls is empty
mock_on.return_value = states.ERROR
manager.attach_mock(mock_off, 'power_off')
manager.attach_mock(mock_on, 'power_on')
expected = [mock.call.power_off(self.info),
mock.call.power_on(self.info)]
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.assertRaises(exception.PowerStateFailure,
self.driver.power.reboot,
task)
self.assertEqual(manager.mock_calls, expected)
@mock.patch.object(ipmi, '_parse_driver_info', autospec=True)
def test_vendor_passthru_validate__parse_driver_info_fail(self, info_mock):
info_mock.side_effect = iter([exception.InvalidParameterValue("bad")])
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.vendor.validate,
task, method='send_raw', raw_bytes='0x00 0x01')
info_mock.assert_called_once_with(task.node)
def test_vendor_passthru_validate__send_raw_bytes_good(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.driver.vendor.validate(task,
method='send_raw',
http_method='POST',
raw_bytes='0x00 0x01')
def test_vendor_passthru_validate__send_raw_bytes_fail(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.assertRaises(exception.MissingParameterValue,
self.driver.vendor.validate,
task, method='send_raw')
@mock.patch.object(ipmi.VendorPassthru, 'send_raw', autospec=True)
def test_vendor_passthru_call_send_raw_bytes(self, raw_bytes_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.driver.vendor.send_raw(task, http_method='POST',
raw_bytes='0x00 0x01')
raw_bytes_mock.assert_called_once_with(
self.driver.vendor, task, http_method='POST',
raw_bytes='0x00 0x01')
def test_vendor_passthru_validate__bmc_reset_good(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.driver.vendor.validate(task,
method='bmc_reset')
def test_vendor_passthru_validate__bmc_reset_warm_good(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.driver.vendor.validate(task,
method='bmc_reset',
warm=True)
def test_vendor_passthru_validate__bmc_reset_cold_good(self):
with task_manager.acquire(self.context, self.node['uuid']) as task:
self.driver.vendor.validate(task,
method='bmc_reset',
warm=False)
@mock.patch.object(ipmi.VendorPassthru, 'bmc_reset', autospec=True)
def test_vendor_passthru_call_bmc_reset_warm(self, bmc_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.driver.vendor.bmc_reset(task, 'POST', warm=True)
bmc_mock.assert_called_once_with(
self.driver.vendor, task, 'POST', warm=True)
@mock.patch.object(ipmi.VendorPassthru, 'bmc_reset', autospec=True)
def test_vendor_passthru_call_bmc_reset_cold(self, bmc_mock):
with task_manager.acquire(self.context, self.node['uuid'],
shared=False) as task:
self.driver.vendor.bmc_reset(task, 'POST', warm=False)
bmc_mock.assert_called_once_with(
self.driver.vendor, task, 'POST', warm=False)
def test_vendor_passthru_vendor_routes(self):
expected = ['send_raw', 'bmc_reset']
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
vendor_routes = task.driver.vendor.vendor_routes
self.assertIsInstance(vendor_routes, dict)
self.assertEqual(sorted(expected), sorted(vendor_routes))
def test_vendor_passthru_driver_routes(self):
with task_manager.acquire(self.context, self.node.uuid,
shared=True) as task:
driver_routes = task.driver.vendor.driver_routes
self.assertIsInstance(driver_routes, dict)
self.assertEqual({}, driver_routes)
def test_console_validate(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
task.node.driver_info['ipmi_terminal_port'] = 123
task.driver.console.validate(task)
def test_console_validate_missing_port(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
task.node.driver_info.pop('ipmi_terminal_port', None)
self.assertRaises(exception.MissingParameterValue,
task.driver.console.validate, task)
def test_console_validate_wrong_ipmi_protocol_version(self):
with task_manager.acquire(
self.context, self.node.uuid, shared=True) as task:
task.node.driver_info['ipmi_terminal_port'] = 123
task.node.driver_info['ipmi_protocol_version'] = '1.5'
self.assertRaises(exception.InvalidParameterValue,
task.driver.console.validate, task)
@mock.patch.object(console_utils, 'start_shellinabox_console',
autospec=True)
def test_start_console(self, mock_exec):
mock_exec.return_value = None
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.console.start_console(task)
mock_exec.assert_called_once_with(self.info['uuid'],
self.info['port'],
mock.ANY)
self.assertTrue(mock_exec.called)
@mock.patch.object(console_utils, 'start_shellinabox_console',
autospec=True)
def test_start_console_fail(self, mock_exec):
mock_exec.side_effect = iter(
[exception.ConsoleSubprocessFailed(error='error')])
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.assertRaises(exception.ConsoleSubprocessFailed,
self.driver.console.start_console,
task)
@mock.patch.object(console_utils, 'start_shellinabox_console',
autospec=True)
def test_start_console_fail_nodir(self, mock_exec):
mock_exec.side_effect = iter([exception.ConsoleError()])
with task_manager.acquire(self.context,
self.node.uuid) as task:
self.assertRaises(exception.ConsoleError,
self.driver.console.start_console,
task)
mock_exec.assert_called_once_with(self.node.uuid, mock.ANY, mock.ANY)
@mock.patch.object(console_utils, 'stop_shellinabox_console',
autospec=True)
def test_stop_console(self, mock_exec):
mock_exec.return_value = None
with task_manager.acquire(self.context,
self.node['uuid']) as task:
self.driver.console.stop_console(task)
mock_exec.assert_called_once_with(self.info['uuid'])
self.assertTrue(mock_exec.called)
@mock.patch.object(console_utils, 'stop_shellinabox_console',
autospec=True)
def test_stop_console_fail(self, mock_stop):
mock_stop.side_effect = iter([exception.ConsoleError()])
with task_manager.acquire(self.context,
self.node.uuid) as task:
self.assertRaises(exception.ConsoleError,
self.driver.console.stop_console,
task)
mock_stop.assert_called_once_with(self.node.uuid)
@mock.patch.object(console_utils, 'get_shellinabox_console_url',
autospec=True)
def test_get_console(self, mock_exec):
url = 'http://localhost:4201'
mock_exec.return_value = url
expected = {'type': 'shellinabox', 'url': url}
with task_manager.acquire(self.context,
self.node['uuid']) as task:
console_info = self.driver.console.get_console(task)
self.assertEqual(expected, console_info)
mock_exec.assert_called_once_with(self.info['port'])
self.assertTrue(mock_exec.called)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_set_boot_device_ok(self, mock_exec):
mock_exec.return_value = [None, None]
with task_manager.acquire(self.context, self.node.uuid) as task:
self.driver.management.set_boot_device(task, boot_devices.PXE)
mock_calls = [mock.call(self.info, "raw 0x00 0x08 0x03 0x08"),
mock.call(self.info, "chassis bootdev pxe")]
mock_exec.assert_has_calls(mock_calls)
def test_management_interface_set_boot_device_bad_device(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.InvalidParameterValue,
self.driver.management.set_boot_device,
task, 'fake-device')
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_set_boot_device_exec_failed(self, mock_exec):
mock_exec.side_effect = iter([processutils.ProcessExecutionError()])
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(exception.IPMIFailure,
self.driver.management.set_boot_device,
task, boot_devices.PXE)
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_set_boot_device_unknown_exception(self,
mock_exec):
class FakeException(Exception):
pass
mock_exec.side_effect = iter([FakeException('boom')])
with task_manager.acquire(self.context, self.node.uuid) as task:
self.assertRaises(FakeException,
self.driver.management.set_boot_device,
task, boot_devices.PXE)
def test_management_interface_get_supported_boot_devices(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
expected = [boot_devices.PXE, boot_devices.DISK,
boot_devices.CDROM, boot_devices.BIOS,
boot_devices.SAFE]
self.assertEqual(sorted(expected), sorted(task.driver.management.
get_supported_boot_devices()))
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_get_boot_device(self, mock_exec):
# output, expected boot device
bootdevs = [('Boot Device Selector : '
'Force Boot from default Hard-Drive\n',
boot_devices.DISK),
('Boot Device Selector : '
'Force Boot from default Hard-Drive, request Safe-Mode\n',
boot_devices.SAFE),
('Boot Device Selector : '
'Force Boot into BIOS Setup\n',
boot_devices.BIOS),
('Boot Device Selector : '
'Force PXE\n',
boot_devices.PXE),
('Boot Device Selector : '
'Force Boot from CD/DVD\n',
boot_devices.CDROM)]
with task_manager.acquire(self.context, self.node.uuid) as task:
for out, expected_device in bootdevs:
mock_exec.return_value = (out, '')
expected_response = {'boot_device': expected_device,
'persistent': False}
self.assertEqual(expected_response,
task.driver.management.get_boot_device(task))
mock_exec.assert_called_with(mock.ANY,
"chassis bootparam get 5")
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_get_boot_device_unknown_dev(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_exec.return_value = ('Boot Device Selector : Fake\n', '')
response = task.driver.management.get_boot_device(task)
self.assertIsNone(response['boot_device'])
mock_exec.assert_called_with(mock.ANY, "chassis bootparam get 5")
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_get_boot_device_fail(self, mock_exec):
with task_manager.acquire(self.context, self.node.uuid) as task:
mock_exec.side_effect = iter(
[processutils.ProcessExecutionError()])
self.assertRaises(exception.IPMIFailure,
task.driver.management.get_boot_device, task)
mock_exec.assert_called_with(mock.ANY, "chassis bootparam get 5")
@mock.patch.object(ipmi, '_exec_ipmitool', autospec=True)
def test_management_interface_get_boot_device_persistent(self, mock_exec):
outputs = [('Options apply to only next boot\n'
'Boot Device Selector : Force PXE\n',
False),
('Options apply to all future boots\n'
'Boot Device Selector : Force PXE\n',
True)]
with task_manager.acquire(self.context, self.node.uuid) as task:
for out, expected_persistent in outputs:
mock_exec.return_value = (out, '')
expected_response = {'boot_device': boot_devices.PXE,
'persistent': expected_persistent}
self.assertEqual(expected_response,
task.driver.management.get_boot_device(task))
mock_exec.assert_called_with(mock.ANY,
"chassis bootparam get 5")
def test_management_interface_validate_good(self):
with task_manager.acquire(self.context, self.node.uuid) as task:
task.driver.management.validate(task)
def test_management_interface_validate_fail(self):
# Missing IPMI driver_info information
node = obj_utils.create_test_node(self.context,
uuid=uuidutils.generate_uuid(),
driver='fake_ipmitool')
with task_manager.acquire(self.context, node.uuid) as task:
self.assertRaises(exception.MissingParameterValue,
task.driver.management.validate, task)
def test__parse_ipmi_sensor_data_ok(self):
fake_sensors_data = """
Sensor ID : Temp (0x1)
Entity ID : 3.1 (Processor)
Sensor Type (Analog) : Temperature
Sensor Reading : -58 (+/- 1) degrees C
Status : ok
Nominal Reading : 50.000
Normal Minimum : 11.000
Normal Maximum : 69.000
Upper critical : 90.000
Upper non-critical : 85.000
Positive Hysteresis : 1.000
Negative Hysteresis : 1.000
Sensor ID : Temp (0x2)
Entity ID : 3.2 (Processor)
Sensor Type (Analog) : Temperature
Sensor Reading : 50 (+/- 1) degrees C
Status : ok
Nominal Reading : 50.000
Normal Minimum : 11.000
Normal Maximum : 69.000
Upper critical : 90.000
Upper non-critical : 85.000
Positive Hysteresis : 1.000
Negative Hysteresis : 1.000
Sensor ID : FAN MOD 1A RPM (0x30)
Entity ID : 7.1 (System Board)
Sensor Type (Analog) : Fan
Sensor Reading : 8400 (+/- 75) RPM
Status : ok
Nominal Reading : 5325.000
Normal Minimum : 10425.000
Normal Maximum : 14775.000
Lower critical : 4275.000
Positive Hysteresis : 375.000
Negative Hysteresis : 375.000
Sensor ID : FAN MOD 1B RPM (0x31)
Entity ID : 7.1 (System Board)
Sensor Type (Analog) : Fan
Sensor Reading : 8550 (+/- 75) RPM
Status : ok
Nominal Reading : 7800.000
Normal Minimum : 10425.000
Normal Maximum : 14775.000
Lower critical : 4275.000
Positive Hysteresis : 375.000
Negative Hysteresis : 375.000
"""
expected_return = {
'Fan': {
'FAN MOD 1A RPM (0x30)': {
'Status': 'ok',
'Sensor Reading': '8400 (+/- 75) RPM',
'Entity ID': '7.1 (System Board)',
'Normal Minimum': '10425.000',
'Positive Hysteresis': '375.000',
'Normal Maximum': '14775.000',
'Sensor Type (Analog)': 'Fan',
'Lower critical': '4275.000',
'Negative Hysteresis': '375.000',
'Sensor ID': 'FAN MOD 1A RPM (0x30)',
'Nominal Reading': '5325.000'
},
'FAN MOD 1B RPM (0x31)': {
'Status': 'ok',
'Sensor Reading': '8550 (+/- 75) RPM',
'Entity ID': '7.1 (System Board)',
'Normal Minimum': '10425.000',
'Positive Hysteresis': '375.000',
'Normal Maximum': '14775.000',
'Sensor Type (Analog)': 'Fan',
'Lower critical': '4275.000',
'Negative Hysteresis': '375.000',
'Sensor ID': 'FAN MOD 1B RPM (0x31)',
'Nominal Reading': '7800.000'
}
},
'Temperature': {
'Temp (0x1)': {
'Status': 'ok',
'Sensor Reading': '-58 (+/- 1) degrees C',
'Entity ID': '3.1 (Processor)',
'Normal Minimum': '11.000',
'Positive Hysteresis': '1.000',
'Upper non-critical': '85.000',
'Normal Maximum': '69.000',
'Sensor Type (Analog)': 'Temperature',
'Negative Hysteresis': '1.000',
'Upper critical': '90.000',
'Sensor ID': 'Temp (0x1)',
'Nominal Reading': '50.000'
},
'Temp (0x2)': {
'Status': 'ok',
'Sensor Reading': '50 (+/- 1) degrees C',
'Entity ID': '3.2 (Processor)',
'Normal Minimum': '11.000',
'Positive Hysteresis': '1.000',
'Upper non-critical': '85.000',
'Normal Maximum': '69.000',
'Sensor Type (Analog)': 'Temperature',
'Negative Hysteresis': '1.000',
'Upper critical': '90.000',
'Sensor ID': 'Temp (0x2)',
'Nominal Reading': '50.000'
}
}
}
ret = ipmi._parse_ipmi_sensors_data(self.node, fake_sensors_data)
self.assertEqual(expected_return, ret)
def test__parse_ipmi_sensor_data_missing_sensor_reading(self):
fake_sensors_data = """
Sensor ID : Temp (0x1)
Entity ID : 3.1 (Processor)
Sensor Type (Analog) : Temperature
Status : ok
Nominal Reading : 50.000
Normal Minimum : 11.000
Normal Maximum : 69.000
Upper critical : 90.000
Upper non-critical : 85.000
Positive Hysteresis : 1.000
Negative Hysteresis : 1.000
Sensor ID : Temp (0x2)
Entity ID : 3.2 (Processor)
Sensor Type (Analog) : Temperature
Sensor Reading : 50 (+/- 1) degrees C
Status : ok
Nominal Reading : 50.000
Normal Minimum : 11.000
Normal Maximum : 69.000
Upper critical : 90.000
Upper non-critical : 85.000
Positive Hysteresis : 1.000
Negative Hysteresis : 1.000
Sensor ID : FAN MOD 1A RPM (0x30)
Entity ID : 7.1 (System Board)
Sensor Type (Analog) : Fan
Sensor Reading : 8400 (+/- 75) RPM
Status : ok
Nominal Reading : 5325.000
Normal Minimum : 10425.000
Normal Maximum : 14775.000
Lower critical : 4275.000
Positive Hysteresis : 375.000
Negative Hysteresis : 375.000
"""
expected_return = {
'Fan': {
'FAN MOD 1A RPM (0x30)': {
'Status': 'ok',
'Sensor Reading': '8400 (+/- 75) RPM',
'Entity ID': '7.1 (System Board)',
'Normal Minimum': '10425.000',
'Positive Hysteresis': '375.000',
'Normal Maximum': '14775.000',
'Sensor Type (Analog)': 'Fan',
'Lower critical': '4275.000',
'Negative Hysteresis': '375.000',
'Sensor ID': 'FAN MOD 1A RPM (0x30)',
'Nominal Reading': '5325.000'
}
},
'Temperature': {
'Temp (0x2)': {
'Status': 'ok',
'Sensor Reading': '50 (+/- 1) degrees C',
'Entity ID': '3.2 (Processor)',
'Normal Minimum': '11.000',
'Positive Hysteresis': '1.000',
'Upper non-critical': '85.000',
'Normal Maximum': '69.000',
'Sensor Type (Analog)': 'Temperature',
'Negative Hysteresis': '1.000',
'Upper critical': '90.000',
'Sensor ID': 'Temp (0x2)',
'Nominal Reading': '50.000'
}
}
}
ret = ipmi._parse_ipmi_sensors_data(self.node, fake_sensors_data)
self.assertEqual(expected_return, ret)
def test__parse_ipmi_sensor_data_failed(self):
fake_sensors_data = "abcdef"
self.assertRaises(exception.FailedToParseSensorData,
ipmi._parse_ipmi_sensors_data,
self.node,
fake_sensors_data)
fake_sensors_data = "abc:def:ghi"
self.assertRaises(exception.FailedToParseSensorData,
ipmi._parse_ipmi_sensors_data,
self.node,
fake_sensors_data)
| 44.59081 | 79 | 0.591091 |
ffb4fe8ae655f460a834d783f0140ecb51d9c970 | 8,023 | py | Python | docs/conf.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | null | null | null | docs/conf.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | 1,884 | 2020-06-26T13:41:51.000Z | 2022-03-30T22:00:45.000Z | docs/conf.py | dante381/addons-server | 9702860a19ecca1cb4e4998f37bc43c1b2dd3aa7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# zamboni documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 22 20:39:35 2009.
#
# This file is execfile()d with the current directory set to its containing
# dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('.'))
sys.path.append(os.path.abspath('..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.intersphinx', 'sphinx.ext.todo',
'sphinx.ext.coverage', 'extensions.src_role',
'sphinxcontrib.httpdomain']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'addons-server'
copyright = u'2021, Mozilla'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# version = '3.0'
# The full version.
# release = '3.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directory, that shouldn't be searched
# for source files.
exclude_trees = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
#html_theme_path = ['_themes']
#html_theme = 'mozilla'
# on_rtd is whether we are on readthedocs.org, this line of code grabbed
# from docs.readthedocs.io
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'olympiadoc'
# -- Options for LaTeX output -------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples (source start
# file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'olympia.tex', u'olympia Documentation', u'Mozilla Addons Team',
'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_use_modindex = True
# Intersphinx links to the local _intersphinx cache.
intersphinx_mapping = {
'http://docs.python.org/': 'python.inv',
'http://docs.djangoproject.com/en/dev': 'django.inv',
'http://jinja.pocoo.org/2/documentation/': 'jinja.inv',
    'http://sphinx.pocoo.org/': 'sphinx.inv',
}
for key, val in intersphinx_mapping.items():
intersphinx_mapping[key] = '_intersphinx/' + val
# Root url where source files can be browsed online.
src_base_url = 'http://github.com/mozilla/olympia/tree/master/'
# Ignore missing targets for the http:obj <type>, it's how we declare the types
# for input/output fields in the API docs.
nitpick_ignore = [
('http:obj', 'array'),
('http:obj', 'boolean'),
('http:obj', 'int'),
('http:obj', 'float'),
('http:obj', 'object'),
('http:obj', 'object|null'),
('http:obj', 'string'),
('http:obj', 'string|object|null'),
('http:obj', 'string|object'),
('http:obj', 'string|null'),
('http:obj', 'array|null'),
('http:obj', 'int|null'),
]
| 32.613821 | 79 | 0.708463 |
5a5545ad6294fe0829fa96f8162c48573833cd38 | 178 | py | Python | server/config.py | bgarrofe/essay-grader | 30e51e0c580beba4ef99df0ba2ad0fcdb1b634d3 | [
"MIT"
] | 1 | 2021-08-08T21:43:36.000Z | 2021-08-08T21:43:36.000Z | server/config.py | bgarrofe/essay-grader | 30e51e0c580beba4ef99df0ba2ad0fcdb1b634d3 | [
"MIT"
] | null | null | null | server/config.py | bgarrofe/essay-grader | 30e51e0c580beba4ef99df0ba2ad0fcdb1b634d3 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config(object):
SECRET_KEY = os.environ.get(
'SECRET_KEY') or 'WTWl6UC|<_=EWP^gaU*f%IcFDB=J#*'
| 22.25 | 57 | 0.679775 |
ecf8b2701b44a5eb64cffb1f67fc2af414757e6d | 228 | py | Python | message.py | fivunlm/sbb8 | 9493cfb9d799b57ae4b3bb8a44672fa92736881e | [
"MIT"
] | null | null | null | message.py | fivunlm/sbb8 | 9493cfb9d799b57ae4b3bb8a44672fa92736881e | [
"MIT"
] | null | null | null | message.py | fivunlm/sbb8 | 9493cfb9d799b57ae4b3bb8a44672fa92736881e | [
"MIT"
] | null | null | null | class Message:
class Kind:
TEXT = 100
COMMAND = 200
def __init__(self, kind=Kind.TEXT, payload=None, channel=None):
self.kind = kind
self.payload = payload
self.channel = channel
| 22.8 | 67 | 0.592105 |
40b66251a96f4ab60da5edcd9b2f61cd3861893e | 8,825 | py | Python | scripts/old_run_snorkel.py | liuzi/weak_supervision | 3a0ef4cdb15455df013e96b4c8392231c41e800f | [
"MIT"
] | null | null | null | scripts/old_run_snorkel.py | liuzi/weak_supervision | 3a0ef4cdb15455df013e96b4c8392231c41e800f | [
"MIT"
] | null | null | null | scripts/old_run_snorkel.py | liuzi/weak_supervision | 3a0ef4cdb15455df013e96b4c8392231c41e800f | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import sys
# from os import listdir
# from os.path import isfile,
from os.path import join
import os
# from pathlib import Path
# import re
# import argparse
# import inspect
# import textwrap
# import pickle
import sklearn
from datetime import datetime
# for cleaning discharge summaries
import nltk
from nltk.corpus import stopwords
# for label models
from snorkel.labeling.model import LabelModel
from snorkel.labeling.model import MajorityLabelVoter
from sklearn import metrics
# for labeling functions
from snorkel.labeling import labeling_function
from snorkel.labeling.lf.nlp import nlp_labeling_function
from nltk.tokenize import RegexpTokenizer
from snorkel.labeling import PandasLFApplier
from snorkel.labeling import LFAnalysis
sys.path.insert(1, '../utils')
from tools import append_csv_bydf, create_folder
'''
import models and rules
'''
# ['linear','poly','sigmoid','rbf']
########################
### models and rules ###
########################
##############
### models ###
##############
sys.path.insert(1, '../models')
from pretrained_label_functions import *
# from pretrained_label_functions import lf_model_svm_linear
# from pretrained_label_functions import lf_model_svm_poly
# from pretrained_label_functions import lf_model_svm_rbf
# from pretrained_label_functions import lf_model_svm_sigmoid
# from pretrained_label_functions import lf_model_rfc
from utils import *
lf_models=[lf_model_svm_linear, lf_model_svm_poly,
lf_model_svm_rbf, lf_model_svm_sigmoid, lf_model_rfc]
# models_dict = {
# "1": lf_model_svm_ade_only_linear}
models_dict_desc = {
"0": "No Models Selected",
"1": "ADE-Only Prediction using Linear SVM",
"2": "ADE-Only Prediction using Polynomial SVM",
"3": "ADE-Only Prediction using Sigmoid SVM",
"4": "ADE-Only Prediction using RBF SVM",
}
#############
### rules ###
#############
# TODO: put rules together into one file
sys.path.insert(1, '../rules')
from rule1 import lf_ade_drug_single
from rule2 import lf_ade_drug_pair
from rule3 import lf_ade_drug_pair_lem
from rule4 import lf_ade_drug_pair_lem_keyword_triggers
from rule5 import lf_sider2_triggers
from rule6 import lf_sider2_triggers_25words
from rule7 import lf_semmeddb_triggers
from rule8 import lf_keyword_triggers
from rule9 import lf_paper_triggers
from rule10 import lf_paper_triggers_200char
from rule11 import lf_paper_triggers_200char_negate
from rule12 import lf_paper_triggers_25words
rules_dict = {
"1": lf_ade_drug_single,
"2": lf_ade_drug_pair,
"3": lf_ade_drug_pair_lem,
"4": lf_ade_drug_pair_lem_keyword_triggers,
"5": lf_sider2_triggers,
"6": lf_sider2_triggers_25words,
"7": lf_semmeddb_triggers,
"8": lf_keyword_triggers,
"9": lf_paper_triggers,
"10": lf_paper_triggers_200char,
"11": lf_paper_triggers_200char_negate,
"12": lf_paper_triggers_25words
}
rules_dict_desc = {
"0": "No Rules Selected",
"1": "lf_ade_drug_single - any keywords in ade_drug_single found in discharge summary",
"2": "lf_ade_drug_pair - any pair of keywords in ade_drug_pair found in discharge summary",
"3": "lf_ade_drug_pair_lem - any pair of lemmatised keywords in ade_drug_pair found in discharge summary",
"4": "lf_ade_drug_pair_lem_keyword_triggers - any pair of lemmatised keywords in ade_drug_pair and any trigger word in keyword_triggers found in discharge summary",
"5": "lf_sider2_triggers - any pair of trigger words in sider2_triggers found in discharge summary",
"6": "lf_sider2_triggers_25words - any pair of trigger words in sider2_triggers within 25 words of each other found in discharge summary",
"7": "lf_semmeddb_triggers - any pair of trigger words in semmeddb_triggers found in discharge summary",
"8": "lf_keyword_triggers - any trigger word in keyword_triggers found in discharge summary",
"9": "lf_paper_triggers - any trigger word in paper_triggers found in discharge summary",
"10": "lf_paper_triggers_200char - any trigger word in paper_triggers within 200 characters of any keyword in ade_drug_single found in discharge summary",
"11": "lf_paper_triggers_200char_negate - any trigger word in paper_triggers within 200 characters of any keyword in negate found in discharge summary",
"12": "lf_paper_triggers_25words - any trigger word in paper_triggers within 25 words of any keyword in ade_drug_single found in discharge summary"
}
### execute the parse_args() method ###
# args = parser.parse_args()
################################
### create folder for output ###
################################
def create_output_folder():
# log_df=pd.DataFrame({log_df_title:[]})
# if not Path(join(output_folder,"log.csv")).exists():
# append_csv_bydf(log_df_title,join(output_folder,"log.csv",sep=","))
date = datetime.now().strftime("%Y%m%d-%I%M%S%p")
folder_name=f"result_{date}"
create_folder(join(output_folder,folder_name))
return folder_name
data_path = "../N2C2"
def main():
# result_folder_path=join(output_folder,create_output_folder())
### save args configuration ###
# print('Save settings of arguments into file %s'%join(result_folder_path, 'args_info.csv'))
# info = pd.DataFrame({'models': [args.models], 'rules': [args.rules]})
# info.to_csv(join(result_folder_path, 'args_info.csv'), index=False)
lfs= []
# FIXME: PUT CLASSIFIER AT THE END
# get models #
print("models selected:")
model_list = [
# lf_models[0],
# lf_models[1],
# lf_models[2],
lf_models[4]
]
for lf in model_list:
print(f"model {lf.name} is selected")
lfs.append(lf)
    # models_list = args.models.split(",")
# FIXME:
print("\n")
print("rules selected:")
# get rules #
# rules_list = args.rules.split(",")
rules_list = [
'3',
'8',
'12']
for i in range(0, len(rules_list)) :
print(rules_list[i], ":", rules_dict_desc[rules_list[i]])
if rules_list[i] != "0" :
lfs.append(rules_dict[rules_list[i]])
###########################
### discharge summaries ###
###########################
# train dataset
'''
get train discharge summaries labels
'''
prepared_data_path=join(data_path,"dataframe")
data_folder_l=["train_txt", "test_txt"]
if(os.path.exists(prepared_data_path)):
trainData, testData = [
pd.read_csv(join(prepared_data_path, "%s.csv"%data_folder))
for data_folder in data_folder_l]
else:
print("please run ../models/train_classifiers_to_pickles.py to prepare data")
'''
[optional] clean training data and test data
'''
cleaned_Data_l=[]
for Data in [trainData, testData]:
for func in [cleanHtml,cleanPunc,keepAlpha,removeStopWords]:
Data["summary"]=Data["summary"].apply(func)
cleaned_Data_l.append(Data)
trainData, testData = cleaned_Data_l
print(trainData.shape)
print(trainData.columns)
print(testData.shape)
print(testData.columns)
'''
LF Applier
'''
applier = PandasLFApplier(lfs=lfs)
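    # apply every labeling function to each discharge summary, producing a
    # label matrix (one column per LF) for the train and test sets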
L_train = applier.apply(df=trainData)
print("\n")
print("Labeling Function Analysis on train dataset")
print(f"{LFAnalysis(L_train, lfs).lf_summary()}")
L_test = applier.apply(df=testData)
num_classes = 2
#####################
### Label Models ###
#####################
### Label Model ###
print("\n")
print("###################")
print("### Label Model ###")
print("###################")
# define model
label_model = LabelModel(cardinality=num_classes, verbose=True)
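    # the label model is fit on the label matrix alone; no ground-truth labels
    # are used at this step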
label_model.fit(L_train=L_train, n_epochs=500,
lr=0.001, log_freq=100, seed=42)
# weights of labeling functions used
label_model_weights = np.around(label_model.get_weights(), 2)
# prediction
funcs, metrics_names = get_metric_funcs_list()
normal_labels = label_model.predict(L_test)
# quit()
for func, metrics_name in zip(funcs, metrics_names):
print(metrics_name, func(testData.label, normal_labels))
### Majority Label Voter ###
print("\n")
print("############################")
print("### Majority Label Voter ###")
print("############################")
# define model
majority_model = MajorityLabelVoter(cardinality=num_classes)
# prediction
majority_labels = majority_model.predict(L_test)
# print(majority_labels)
# print(majority_labels)
# print(testData.label)
# quit()
for func, metrics_name in zip(funcs, metrics_names):
print(metrics_name, func(testData.label, majority_labels))
if __name__ == '__main__':
# main(args)
main() | 33.052434 | 168 | 0.676487 |
9ef15199bc6966bd0bd0c8e9deaaf33280065227 | 1,580 | py | Python | config.py | MichaelNjoroge254/One-Minute_Pitch | 14c8e3f160c5a937782bc549bb825aa222ba63f5 | [
"MIT"
] | null | null | null | config.py | MichaelNjoroge254/One-Minute_Pitch | 14c8e3f160c5a937782bc549bb825aa222ba63f5 | [
"MIT"
] | null | null | null | config.py | MichaelNjoroge254/One-Minute_Pitch | 14c8e3f160c5a937782bc549bb825aa222ba63f5 | [
"MIT"
] | null | null | null | import os
class Config:
'''
General configuration parent class
'''
SECRET_KEY = '1234!'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Access@localhost/pitch'
UPLOADED_PHOTOS_DEST ='app/static/photos'
SQLALCHEMY_TRACK_MODIFICATIONS = True
#email configurations
MAIL_SERVER = 'smtp.gmail.com'
MAIL_PORT = 587
MAIL_USE_TLS = True
MAIL_USERNAME = os.environ.get("MAIL_USERNAME")
MAIL_PASSWORD = os.environ.get("MAIL_PASSWORD")
SUBJECT_PREFIX = 'pitch'
SENDER_EMAIL = 'michael.m.njoroge254@gmail.com'
# simple mde configurations
SIMPLEMDE_JS_IIFE = True
SIMPLEMDE_USE_CDN = True
class ProdConfig(Config):
'''
Production configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
# TESTING = True
SQLALCHEMY_DATABASE_URI = os.environ.get("DATABASE_URL")
class TestConfig(Config):
'''
Testing configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Calmanb@localhost/pitch'
class DevConfig(Config):
'''
Development configuration child class
Args:
Config: The parent configuration class with General configuration settings
'''
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://moringa:Calman@localhost/pitch'
DEBUG = True
ENV = 'development'
config_options = {
'development':DevConfig,
'production':ProdConfig,
'test':TestConfig
} | 29.811321 | 85 | 0.709494 |
047a109a9438bc8a8bdfa6f66f044fc94e6f8068 | 6,173 | py | Python | tests/train_test.py | mjirik/imtools | b7ff33631a035db5fa5156adc5d8536650e4573b | [
"MIT"
] | 7 | 2016-01-06T08:36:31.000Z | 2021-11-11T01:00:05.000Z | tests/train_test.py | mjirik/imtools | b7ff33631a035db5fa5156adc5d8536650e4573b | [
"MIT"
] | 1 | 2018-08-06T18:44:19.000Z | 2018-08-06T18:44:19.000Z | tests/train_test.py | mjirik/imtools | b7ff33631a035db5fa5156adc5d8536650e4573b | [
"MIT"
] | 1 | 2018-08-06T18:27:03.000Z | 2018-08-06T18:27:03.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
# import functions from another directory
import unittest
import numpy as np
import os
import os.path
import pytest
import os.path as op
import sys
import matplotlib.pyplot as plt
import glob
import itertools
import io3d
import sed3
import imtools.trainer3d
import imtools.datasets
import sklearn
import sklearn.metrics
import sklearn.mixture  # needed for sklearn.mixture.GaussianMixture used below
import sklearn.neural_network
import sklearn.utils  # needed for sklearn.utils.resample used below
from sklearn.svm import SVC
from loguru import logger
sys.path.append(op.expanduser("~/projects/bodynavigation"))
# import bodynavigation
pt = op.expanduser("~/projects/imtools")
sys.path.append(pt)
import imtools
# @pytest.mark.interactive
def test_intensity_training_ircad():
#nth - use every nth pixel
nth = 10
# TODO use ircad
sliver_reference_dir = io3d.datasets.joinp("~/data/medical/orig/sliver07/training/", return_as_str=True)
# sliver_reference_dir = op.expanduser("~/data/medical/orig/sliver07/training/")
# Train
ol = imtools.trainer3d.Trainer3D()
# ol.feature_function = localization_fv
# for one in imtools.datasets.sliver_reader("*[0].mhd", read_seg=True):
for i in range(1, 2):
datap = io3d.read_dataset('3Dircadb1', "data3d", i)
datap_liver = io3d.read_dataset('3Dircadb1', "liver", i)
ol.add_train_data(datap["data3d"], (datap_liver["data3d"] > 0).astype(np.uint8), voxelsize_mm=datap["voxelsize_mm"])
# numeric_label, vs_mm, oname, orig_data, rname, ref_data = one
# ol.add_train_data(orig_data, ref_data, voxelsize_mm=vs_mm)
ol.fit()
# Testing
i = 1
datap = io3d.datasets.read_dataset("3Dircadb1", 'data3d', i)
datap_liver = io3d.datasets.read_dataset("3Dircadb1", 'liver', i)
data3d = datap["data3d"]
segmentation = (datap_liver["data3d"] > 0).astype(np.uint8)
fit = ol.predict(data3d, voxelsize_mm=datap["voxelsize_mm"])
# one = list(imtools.datasets.sliver_reader("*018.mhd", read_seg=True))[0]
# numeric_label, vs_mm, oname, orig_data, rname, ref_data = one
# fit = ol.predict(orig_data, voxelsize_mm=vs_mm)
err = segmentation != (fit > 0).astype(np.uint8)
# visualization
# plt.figure(figsize=(15, 10))
# sed3.show_slices(datap["data3d"], fit, slice_step=20, axis=1, flipV=True)
accuracy = np.sum(~err) / np.prod(data3d.shape)
assert accuracy >= 0.80
# assert
def _mk_data(slice3, offset=1, shape=[10, 11, 12]):
data3d = np.random.random(shape)
data3d[slice3] += offset
segmentation = np.zeros(shape, dtype=int)
segmentation[slice3] = 1
return data3d, segmentation
# TODO finish product
_gmm__mix_clf = imtools.ml.gmmcl.GMMCl()
_gmm__mix_clf.cl = {0:sklearn.mixture.GaussianMixture(n_components=1), 1:sklearn.mixture.GaussianMixture(n_components=3)}
@pytest.mark.parametrize('cl,shape', itertools.product(
[
# sklearn.tree.DecisionTreeClassifier(),
# _gmm__mix_clf,
imtools.ml.gmmcl.GMMCl(),
# sklearn.neural_network.MLPClassifier(),
SVC(kernel='linear', class_weight='balanced', probability=True),
# SVC()
],
[
# [10, 11, 12],
[30, 31, 32],
]
))
def test_intensity_training_artificial(cl, shape):
"""
Test different classifiers on unbalanced dataset.
:param cl:
:param shape:
:return:
"""
# scl = str(cl)
# logger.debug(f'cl={scl[:min(30, len(scl))]}')
logger.debug(f'cl={cl}')
logger.debug(f'shape={shape}')
slice3 = (slice(3, 7), slice(3, 7), slice(3, 7))
# shape = [30,31,32]
voxelsize_mm = [1, 2, 3]
d3d, seg = _mk_data(slice3, shape=shape, offset=0.7)
un, counts = np.unique(seg.flatten(), return_counts=True)
logger.debug(f'counts={counts}')
ol = imtools.trainer3d.Trainer3D(classifier=cl)
ol.working_voxelsize_mm=[2,2,2]
# ol.cl = tree.DecisionTreeClassifier()
# ol.cl = cl
ol.add_train_data(d3d, seg, voxelsize_mm=voxelsize_mm, nth=None) # We take all samples
# https://elitedatascience.com/imbalanced-classes
un, counts = np.unique(ol.target, return_counts=True)
n_samples = np.min(counts)
new_data_list = []
new_target_list = []
for label in un:
all_data_for_one_label = ol.data[ol.target.astype(np.uint8).flatten() == label]
# TODO mozna pouzit funkci sklearn.utils.resample
# https://scikit-learn.org/stable/modules/generated/sklearn.utils.resample.html
resamples = sklearn.utils.resample(all_data_for_one_label, n_samples=n_samples, replace=True)
# data_subset = all_data_for_one_label[:n_samples] # pick first n samples
# new_data_list.append(data_subset)
new_data_list.append(resamples)
new_target_list.append(np.ones([n_samples], dtype=type(label)) * label)
original_data = ol.data
original_target = ol.target
new_data = np.concatenate(new_data_list, axis=0)
new_target = np.concatenate(new_target_list, axis=0)
ol.data = new_data
ol.target = new_target
ol.fit()
# test
# slice3 = (slice(2, 6), slice(2, 8), slice(2, 7))
# shape = [12, 11, 10]
# voxelsize_mm = [1, 2, 3]
d3d, seg = _mk_data(slice3, shape=shape, offset=0.7)
pred_seg = ol.predict(d3d, voxelsize_mm)
sed3.show_slices(d3d, contour=seg, slice_number=8)
# ed = sed3.sed3(d3d, contour=seg)
# ed.show()
sc = sklearn.metrics.accuracy_score(seg.flatten(), pred_seg.flatten())
f1 = sklearn.metrics.f1_score(seg.flatten(), pred_seg.flatten())
logger.debug(f"f1={f1}")
assert sc > 0.5
assert f1 > 0.5
def test_resample():
y = np.array([0, 0, 0, 1, 1, 1, 1, 1, 1])
X = np.random.rand(y.shape[0]) + y
X = X.reshape([-1,1])
# X = np.array([[1., 0.], [2., 1.], [0., 0.]])
# y = np.array([0, 1, 2])
# from scipy.sparse import coo_matrix
# X_sparse = coo_matrix(X)
from sklearn.utils import shuffle, resample
# X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
# X
#
# X_sparse
#
# X_sparse.toarray()
y
# shuffle(y, n_samples=2, random_state=0)
Xr = resample(X, n_samples=2, random_state=0)
print(Xr)
def balance_dataset(X,y):
    labels, counts = np.unique(y, return_counts=True)
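    # The lines below are a hedged completion sketch, not part of the original file:
    # downsample every class to the size of the smallest one, mirroring the balancing
    # logic used in test_intensity_training_artificial above.
    n_samples = np.min(counts)
    X_parts, y_parts = [], []
    for label in labels:
        X_label = X[y == label]
        X_parts.append(sklearn.utils.resample(X_label, n_samples=n_samples, replace=False))
        y_parts.append(np.full(n_samples, label, dtype=y.dtype))
    return np.concatenate(X_parts, axis=0), np.concatenate(y_parts, axis=0)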
| 29.966019 | 124 | 0.660619 |
c3060f7a5e487ba2b9354b3b6af03cffe47d444e | 2,784 | py | Python | cmsplugin_cascade/bootstrap3/tabs.py | angonyfox/djangocms-cascade | dd18e9bf4d77528dd5d87e0718ecc7edec61e2c0 | [
"MIT"
] | 1 | 2022-01-11T07:21:17.000Z | 2022-01-11T07:21:17.000Z | cmsplugin_cascade/bootstrap3/tabs.py | angonyfox/djangocms-cascade | dd18e9bf4d77528dd5d87e0718ecc7edec61e2c0 | [
"MIT"
] | null | null | null | cmsplugin_cascade/bootstrap3/tabs.py | angonyfox/djangocms-cascade | dd18e9bf4d77528dd5d87e0718ecc7edec61e2c0 | [
"MIT"
] | 1 | 2020-03-27T18:09:21.000Z | 2020-03-27T18:09:21.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.forms import widgets
from django.forms.models import ModelForm
from django.utils.translation import ungettext_lazy, ugettext_lazy as _
from django.utils.text import Truncator
from django.utils.html import format_html
from django.forms.fields import IntegerField
from cms.plugin_pool import plugin_pool
from cmsplugin_cascade.forms import ManageChildrenFormMixin
from cmsplugin_cascade.fields import GlossaryField
from cmsplugin_cascade.plugin_base import TransparentWrapper, TransparentContainer
from cmsplugin_cascade.widgets import NumberInputWidget
from .plugin_base import BootstrapPluginBase
class TabForm(ManageChildrenFormMixin, ModelForm):
num_children = IntegerField(min_value=1, initial=1,
widget=NumberInputWidget(attrs={'size': '3', 'style': 'width: 5em !important;'}),
label=_("Tabs"),
help_text=_("Number of tabs."))
class BootstrapTabSetPlugin(TransparentWrapper, BootstrapPluginBase):
name = _("Tab Set")
form = TabForm
parent_classes = ('BootstrapColumnPlugin',)
direct_child_classes = ('BootstrapTabPanePlugin',)
require_parent = True
allow_children = True
render_template = 'cascade/bootstrap3/{}/tabset.html'
justified = GlossaryField(
widgets.CheckboxInput(),
label=_("Justified tabs")
)
@classmethod
def get_identifier(cls, instance):
identifier = super(BootstrapTabSetPlugin, cls).get_identifier(instance)
num_cols = instance.get_num_children()
content = ungettext_lazy('with {} tab', 'with {} tabs', num_cols).format(num_cols)
return format_html('{0}{1}', identifier, content)
def save_model(self, request, obj, form, change):
wanted_children = int(form.cleaned_data.get('num_children'))
super(BootstrapTabSetPlugin, self).save_model(request, obj, form, change)
self.extend_children(obj, wanted_children, BootstrapTabPanePlugin)
plugin_pool.register_plugin(BootstrapTabSetPlugin)
class BootstrapTabPanePlugin(TransparentContainer, BootstrapPluginBase):
name = _("Tab Pane")
direct_parent_classes = parent_classes = ('BootstrapTabSetPlugin',)
require_parent = True
allow_children = True
alien_child_classes = True
tab_title = GlossaryField(
widgets.TextInput(attrs={'size': 80}),
label=_("Tab Title")
)
@classmethod
def get_identifier(cls, obj):
identifier = super(BootstrapTabPanePlugin, cls).get_identifier(obj)
content = obj.glossary.get('tab_title', '')
if content:
content = Truncator(content).words(3, truncate=' ...')
return format_html('{0}{1}', identifier, content)
plugin_pool.register_plugin(BootstrapTabPanePlugin)
| 37.12 | 90 | 0.733836 |
5137338c627e35068bf027dde37d32d27141ef36 | 5,280 | py | Python | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_customerself_resource_records_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 64 | 2020-06-12T07:05:07.000Z | 2022-03-30T03:32:50.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_customerself_resource_records_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 11 | 2020-07-06T07:56:54.000Z | 2022-01-11T11:14:40.000Z | huaweicloud-sdk-bssintl/huaweicloudsdkbssintl/v2/model/list_customerself_resource_records_response.py | huaweicloud/huaweicloud-sdk-python-v3 | 7a6270390fcbf192b3882bf763e7016e6026ef78 | [
"Apache-2.0"
] | 24 | 2020-06-08T11:42:13.000Z | 2022-03-04T06:44:08.000Z | # coding: utf-8
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ListCustomerselfResourceRecordsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'fee_records': 'list[ResFeeRecordV2]',
'total_count': 'int',
'currency': 'str'
}
attribute_map = {
'fee_records': 'fee_records',
'total_count': 'total_count',
'currency': 'currency'
}
def __init__(self, fee_records=None, total_count=None, currency=None):
"""ListCustomerselfResourceRecordsResponse - a model defined in huaweicloud sdk"""
super(ListCustomerselfResourceRecordsResponse, self).__init__()
self._fee_records = None
self._total_count = None
self._currency = None
self.discriminator = None
if fee_records is not None:
self.fee_records = fee_records
if total_count is not None:
self.total_count = total_count
if currency is not None:
self.currency = currency
@property
def fee_records(self):
"""Gets the fee_records of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: resource fee record data; see table ResFeeRecordV2 for details.| |Parameter constraints and description: resource fee record data; see table ResFeeRecordV2 for details.|
:return: The fee_records of this ListCustomerselfResourceRecordsResponse.
:rtype: list[ResFeeRecordV2]
"""
return self._fee_records
@fee_records.setter
def fee_records(self, fee_records):
"""Sets the fee_records of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: resource fee record data; see table ResFeeRecordV2 for details.| |Parameter constraints and description: resource fee record data; see table ResFeeRecordV2 for details.|
:param fee_records: The fee_records of this ListCustomerselfResourceRecordsResponse.
:type: list[ResFeeRecordV2]
"""
self._fee_records = fee_records
@property
def total_count(self):
"""Gets the total_count of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: number of records in the result set; returned only on success.| |Parameter constraints and description: number of records in the result set; returned only on success.|
:return: The total_count of this ListCustomerselfResourceRecordsResponse.
:rtype: int
"""
return self._total_count
@total_count.setter
def total_count(self, total_count):
"""Sets the total_count of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: number of records in the result set; returned only on success.| |Parameter constraints and description: number of records in the result set; returned only on success.|
:param total_count: The total_count of this ListCustomerselfResourceRecordsResponse.
:type: int
"""
self._total_count = total_count
@property
def currency(self):
"""Gets the currency of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: currency code. CNY: Chinese yuan; USD: US dollar.| |Parameter constraints and description: currency code. CNY: Chinese yuan; USD: US dollar.|
:return: The currency of this ListCustomerselfResourceRecordsResponse.
:rtype: str
"""
return self._currency
@currency.setter
def currency(self, currency):
"""Sets the currency of this ListCustomerselfResourceRecordsResponse.
        |Parameter name: currency code. CNY: Chinese yuan; USD: US dollar.| |Parameter constraints and description: currency code. CNY: Chinese yuan; USD: US dollar.|
:param currency: The currency of this ListCustomerselfResourceRecordsResponse.
:type: str
"""
self._currency = currency
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListCustomerselfResourceRecordsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 31.242604 | 92 | 0.619318 |
00c4b01e958f092a3e65716374d63b9018d5a050 | 2,426 | py | Python | python-sdk/src/forta_agent/utils.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | null | null | null | python-sdk/src/forta_agent/utils.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | null | null | null | python-sdk/src/forta_agent/utils.py | e1Ru1o/forta-agent-sdk | ae99dd91e2a253658c1658f7af1a3f850129e45b | [
"MIT"
] | null | null | null | import sys
import os
from jsonc_parser.parser import JsoncParser
import sha3
def get_forta_config():
config = {}
# try to read global config
global_config_path = os.path.join(
os.path.expanduser('~'), '.forta', 'forta.config.json')
if os.path.isfile(global_config_path):
global_config = JsoncParser.parse_file(global_config_path)
config = {**config, **global_config}
# try to read local project config
config_flag_index = sys.argv.index(
'--config') if '--config' in sys.argv else -1
    local_config_file = None if config_flag_index == -1 \
        else sys.argv[config_flag_index + 1]
local_config_path = os.path.join(
os.getcwd(), local_config_file if local_config_file else 'forta.config.json')
if os.path.isfile(local_config_path):
local_config = JsoncParser.parse_file(local_config_path)
config = {**config, **local_config}
return config
def get_json_rpc_url():
if 'JSON_RPC_HOST' in os.environ:
return f'http://{os.environ["JSON_RPC_HOST"]}{":"+os.environ["JSON_RPC_PORT"] if "JSON_RPC_PORT" in os.environ else ""}'
config = get_forta_config()
if "jsonRpcUrl" not in config:
raise Exception("no jsonRpcUrl found")
if not str(config.get("jsonRpcUrl")).startswith("http"):
raise Exception("jsonRpcUrl must begin with http(s)")
return config["jsonRpcUrl"]
def create_block_event(dict):
from .block_event import BlockEvent # avoid circular import
return BlockEvent(dict)
def create_transaction_event(dict):
from .transaction_event import TransactionEvent # avoid circular import
return TransactionEvent(dict)
def assert_non_empty_string_in_dict(dict, key):
assert_key_in_dict(dict, key)
assert isinstance(dict[key], str) and len(
dict[key]) > 0, f'{key} must be non-empty string'
def assert_enum_value_in_dict(dict, key, enum):
assert_key_in_dict(dict, key)
assert isinstance(dict[key], enum), f'{key} must be valid enum value'
def assert_key_in_dict(dict, key):
assert key in dict, f'{key} is required'
def hex_to_int(strVal):
if not strVal or type(strVal) == int:
return strVal
return int(strVal, 16) if type(strVal) == str and strVal.startswith('0x') else int(strVal, 10)
def keccak256(val):
hash = sha3.keccak_256()
hash.update(bytes(val, encoding='utf-8'))
return f'0x{hash.hexdigest()}'
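# Illustrative notes on the helpers above (kept as comments so nothing runs on import;
# the values follow directly from the code in this file, not from any external API):
#   hex_to_int('0x10') -> 16   (0x-prefixed strings are parsed as base-16)
#   hex_to_int('42')   -> 42   (other strings are parsed as base-10)
#   keccak256('forta') -> a 0x-prefixed, 64-hex-character digest (66 characters in total)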
| 32.346667 | 128 | 0.693735 |
27ccf953f2a6724563ef7e14807ac1fda8844b10 | 871 | py | Python | data/review_transformer.py | Ruebe92/BoredLameRecommender | ff6b1070359442ca9155641170ed2193fd637e67 | [
"MIT"
] | null | null | null | data/review_transformer.py | Ruebe92/BoredLameRecommender | ff6b1070359442ca9155641170ed2193fd637e67 | [
"MIT"
] | 23 | 2020-12-02T14:06:55.000Z | 2020-12-10T15:08:55.000Z | data/review_transformer.py | Ruebe92/BoredLameRecommender | ff6b1070359442ca9155641170ed2193fd637e67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 7 10:00:46 2020
@author: chris
"""
import pandas as pd
#%%
popular_games = pd.read_csv('/data/popular_games_total.csv')
popular_games = popular_games.sort_values('num_user_ratings', ascending=False)
popular_games_more_than_X = popular_games[popular_games['num_user_ratings'] > 1]
#%%
df = pd.read_feather('reviews.file')
df = df.drop(['id','date','title','description'], axis = 1)
#%%
rating_list = []
user_list = []
game_list = []
df_new = pd.DataFrame()
for _, row in df.iterrows():
rating = row['rating']
game_id = row['game']['id']['objectId']
game_object = popular_games_more_than_X[popular_games_more_than_X['id'] == game_id]
game_name = game_object['name'].iloc[0]
user = row['user']['username']
df_new.loc[f'{user}',f'{game_name}'] = rating
#%%
df_new.to_csv('reviews.csv') | 18.531915 | 87 | 0.669346 |
68ec347d3e18260f8d26d0c679dd71654b0e2cc2 | 219 | py | Python | release/scripts/presets/tracking_camera/Nexus_5.py | rbabari/blender | 6daa85f14b2974abfc3d0f654c5547f487bb3b74 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 116 | 2015-11-02T16:36:53.000Z | 2021-06-08T20:36:18.000Z | release/scripts/presets/tracking_camera/Nexus_5.py | rbabari/blender | 6daa85f14b2974abfc3d0f654c5547f487bb3b74 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 39 | 2016-04-25T12:18:34.000Z | 2021-03-01T19:06:36.000Z | release/scripts/presets/tracking_camera/Nexus_5.py | rbabari/blender | 6daa85f14b2974abfc3d0f654c5547f487bb3b74 | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | 19 | 2016-01-24T14:24:00.000Z | 2020-07-19T05:26:24.000Z | import bpy
camera = bpy.context.edit_movieclip.tracking.camera
camera.sensor_width = 4.5
camera.units = 'MILLIMETERS'
camera.focal_length = 3.91
camera.pixel_aspect = 1
camera.k1 = 0.0
camera.k2 = 0.0
camera.k3 = 0.0
| 18.25 | 51 | 0.753425 |
f27e9b9ebac0702a44b8073f22a7c423c689b7ea | 31,589 | py | Python | ws2122-lspm/Lib/site-packages/pm4py/filtering.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-19T04:02:46.000Z | 2022-01-19T04:02:46.000Z | ws2122-lspm/Lib/site-packages/pm4py/filtering.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2021-11-19T07:21:48.000Z | 2021-11-19T07:21:48.000Z | ws2122-lspm/Lib/site-packages/pm4py/filtering.py | Malekhy/ws2122-lspm | e4dc8b801d12f862b8ef536a0f125f346f085a00 | [
"MIT"
] | 1 | 2022-01-14T17:15:38.000Z | 2022-01-14T17:15:38.000Z | '''
This file is part of PM4Py (More Info: https://pm4py.fit.fraunhofer.de).
PM4Py is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PM4Py is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with PM4Py. If not, see <https://www.gnu.org/licenses/>.
'''
import warnings
from typing import List, Union, Set, List, Tuple, Collection, Any, Dict
import deprecation
import pandas as pd
from pm4py.meta import VERSION as PM4PY_CURRENT_VERSION
from pm4py.objects.log.obj import EventLog
from pm4py.util import constants
from pm4py.util.pandas_utils import check_is_pandas_dataframe, check_pandas_dataframe_columns
from pm4py.utils import get_properties, general_checks_classical_event_log
from pm4py.objects.ocel.obj import OCEL
import datetime
def filter_start_activities(log: Union[EventLog, pd.DataFrame], activities: Union[Set[str], List[str]], retain: bool = True) -> \
Union[EventLog, pd.DataFrame]:
"""
Filter cases having a start activity in the provided list
Parameters
--------------
log
Log object
activities
List start activities
retain
if True, we retain the traces containing the given activities, if false, we drop the traces
Returns
--------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.start_activities import start_activities_filter
parameters[start_activities_filter.Parameters.POSITIVE] = retain
return start_activities_filter.apply(log, activities,
parameters=parameters)
else:
from pm4py.algo.filtering.log.start_activities import start_activities_filter
parameters[start_activities_filter.Parameters.POSITIVE] = retain
return start_activities_filter.apply(log, activities,
parameters=parameters)
def filter_end_activities(log: Union[EventLog, pd.DataFrame], activities: Union[Set[str], List[str]], retain: bool = True) -> Union[
EventLog, pd.DataFrame]:
"""
Filter cases having an end activity in the provided list
Parameters
---------------
log
Log object
activities
List of admitted end activities
retain
if True, we retain the traces containing the given activities, if false, we drop the traces
Returns
---------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.end_activities import end_activities_filter
parameters[end_activities_filter.Parameters.POSITIVE] = retain
return end_activities_filter.apply(log, activities,
parameters=parameters)
else:
from pm4py.algo.filtering.log.end_activities import end_activities_filter
parameters[end_activities_filter.Parameters.POSITIVE] = retain
return end_activities_filter.apply(log, activities,
parameters=parameters)
@deprecation.deprecated(deprecated_in='2.1.4', removed_in='2.4.0', current_version=PM4PY_CURRENT_VERSION,
details='Filtering method will be removed due to fuzzy naming.\
Use: filter_event_attribute_values')
def filter_attribute_values(log, attribute_key, values, level="case", retain=True):
return filter_event_attribute_values(log, attribute_key, values, level=level, retain=retain)
def filter_event_attribute_values(log: Union[EventLog, pd.DataFrame], attribute_key: str, values: Union[Set[str], List[str]],
level: str = "case", retain: bool = True) -> Union[EventLog, pd.DataFrame]:
"""
Filter a log object on the values of some event attribute
Parameters
--------------
log
Log object
attribute_key
Attribute to filter
values
Admitted (or forbidden) values
level
Specifies how the filter should be applied ('case' filters the cases where at least one occurrence happens,
'event' filter the events eventually trimming the cases)
retain
        Specifies if the values should be kept or removed
Returns
--------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = attribute_key
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.attributes import attributes_filter
if level == "event":
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply_events(log, values,
parameters=parameters)
elif level == "case":
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply(log, values, parameters=parameters)
else:
from pm4py.algo.filtering.log.attributes import attributes_filter
if level == "event":
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply_events(log, values,
parameters=parameters)
elif level == "case":
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply(log, values, parameters=parameters)
@deprecation.deprecated(deprecated_in='2.1.4', removed_in='2.4.0', current_version=PM4PY_CURRENT_VERSION,
details='Filtering method will be removed due to fuzzy naming.\
Use: filter_event_attribute_values')
def filter_trace_attribute(log, attribute_key, values, retain=True):
return filter_trace_attribute_values(log, attribute_key, values, retain=retain)
def filter_trace_attribute_values(log: Union[EventLog, pd.DataFrame], attribute_key: str, values: Union[Set[str], List[str]],
retain: bool = True) -> Union[EventLog, pd.DataFrame]:
"""
Filter a log on the values of a trace attribute
Parameters
--------------
log
Event log
attribute_key
Attribute to filter
values
Values to filter (list of)
retain
Boolean value (keep/discard matching traces)
Returns
--------------
filtered_log
Filtered event log
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
parameters[constants.PARAMETER_CONSTANT_ATTRIBUTE_KEY] = attribute_key
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.attributes import attributes_filter
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply(log, values,
parameters=parameters)
else:
from pm4py.algo.filtering.log.attributes import attributes_filter
parameters[attributes_filter.Parameters.POSITIVE] = retain
return attributes_filter.apply_trace_attribute(log, values, parameters=parameters)
def filter_variants(log: Union[EventLog, pd.DataFrame], variants: Union[Set[str], List[str]], retain: bool = True) -> Union[
EventLog, pd.DataFrame]:
"""
Filter a log on a specified set of variants
Parameters
---------------
log
Event log
variants
collection of variants to filter; A variant should be specified as a list of activity names, e.g., ['a','b','c']
retain
boolean; if True all traces conforming to the specified variants are retained; if False, all those traces are removed
Returns
--------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
from pm4py.util import variants_util
parameters = get_properties(log)
if variants_util.VARIANT_SPECIFICATION == variants_util.VariantsSpecifications.STRING:
variants = [constants.DEFAULT_VARIANT_SEP.join(v) for v in variants]
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.variants import variants_filter
parameters[variants_filter.Parameters.POSITIVE] = retain
return variants_filter.apply(log, variants,
parameters=parameters)
else:
from pm4py.algo.filtering.log.variants import variants_filter
parameters[variants_filter.Parameters.POSITIVE] = retain
return variants_filter.apply(log, variants,
parameters=parameters)
@deprecation.deprecated(deprecated_in='2.1.3.1', removed_in='2.4.0', current_version=PM4PY_CURRENT_VERSION,
details='Filtering method will be removed due to fuzzy interpretation of the threshold.\
Will be replaced with two new functions filter_variants_top_k and filter_variants_relative_frequency')
def filter_variants_percentage(log: Union[EventLog, pd.DataFrame], threshold: float = 0.8) -> Union[
EventLog, pd.DataFrame]:
"""
Filter a log on the percentage of variants
Parameters
---------------
log
Event log
threshold
Percentage (scale 0.1) of admitted variants
Returns
--------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
if check_is_pandas_dataframe(log):
raise Exception(
"filtering variants percentage on Pandas dataframe is currently not available! please convert the dataframe to event log with the method: log = pm4py.convert_to_event_log(df)")
else:
from pm4py.algo.filtering.log.variants import variants_filter
return variants_filter.filter_log_variants_percentage(log, percentage=threshold, parameters=get_properties(log))
@deprecation.deprecated(deprecated_in='2.1.3.1', removed_in='2.4.0', current_version=PM4PY_CURRENT_VERSION,
details='Use filter_directly_follows_relation')
def filter_paths(log, allowed_paths, retain=True):
general_checks_classical_event_log(log)
return filter_directly_follows_relation(log, allowed_paths, retain)
def filter_directly_follows_relation(log: Union[EventLog, pd.DataFrame], relations: List[str], retain: bool = True) -> \
Union[EventLog, pd.DataFrame]:
"""
Retain traces that contain any of the specified 'directly follows' relations.
For example, if relations == [('a','b'),('a','c')] and log [<a,b,c>,<a,c,b>,<a,d,b>]
the resulting log will contain traces describing [<a,b,c>,<a,c,b>].
Parameters
---------------
log
Log object
relations
List of activity name pairs, which are allowed/forbidden paths
retain
Parameter that says whether the paths
should be kept/removed
Returns
----------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
from pm4py.algo.filtering.pandas.paths import paths_filter
parameters[paths_filter.Parameters.POSITIVE] = retain
return paths_filter.apply(log, relations, parameters=parameters)
else:
from pm4py.algo.filtering.log.paths import paths_filter
parameters[paths_filter.Parameters.POSITIVE] = retain
return paths_filter.apply(log, relations, parameters=parameters)
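# Minimal usage sketch (not part of the original module; assumes `log` has already been
# loaded, e.g. with pm4py.read_xes): keep only the traces in which 'a' is directly
# followed by 'b' or by 'c':
#   filtered_log = filter_directly_follows_relation(log, [("a", "b"), ("a", "c")], retain=True)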
def filter_eventually_follows_relation(log: Union[EventLog, pd.DataFrame], relations: List[str], retain: bool = True) -> \
Union[EventLog, pd.DataFrame]:
"""
Retain traces that contain any of the specified 'eventually follows' relations.
For example, if relations == [('a','b'),('a','c')] and log [<a,b,c>,<a,c,b>,<a,d,b>]
the resulting log will contain traces describing [<a,b,c>,<a,c,b>,<a,d,b>].
Parameters
---------------
log
Log object
relations
List of activity name pairs, which are allowed/forbidden paths
retain
Parameter that says whether the paths
should be kept/removed
Returns
----------------
filtered_log
Filtered log object
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
from pm4py.algo.filtering.pandas.ltl import ltl_checker
parameters[ltl_checker.Parameters.POSITIVE] = retain
if retain:
cases = set()
else:
cases = set(log[constants.CASE_CONCEPT_NAME])
for path in relations:
filt_log = ltl_checker.eventually_follows(log, path,
parameters=parameters)
this_traces = set(filt_log[constants.CASE_CONCEPT_NAME])
if retain:
cases = cases.union(this_traces)
else:
cases = cases.intersection(this_traces)
return log[log[constants.CASE_CONCEPT_NAME].isin(cases)]
else:
from pm4py.objects.log.obj import EventLog
from pm4py.algo.filtering.log.ltl import ltl_checker
parameters[ltl_checker.Parameters.POSITIVE] = retain
if retain:
cases = set()
else:
cases = set(id(trace) for trace in log)
for path in relations:
filt_log = ltl_checker.eventually_follows(log, path,
parameters=parameters)
this_traces = set(id(trace) for trace in filt_log)
if retain:
cases = cases.union(this_traces)
else:
cases = cases.intersection(this_traces)
filtered_log = EventLog(attributes=log.attributes, extensions=log.extensions, omni_present=log.omni_present,
classifiers=log.classifiers, properties=log.properties)
for trace in log:
if id(trace) in cases:
filtered_log.append(trace)
return filtered_log
def filter_time_range(log: Union[EventLog, pd.DataFrame], dt1: str, dt2: str, mode="events") -> Union[
EventLog, pd.DataFrame]:
"""
Filter a log on a time interval
Parameters
----------------
log
Log object
dt1
Left extreme of the interval
dt2
Right extreme of the interval
mode
Modality of filtering (events, traces_contained, traces_intersecting)
events: any event that fits the time frame is retained
traces_contained: any trace completely contained in the timeframe is retained
traces_intersecting: any trace intersecting with the time-frame is retained.
Returns
----------------
filtered_log
Filtered log
"""
general_checks_classical_event_log(log)
if check_is_pandas_dataframe(log):
from pm4py.algo.filtering.pandas.timestamp import timestamp_filter
if mode == "events":
return timestamp_filter.apply_events(log, dt1, dt2, parameters=get_properties(log))
elif mode == "traces_contained":
return timestamp_filter.filter_traces_contained(log, dt1, dt2, parameters=get_properties(log))
elif mode == "traces_intersecting":
return timestamp_filter.filter_traces_intersecting(log, dt1, dt2, parameters=get_properties(log))
else:
warnings.warn('mode provided: ' + mode + ' is not recognized; original log returned!')
return log
else:
from pm4py.algo.filtering.log.timestamp import timestamp_filter
if mode == "events":
return timestamp_filter.apply_events(log, dt1, dt2, parameters=get_properties(log))
elif mode == "traces_contained":
return timestamp_filter.filter_traces_contained(log, dt1, dt2, parameters=get_properties(log))
elif mode == "traces_intersecting":
return timestamp_filter.filter_traces_intersecting(log, dt1, dt2, parameters=get_properties(log))
else:
warnings.warn('mode provided: ' + mode + ' is not recognized; original log returned!')
return log
def filter_between(log: Union[EventLog, pd.DataFrame], act1: str, act2: str) -> Union[EventLog, pd.DataFrame]:
"""
Finds all the sub-cases leading from an event with activity "act1" to an event with activity "act2" in the log,
and returns a log containing only them.
Example:
Log
A B C D E F
A B E F C
A B F C B C B E F C
act1 = B
act2 = C
Returned sub-cases:
B C (from the first case)
B E F C (from the second case)
B F C (from the third case)
B C (from the third case)
B E F C (from the third case)
Parameters
-----------------
log
Event log / Pandas dataframe
act1
Source activity
act2
Target activity
Returns
-----------------
filtered_log
Log containing all the subcases
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.between import between_filter
return between_filter.apply(log, act1, act2, parameters=parameters)
else:
from pm4py.algo.filtering.log.between import between_filter
return between_filter.apply(log, act1, act2, parameters=parameters)
def filter_case_size(log: Union[EventLog, pd.DataFrame], min_size: int, max_size: int) -> Union[EventLog, pd.DataFrame]:
"""
Filters the event log, keeping the cases having a length (number of events) included between min_size
and max_size
Parameters
-----------------
log
Event log / Pandas dataframe
min_size
Minimum allowed number of events
max_size
Maximum allowed number of events
Returns
----------------
filtered_log
        Log with cases having the desired number of events.
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.cases import case_filter
case_id = parameters[
constants.PARAMETER_CONSTANT_CASEID_KEY] if constants.PARAMETER_CONSTANT_CASEID_KEY in parameters else constants.CASE_CONCEPT_NAME
return case_filter.filter_on_case_size(log, case_id, min_size, max_size)
else:
from pm4py.algo.filtering.log.cases import case_filter
return case_filter.filter_on_case_size(log, min_size, max_size)
def filter_case_performance(log: Union[EventLog, pd.DataFrame], min_performance: float, max_performance: float) -> Union[EventLog, pd.DataFrame]:
"""
Filters the event log, keeping the cases having a duration (the timestamp of the last event minus the timestamp
of the first event) included between min_performance and max_performance
Parameters
----------------
log
Event log / Pandas dataframe
min_performance
Minimum allowed case duration
max_performance
Maximum allowed case duration
Returns
----------------
filtered_log
Log with cases having a duration in the specified range
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.cases import case_filter
return case_filter.filter_case_performance(log, min_performance, max_performance, parameters=parameters)
else:
from pm4py.algo.filtering.log.cases import case_filter
return case_filter.filter_case_performance(log, min_performance, max_performance, parameters=parameters)
def filter_activities_rework(log: Union[EventLog, pd.DataFrame], activity: str, min_occurrences: int = 2) -> Union[EventLog, pd.DataFrame]:
"""
Filters the event log, keeping the cases where the specified activity occurs at least min_occurrences times.
Parameters
-----------------
log
Event log / Pandas dataframe
activity
Activity
min_occurrences
Minimum desidered number of occurrences
Returns
-----------------
filtered_log
Log with cases having at least min_occurrences occurrences of the given activity
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
parameters["min_occurrences"] = min_occurrences
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.rework import rework_filter
return rework_filter.apply(log, activity, parameters=parameters)
else:
from pm4py.algo.filtering.log.rework import rework_filter
return rework_filter.apply(log, activity, parameters=parameters)
def filter_paths_performance(log: Union[EventLog, pd.DataFrame], path: Tuple[str, str], min_performance: float, max_performance: float, keep=True) -> Union[EventLog, pd.DataFrame]:
"""
Filters the event log, either:
- (keep=True) keeping the cases having the specified path (tuple of 2 activities) with a duration included between min_performance and max_performance
- (keep=False) discarding the cases having the specified path with a duration included between min_performance and max_performance
Parameters
----------------
log
Event log
path
Tuple of two activities (source_activity, target_activity)
min_performance
Minimum allowed performance (of the path)
max_performance
Maximum allowed performance (of the path)
keep
Keep/discard the cases having the specified path with a duration included between min_performance and max_performance
Returns
----------------
filtered_log
        Filtered log with the desired behavior
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
parameters["positive"] = keep
parameters["min_performance"] = min_performance
parameters["max_performance"] = max_performance
path = tuple(path)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.paths import paths_filter
return paths_filter.apply_performance(log, path, parameters=parameters)
else:
from pm4py.algo.filtering.log.paths import paths_filter
return paths_filter.apply_performance(log, path, parameters=parameters)
def filter_variants_top_k(log: Union[EventLog, pd.DataFrame], k: int) -> Union[EventLog, pd.DataFrame]:
"""
Keeps the top-k variants of the log
Parameters
-------------
log
Event log
k
Number of variants that should be kept
parameters
Parameters
Returns
-------------
filtered_log
Filtered log
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.variants import variants_filter
return variants_filter.filter_variants_top_k(log, k, parameters=parameters)
else:
from pm4py.algo.filtering.log.variants import variants_filter
return variants_filter.filter_variants_top_k(log, k, parameters=parameters)
def filter_variants_by_coverage_percentage(log: Union[EventLog, pd.DataFrame], min_coverage_percentage: float) -> Union[EventLog, pd.DataFrame]:
"""
Filters the variants of the log by a coverage percentage
(e.g., if min_coverage_percentage=0.4, and we have a log with 1000 cases,
    of which 500 belong to variant 1, 400 to variant 2, and 100 to variant 3,
the filter keeps only the traces of variant 1 and variant 2).
Parameters
---------------
log
Event log
min_coverage_percentage
Minimum allowed percentage of coverage
parameters
Parameters
Returns
---------------
filtered_log
Filtered log
"""
general_checks_classical_event_log(log)
parameters = get_properties(log)
if check_is_pandas_dataframe(log):
check_pandas_dataframe_columns(log)
from pm4py.algo.filtering.pandas.variants import variants_filter
return variants_filter.filter_variants_by_coverage_percentage(log, min_coverage_percentage, parameters=parameters)
else:
from pm4py.algo.filtering.log.variants import variants_filter
return variants_filter.filter_variants_by_coverage_percentage(log, min_coverage_percentage, parameters=parameters)
def filter_ocel_event_attribute(ocel: OCEL, attribute_key: str, attribute_values: Collection[Any], positive: bool = True) -> OCEL:
"""
Filters the object-centric event log on the provided event attributes values
Parameters
----------------
ocel
Object-centric event log
attribute_key
Attribute at the event level
attribute_values
Attribute values
positive
Decides if the values should be kept (positive=True) or removed (positive=False)
Returns
----------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import event_attributes
return event_attributes.apply(ocel, attribute_values, parameters={event_attributes.Parameters.ATTRIBUTE_KEY: attribute_key, event_attributes.Parameters.POSITIVE: positive})
def filter_ocel_object_attribute(ocel: OCEL, attribute_key: str, attribute_values: Collection[Any], positive: bool = True) -> OCEL:
"""
Filters the object-centric event log on the provided object attributes values
Parameters
----------------
ocel
Object-centric event log
attribute_key
Attribute at the event level
attribute_values
Attribute values
positive
Decides if the values should be kept (positive=True) or removed (positive=False)
Returns
----------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import object_attributes
return object_attributes.apply(ocel, attribute_values, parameters={object_attributes.Parameters.ATTRIBUTE_KEY: attribute_key, object_attributes.Parameters.POSITIVE: positive})
def filter_ocel_object_types_allowed_activities(ocel: OCEL, correspondence_dict: Dict[str, Collection[str]]) -> OCEL:
"""
Filters an object-centric event log keeping only the specified object types
with the specified activity set (filters out the rest).
Parameters
----------------
ocel
Object-centric event log
correspondence_dict
Dictionary containing, for every object type of interest, a
collection of allowed activities. Example:
{"order": ["Create Order"], "element": ["Create Order", "Create Delivery"]}
Keeps only the object types "order" and "element".
For the "order" object type, only the activity "Create Order" is kept.
For the "element" object type, only the activities "Create Order" and "Create Delivery" are kept.
Returns
-----------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import activity_type_matching
return activity_type_matching.apply(ocel, correspondence_dict)
def filter_ocel_object_per_type_count(ocel: OCEL, min_num_obj_type: Dict[str, int]) -> OCEL:
"""
Filters the events of the object-centric logs which are related to at least
the specified amount of objects per type.
E.g. pm4py.filter_object_per_type_count(ocel, {"order": 1, "element": 2})
Would keep the following events:
ocel:eid ocel:timestamp ocel:activity ocel:type:element ocel:type:order
0 e1 1980-01-01 Create Order [i4, i1, i3, i2] [o1]
1 e11 1981-01-01 Create Order [i6, i5] [o2]
2 e14 1981-01-04 Create Order [i8, i7] [o3]
Parameters
------------------
ocel
Object-centric event log
min_num_obj_type
Minimum number of objects per type
Returns
-----------------
filtered_event_log
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import objects_ot_count
return objects_ot_count.apply(ocel, min_num_obj_type)
def filter_ocel_start_events_per_object_type(ocel: OCEL, object_type: str) -> OCEL:
"""
Filters the events in which a new object for the given object type is spawn.
(E.g. an event with activity "Create Order" might spawn new orders).
Parameters
------------------
ocel
Object-centric event log
object_type
Object type to consider
Returns
------------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import ot_endpoints
return ot_endpoints.filter_start_events_per_object_type(ocel, object_type)
def filter_ocel_end_events_per_object_type(ocel: OCEL, object_type: str) -> OCEL:
"""
Filters the events in which an object for the given object type terminates its lifecycle.
(E.g. an event with activity "Pay Order" might terminate an order).
Parameters
------------------
ocel
Object-centric event log
object_type
Object type to consider
Returns
------------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import ot_endpoints
return ot_endpoints.filter_end_events_per_object_type(ocel, object_type)
def filter_ocel_events_timestamp(ocel: OCEL, min_timest: Union[datetime.datetime, str], max_timest: Union[datetime.datetime, str], timestamp_key: str = "ocel:timestamp") -> OCEL:
"""
Filters the object-centric event log keeping events in the provided timestamp range
Parameters
-----------------
ocel
Object-centric event log
min_timest
Left extreme of the allowed timestamp interval (provided in the format: YYYY-mm-dd HH:MM:SS)
max_timest
Right extreme of the allowed timestamp interval (provided in the format: YYYY-mm-dd HH:MM:SS)
timestamp_key
The attribute to use as timestamp (default: ocel:timestamp)
Returns
-----------------
filtered_ocel
Filtered object-centric event log
"""
from pm4py.algo.filtering.ocel import event_attributes
return event_attributes.apply_timestamp(ocel, min_timest, max_timest, parameters={"pm4py:param:timestamp_key": timestamp_key})
| 37.921969 | 189 | 0.676533 |
8d33c6c0fcf02badd2a9d2828f81a195e3107f1a | 142 | py | Python | python/epsilon.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | 2 | 2020-05-03T17:02:44.000Z | 2022-02-21T04:09:34.000Z | python/epsilon.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | null | null | null | python/epsilon.py | davxy/numeric | 1e8b44a72e1d570433a5ba81ae0795a750ce5921 | [
"Unlicense"
] | null | null | null | # Find machine precision
epsilon = 1.0
while (1.0 + 0.5 * epsilon) != 1.0:
epsilon = 0.5 * epsilon
print('Machine precision: ', epsilon)
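# Sanity check (assumes IEEE-754 double precision): the value found by the loop above
# should match the epsilon reported by the standard library, typically 2.220446049250313e-16.
import sys
print('sys.float_info.epsilon:', sys.float_info.epsilon)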
| 20.285714 | 37 | 0.640845 |
3f7d8152a827f3873e1140b7b88885c43c4ac400 | 1,001 | py | Python | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/public_ip_address_sku_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 1 | 2022-03-30T22:39:15.000Z | 2022-03-30T22:39:15.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/public_ip_address_sku_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 54 | 2016-03-25T17:25:01.000Z | 2018-10-22T17:27:54.000Z | azure-mgmt-network/azure/mgmt/network/v2017_08_01/models/public_ip_address_sku_py3.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | [
"MIT"
] | 2 | 2017-01-20T18:25:46.000Z | 2017-05-12T21:31:47.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class PublicIPAddressSku(Model):
"""SKU of a public IP address.
:param name: Name of a public IP address SKU. Possible values include:
'Basic', 'Standard'
:type name: str or
~azure.mgmt.network.v2017_08_01.models.PublicIPAddressSkuName
"""
_attribute_map = {
'name': {'key': 'name', 'type': 'str'},
}
def __init__(self, *, name=None, **kwargs) -> None:
super(PublicIPAddressSku, self).__init__(**kwargs)
self.name = name
| 32.290323 | 76 | 0.582418 |
090f3a857def9dd221f75b995146a91b5b6ea2f4 | 7,440 | py | Python | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/7-extending_bound_25.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/7-extending_bound_25.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints/scaling_ltl_infinite_state/7-extending_bound_25.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import Tuple, FrozenSet
from collections.abc import Iterable
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or
from mathsat import msat_make_leq, msat_make_equal
from mathsat import msat_make_number, msat_make_plus
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
geq = msat_make_geq(menv, arg0, arg1)
return msat_make_not(menv, geq)
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
leq = msat_make_leq(menv, arg0, arg1)
return msat_make_not(menv, leq)
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
n_arg0 = msat_make_not(menv, arg0)
return msat_make_or(menv, n_arg0, arg1)
def check_ltl(menv: msat_env, enc: LTLEncoder) -> Tuple[Iterable, msat_term,
msat_term, msat_term]:
assert menv
assert isinstance(menv, msat_env)
assert enc
assert isinstance(enc, LTLEncoder)
bool_type = msat_get_bool_type(menv)
real_type = msat_get_rational_type(menv)
i = msat_declare_function(menv, "i", real_type)
i = msat_make_constant(menv, i)
r = msat_declare_function(menv, "r", real_type)
r = msat_make_constant(menv, r)
l = msat_declare_function(menv, "l", real_type)
l = msat_make_constant(menv, l)
inc_i = msat_declare_function(menv, "inc_i", bool_type)
inc_i = msat_make_constant(menv, inc_i)
x_i = msat_declare_function(menv, name_next("i"), real_type)
x_i = msat_make_constant(menv, x_i)
x_r = msat_declare_function(menv, name_next("r"), real_type)
x_r = msat_make_constant(menv, x_r)
x_l = msat_declare_function(menv, name_next("l"), real_type)
x_l = msat_make_constant(menv, x_l)
x_inc_i = msat_declare_function(menv, name_next("inc_i"), bool_type)
x_inc_i = msat_make_constant(menv, x_inc_i)
curr2next = {i: x_i, r: x_r, l: x_l, inc_i: x_inc_i}
zero = msat_make_number(menv, "0")
one = msat_make_number(menv, "1")
r_gt_0 = msat_make_gt(menv, r, zero)
r_lt_l = msat_make_lt(menv, r, l)
i_geq_0 = msat_make_geq(menv, i, zero)
init = msat_make_and(menv, r_gt_0, r_lt_l)
init = msat_make_and(menv, init,
msat_make_and(menv, i_geq_0,
msat_make_not(menv, inc_i)))
init = msat_make_and(menv, init, msat_make_gt(menv, l, zero))
# r' = r
trans = msat_make_equal(menv, x_r, r)
# i < l -> ((inc_i' & i' = i + 1) | (!inc_i' & i' = i)) & l' = l
i_lt_l = msat_make_lt(menv, i, l)
x_i_eq_i_p_1 = msat_make_and(menv, x_inc_i,
msat_make_equal(menv, x_i,
msat_make_plus(menv, i, one)))
x_i_eq_i = msat_make_and(menv, msat_make_not(menv, x_inc_i),
msat_make_equal(menv, x_i, i))
x_i_eq_i_p_1_or_i = msat_make_or(menv, x_i_eq_i_p_1, x_i_eq_i)
x_l_eq_l = msat_make_equal(menv, x_l, l)
x_i_eq_i_p_1_or_i_and_x_l_eq_l = msat_make_and(menv, x_i_eq_i_p_1_or_i,
x_l_eq_l)
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_lt_l,
x_i_eq_i_p_1_or_i_and_x_l_eq_l))
# i >= l -> i' = 0 & l' = l + 1 & !inc_i'
i_geq_l = msat_make_geq(menv, i, l)
x_i_eq_0 = msat_make_equal(menv, x_i, zero)
x_l_eq_l_p_1 = msat_make_equal(menv, x_l, msat_make_plus(menv, l, one))
x_i_eq_0_and_x_l_eq_l_p_1 = msat_make_and(menv,
msat_make_and(menv, x_i_eq_0,
x_l_eq_l_p_1),
msat_make_not(menv, x_inc_i))
trans = msat_make_and(menv, trans,
msat_make_impl(menv, i_geq_l,
x_i_eq_0_and_x_l_eq_l_p_1))
# (G F inc_i) -> ! G F r > i
G_F_x_i_gt_i = enc.make_G(enc.make_F(inc_i))
r_gt_i = msat_make_gt(menv, r, i)
n_G_F_r_gt_i = msat_make_not(menv, enc.make_G(enc.make_F(r_gt_i)))
ltl = msat_make_impl(menv, G_F_x_i_gt_i, n_G_F_r_gt_i)
return TermMap(curr2next), init, trans, ltl
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
i = mgr.Symbol("i", types.REAL)
r = mgr.Symbol("r", types.REAL)
l = mgr.Symbol("l", types.REAL)
inc_i = mgr.Symbol("inc_i", types.BOOL)
symbs = frozenset([i, r, l, inc_i])
x_i = symb_to_next(mgr, i)
x_r = symb_to_next(mgr, r)
x_l = symb_to_next(mgr, l)
x_inc_i = symb_to_next(mgr, inc_i)
res = []
n0 = mgr.Real(0)
n1 = mgr.Real(1)
stutter = mgr.Equals(x_i, i)
loc = Location(env, mgr.LE(i, n0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_i, mgr.Minus(i, n1)))
h_i = Hint("h_i1", env, frozenset([i]), symbs)
h_i.set_locs([loc])
res.append(h_i)
loc = Location(env, mgr.Not(inc_i))
loc.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc1", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc])
res.append(h_inc)
loc0 = Location(env, mgr.Not(inc_i))
loc0.set_progress(1, x_inc_i)
loc1 = Location(env, inc_i)
loc1.set_progress(0, mgr.Not(x_inc_i))
h_inc = Hint("h_inc2", env, frozenset([inc_i]), symbs)
h_inc.set_locs([loc0, loc1])
res.append(h_inc)
loc0 = Location(env, mgr.GE(i, n0), mgr.GE(l, n0),
stutterT=mgr.Equals(x_i, mgr.Plus(i, l)))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i3", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0), mgr.GE(i, n0),
stutterT=mgr.Equals(x_r, mgr.Plus(r, i)))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(0, mgr.Equals(x_r, mgr.Plus(r, n1)))
h_r = Hint("h_r3", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1])
res.append(h_r)
loc0 = Location(env, mgr.GE(i, n0))
loc0.set_progress(1, mgr.Equals(x_i, mgr.Plus(i, n1)))
loc1 = Location(env, mgr.GE(i, n0))
loc1.set_progress(2, mgr.Equals(x_i, i))
loc2 = Location(env, mgr.GE(i, n0))
loc2.set_progress(0, mgr.Equals(x_i, i))
h_i = Hint("h_i4", env, frozenset([i]), symbs)
h_i.set_locs([loc0, loc1, loc2])
res.append(h_i)
loc0 = Location(env, mgr.GE(r, n0))
loc0.set_progress(1, mgr.Equals(x_r, r))
loc1 = Location(env, mgr.GE(r, n0))
loc1.set_progress(2, mgr.Equals(x_r, mgr.Plus(r, n1)))
loc2 = Location(env, mgr.GE(r, n0))
loc2.set_progress(0, mgr.Equals(x_r, r))
h_r = Hint("h_r4", env, frozenset([r]), symbs)
h_r.set_locs([loc0, loc1, loc2])
res.append(h_r)
return frozenset(res)
| 37.2 | 89 | 0.62836 |
b1e02d9bd4ed7d475057aba106748da7117a98a3 | 600 | py | Python | IMDB_task6.py | amansharmma/IMDB-movie-scraper | d17b5598f8188675429979dfe06e455429aea84d | [
"MIT"
] | null | null | null | IMDB_task6.py | amansharmma/IMDB-movie-scraper | d17b5598f8188675429979dfe06e455429aea84d | [
"MIT"
] | null | null | null | IMDB_task6.py | amansharmma/IMDB-movie-scraper | d17b5598f8188675429979dfe06e455429aea84d | [
"MIT"
] | null | null | null | from pprint import pprint
from IMDB_task5 import movie_detail
# In this task we count how many movies were made in each language.
# Analysis of movie languages.
def analyse_movies_language():
    movie_data = movie_detail()
    count_of_language = {}
    for one_movie_data in movie_data:
        movie_language = one_movie_data["Language"]
        for one_movie_language in movie_language:
            if one_movie_language not in count_of_language:
                count_of_language[one_movie_language] = 1
            else:
                count_of_language[one_movie_language] += 1
    return count_of_language
pprint(analyse_movies_language())
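# Illustrative example (hypothetical values): if movie_detail() returned three movies
# whose "Language" lists were ['English'], ['English', 'Hindi'] and ['Hindi'],
# the call above would print {'English': 2, 'Hindi': 2}.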
| 35.294118 | 85 | 0.796667 |