Commit 768cf30
Author: Quentin Berthet
Commit message: Apply pre-commit suggested changes (formatting)
Parent: f3d4090

15 files changed: +467 -594 lines
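
Changes of this kind are regenerated by the repository's hooks rather than edited by hand; assuming a standard pre-commit setup, roughly:

    pip install pre-commit
    pre-commit run --all-files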

hls4ml/backends/vitis_accelerator/supported_boards.json
Lines changed: 1 addition & 1 deletion

@@ -23,4 +23,4 @@
         "platform": "xilinx_vck5000_gen4x8_qdma_2_202220_1",
         "memory":{"type": "ddr", "channels": 3, "capacity": 12}
     }
-}
+}

(Whitespace-only change at the final brace; several hunks below likewise differ only in whitespace or indentation, which this capture does not preserve.)

hls4ml/backends/vitis_accelerator/vitis_accelerator_backend.py
Lines changed: 16 additions & 26 deletions

@@ -1,6 +1,7 @@
 import os
-import sys
 import subprocess
+import sys
+
 import numpy as np

 from hls4ml.backends import VitisBackend, VivadoBackend
@@ -23,7 +24,7 @@ def create_initial_config(
         num_thread=1,
         batchsize=8192,
         hw_quant=False,
-        vivado_directives=[],
+        vivado_directives=None,
     ):
         """
        Create initial accelerator config with default parameters
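
Beyond formatting, the change from vivado_directives=[] to vivado_directives=None fixes Python's mutable-default-argument pitfall (the kind of fix a flake8-bugbear B006 hook suggests): a default list is created once at function definition and shared by every call. A minimal illustration, with hypothetical function names:

    def append_directive_buggy(directive, directives=[]):  # one shared list for all calls
        directives.append(directive)
        return directives

    print(append_directive_buggy("a"))  # ['a']
    print(append_directive_buggy("b"))  # ['a', 'b'] -- state leaked from the first call

    def append_directive_safe(directive, directives=None):  # the idiom adopted in the diff
        if directives is None:
            directives = []  # fresh list on every call
        directives.append(directive)
        return directives

    print(append_directive_safe("a"))  # ['a']
    print(append_directive_safe("b"))  # ['b']
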
@@ -64,15 +65,18 @@ def build(
         if "linux" in sys.platform:
             if "XILINX_VITIS" not in os.environ:
                 raise Exception(
-                    "XILINX_VITIS environmental variable missing. Please install XRT and Vitis, and run the setup scripts before building"
+                    "XILINX_VITIS environmental variable missing."
+                    " Please install XRT and Vitis, and run the setup scripts before building"
                 )
             if "XILINX_XRT" not in os.environ:
                 raise Exception(
-                    "XILINX_XRT environmental variable missing. Please install XRT and Vitis, and run the setup scripts before building"
+                    "XILINX_XRT environmental variable missing."
+                    " Please install XRT and Vitis, and run the setup scripts before building"
                 )
             if "XILINX_VIVADO" not in os.environ:
                 raise Exception(
-                    "XILINX_VIVADO environmental variable missing. Please install XRT and Vitis, and run the setup scripts before building"
+                    "XILINX_VIVADO environmental variable missing."
+                    " Please install XRT and Vitis, and run the setup scripts before building"
                 )

         curr_dir = os.getcwd()
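
The long messages are split using implicit concatenation of adjacent string literals, so the raised text is byte-for-byte identical to the one-line original; only the source lines get short enough to satisfy the formatter's line-length limit. A quick self-contained check:

    # Adjacent string literals are concatenated at compile time:
    msg = (
        "XILINX_VITIS environmental variable missing."
        " Please install XRT and Vitis, and run the setup scripts before building"
    )
    assert msg.startswith("XILINX_VITIS environmental variable missing. Please")
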
@@ -106,14 +110,10 @@ def build(
             command = "make " + target

             # Pre-loading libudev
-            ldconfig_output = subprocess.check_output(["ldconfig", "-p"]).decode(
-                "utf-8"
-            )
+            ldconfig_output = subprocess.check_output(["ldconfig", "-p"]).decode("utf-8")
             for line in ldconfig_output.split("\n"):
                 if "libudev.so" in line and "x86" in line:
-                    command = (
-                        "LD_PRELOAD=" + line.split("=>")[1].strip() + " " + command
-                    )
+                    command = "LD_PRELOAD=" + line.split("=>")[1].strip() + " " + command
                     break
             os.system(command)
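
For context, the lookup these reformatted lines perform reads cleanly in isolation: ldconfig -p dumps the dynamic-linker cache, the first x86 libudev entry's path (the part after "=>") is taken, and the make command is prefixed with LD_PRELOAD. A standalone sketch of the same logic (the helper name find_libudev and the "make all" target are ours):

    import subprocess

    def find_libudev():
        """Return the path of an x86 libudev entry from the linker cache, or None."""
        out = subprocess.check_output(["ldconfig", "-p"]).decode("utf-8")
        for line in out.splitlines():
            # Cache entries look like:
            #   libudev.so.1 (libc6,x86-64) => /lib/x86_64-linux-gnu/libudev.so.1
            if "libudev.so" in line and "x86" in line:
                return line.split("=>")[1].strip()
        return None

    if __name__ == "__main__":
        lib = find_libudev()
        command = ("LD_PRELOAD=" + lib + " " if lib else "") + "make all"
        print(command)
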

@@ -129,9 +129,7 @@ def numpy_to_dat(self, model, x):
         expected_shape = model.get_input_variables()[0].size()
         actual_shape = np.prod(x.shape[1:])
         if expected_shape != actual_shape:
-            raise Exception(
-                f"Input shape mismatch, got {x.shape}, expected (_, {expected_shape})"
-            )
+            raise Exception(f"Input shape mismatch, got {x.shape}, expected (_, {expected_shape})")

         # Write to tb_data/tb_input_features.dat
         samples = x.reshape(x.shape[0], -1)
@@ -175,18 +173,10 @@ def _register_flows(self):
         )

         writer_passes = ["make_stamp", "vitisaccelerator:write_hls"]
-        self._writer_flow = register_flow(
-            "write", writer_passes, requires=["vitis:ip"], backend=self.name
-        )
+        self._writer_flow = register_flow("write", writer_passes, requires=["vitis:ip"], backend=self.name)

         ip_flow_requirements = get_flow("vivado:ip").requires.copy()
-        ip_flow_requirements.insert(
-            ip_flow_requirements.index("vivado:init_layers"), validation_flow
-        )
-        ip_flow_requirements.insert(
-            ip_flow_requirements.index("vivado:apply_templates"), template_flow
-        )
+        ip_flow_requirements.insert(ip_flow_requirements.index("vivado:init_layers"), validation_flow)
+        ip_flow_requirements.insert(ip_flow_requirements.index("vivado:apply_templates"), template_flow)

-        self._default_flow = register_flow(
-            "ip", None, requires=ip_flow_requirements, backend=self.name
-        )
+        self._default_flow = register_flow("ip", None, requires=ip_flow_requirements, backend=self.name)

hls4ml/backends/vitis_accelerator/vitis_accelerator_config.py
Lines changed: 1 addition & 3 deletions

@@ -11,9 +11,7 @@ def __init__(self, config):
             raise Exception("Missing AcceleratorConfig")

         self.board = accel_config.get("Board", "alveo-u55c")
-        self.supported_boards = json.load(
-            open(os.path.dirname(__file__) + "/supported_boards.json")
-        )
+        self.supported_boards = json.load(open(os.path.dirname(__file__) + "/supported_boards.json"))
         if self.board in self.supported_boards.keys():
             board_info = self.supported_boards[self.board]
             self.board_type = board_info["board_type"]
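
The formatter only joined this call onto one line; the pattern still opens the JSON file without closing it explicitly. A sketch of an equivalent load with a context manager (not part of this commit) that releases the handle deterministically:

    import json
    import os

    board_file = os.path.join(os.path.dirname(__file__), "supported_boards.json")
    with open(board_file) as f:  # closed automatically when the block exits
        supported_boards = json.load(f)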

hls4ml/backends/vitis_accelerator/vivado_directives.json
Lines changed: 3 additions & 3 deletions

@@ -131,7 +131,7 @@
             }
         }
     }
-}
+}
 }
-}
-}
+}
+}

[file name not captured]
Lines changed: 1 addition & 1 deletion

@@ -1,2 +1,2 @@
 config_interface -m_axi_auto_max_ports=true
-config_interface -m_axi_offset slave
+config_interface -m_axi_offset slave

hls4ml/templates/vitis_accelerator/kernel_wrapper.h
Lines changed: 1 addition & 1 deletion

@@ -7,4 +7,4 @@

 // hls-fpga-machine-learning accelerator io

-#endif
+#endif

[file name not captured]
Lines changed: 25 additions & 24 deletions

@@ -1,38 +1,39 @@
-#include "kernel_wrapper.h"
 #include "firmware/myproject.h"
+#include "kernel_wrapper.h"

 static void read_input(const /*IN_INTERFACE_TYPE*/ *in, in_buffer_t (&in_buf)[BATCHSIZE][DATA_SIZE_IN]) {
-  for (int i = 0; i < BATCHSIZE; i++) {
-    #pragma HLS PIPELINE
-    for(int j = 0; j < DATA_SIZE_IN; j++) {
-      #pragma HLS UNROLL
-      in_buf[i][j] = /*IN_HW_QUANT*/in[i * DATA_SIZE_IN + j];
-    }
+    for (int i = 0; i < BATCHSIZE; i++) {
+    #pragma HLS PIPELINE
+        for (int j = 0; j < DATA_SIZE_IN; j++) {
+    #pragma HLS UNROLL
+            in_buf[i][j] = /*IN_HW_QUANT*/ in[i * DATA_SIZE_IN + j];
+        }
     }
 }
-static void run_inference(in_buffer_t (&in_buf)[BATCHSIZE][DATA_SIZE_IN], out_buffer_t (&out_buf)[BATCHSIZE][DATA_SIZE_OUT]) {
-  for (int i = 0; i < BATCHSIZE; i++) {
-    #pragma HLS DATAFLOW
-    myproject(in_buf[i],out_buf[i]);
+static void run_inference(in_buffer_t (&in_buf)[BATCHSIZE][DATA_SIZE_IN],
+                          out_buffer_t (&out_buf)[BATCHSIZE][DATA_SIZE_OUT]) {
+    for (int i = 0; i < BATCHSIZE; i++) {
+    #pragma HLS DATAFLOW
+        myproject(in_buf[i], out_buf[i]);
     }
 }
 static void write_result(/*OUT_INTERFACE_TYPE*/ *out, out_buffer_t (&out_buf)[BATCHSIZE][DATA_SIZE_OUT]) {
-  for (int i = 0; i < BATCHSIZE; i++) {
-    #pragma HLS PIPELINE
-    for (int j = 0; j < DATA_SIZE_OUT; j++) {
-      #pragma HLS UNROLL
-      out[i * DATA_SIZE_OUT + j] = /*OUT_HW_QUANT*/out_buf[i][j];
+    for (int i = 0; i < BATCHSIZE; i++) {
+    #pragma HLS PIPELINE
+        for (int j = 0; j < DATA_SIZE_OUT; j++) {
+    #pragma HLS UNROLL
+            out[i * DATA_SIZE_OUT + j] = /*OUT_HW_QUANT*/ out_buf[i][j];
+        }
     }
-  }
 }

 extern "C" {
-  /**
-    \brief HLS4ML Kernel Implementation
-    \param in Input Vector
-    \param out Output Vector
+/**
+  \brief HLS4ML Kernel Implementation
+  \param in Input Vector
+  \param out Output Vector
 */
-  void kernel_wrapper(const /*IN_INTERFACE_TYPE*/ *in, /*OUT_INTERFACE_TYPE*/ *out) {
+void kernel_wrapper(const /*IN_INTERFACE_TYPE*/ *in, /*OUT_INTERFACE_TYPE*/ *out) {
     in_buffer_t in_buf[BATCHSIZE][DATA_SIZE_IN];
     out_buffer_t out_buf[BATCHSIZE][DATA_SIZE_OUT];
 #pragma HLS ARRAY_RESHAPE variable=in_buf complete dim=2

@@ -42,5 +43,5 @@ extern "C" {
     read_input(in, in_buf);
     run_inference(in_buf, out_buf);
     write_result(out, out_buf);
-  }
-}
+}
+}

[file name not captured]
Lines changed: 26 additions & 26 deletions

@@ -1,43 +1,43 @@
-#include "kernel_wrapper.h"
 #include "firmware/myproject.h"
+#include "kernel_wrapper.h"

 static void read_input(const /*IN_INTERFACE_TYPE*/ *in, hls::stream<input_t> &input, int n) {
-  for (int i = 0; i < DATA_SIZE_IN; i++) {
-    #pragma HLS PIPELINE
-    input_t tmp;
-    for (int j = 0; j < NNET_ARRAY_DEPTH; j++) {
-      #pragma HLS UNROLL
-      tmp[j] = /*IN_HW_QUANT*/in[(n * DATA_SIZE_IN * NNET_ARRAY_DEPTH) + (i * NNET_ARRAY_DEPTH) + j];
+    for (int i = 0; i < DATA_SIZE_IN; i++) {
+    #pragma HLS PIPELINE
+        input_t tmp;
+        for (int j = 0; j < NNET_ARRAY_DEPTH; j++) {
+    #pragma HLS UNROLL
+            tmp[j] = /*IN_HW_QUANT*/ in[(n * DATA_SIZE_IN * NNET_ARRAY_DEPTH) + (i * NNET_ARRAY_DEPTH) + j];
+        }
+        input << tmp;
     }
-    input << tmp;
-  }
 }

 static void write_result(/*OUT_INTERFACE_TYPE*/ *out, hls::stream<result_t> &output, int n) {
-  result_t tmp = output.read();
-  for (int i = 0; i < DATA_SIZE_OUT; i++) {
-    #pragma HLS UNROLL
-    out[(n * DATA_SIZE_OUT) + i] = /*OUT_HW_QUANT*/tmp[i];
-  }
+    result_t tmp = output.read();
+    for (int i = 0; i < DATA_SIZE_OUT; i++) {
+    #pragma HLS UNROLL
+        out[(n * DATA_SIZE_OUT) + i] = /*OUT_HW_QUANT*/ tmp[i];
+    }
 }

 extern "C" {
-  /**
-    \brief HLS4ML Kernel Implementation
-    \param in Input Vector
-    \param out Output Vector
+/**
+  \brief HLS4ML Kernel Implementation
+  \param in Input Vector
+  \param out Output Vector
 */
-  void kernel_wrapper(const /*IN_INTERFACE_TYPE*/ *in, /*OUT_INTERFACE_TYPE*/ *out) {
+void kernel_wrapper(const /*IN_INTERFACE_TYPE*/ *in, /*OUT_INTERFACE_TYPE*/ *out) {
     hls::stream<input_t> input("input");
     hls::stream<result_t> output("output");
 #pragma HLS STREAM variable=input depth=DATA_SIZE_IN
 #pragma HLS STREAM variable=output depth=1
-
+
     for (int n = 0; n < BATCHSIZE; n++) {
-      #pragma HLS DATAFLOW
-      read_input(in, input, n);
-      myproject(input, output);
-      write_result(out, output, n);
+    #pragma HLS DATAFLOW
+        read_input(in, input, n);
+        myproject(input, output);
+        write_result(out, output, n);
     }
-  }
-}
+}
+}

hls4ml/templates/vitis_accelerator/libs/DdrFpga.hpp
Lines changed: 12 additions & 17 deletions

@@ -2,37 +2,32 @@

 #include "FpgaObj.hpp"

-template <class V, class W>
-class DdrFpga : public FpgaObj<V, W> {
-public:
+template <class V, class W> class DdrFpga : public FpgaObj<V, W> {
+  public:
     DdrFpga(int kernInputSize, int kernOutputSize, int numCU, int numThreads, int numEpochs)
-        : FpgaObj<V, W>(kernInputSize, kernOutputSize, numCU, numThreads, numEpochs) {
-    }
+        : FpgaObj<V, W>(kernInputSize, kernOutputSize, numCU, numThreads, numEpochs) {}

     void allocateHostMemory(int chan_per_port) {
         // Creating Buffer objects in Host memory
         /* ***NOTE*** When creating a Buffer with user pointer (CL_MEM_USE_HOST_PTR), under the hood, user pointer
            is used if it is properly aligned. when not aligned, runtime has no choice but to create
            its own host side Buffer. So it is recommended to use this allocator if user wishes to
-           create Buffer using CL_MEM_USE_HOST_PTR to align user buffer to page boundary. It will
+           create Buffer using CL_MEM_USE_HOST_PTR to align user buffer to page boundary. It will
            ensure that user buffer is used when user creates Buffer/Mem object with CL_MEM_USE_HOST_PTR */
         size_t vector_size_in_bytes = sizeof(V) * this->_kernInputSize;
         size_t vector_size_out_bytes = sizeof(W) * this->_kernOutputSize;
         for (int ib = 0; ib < this->_numThreads; ib++) {
             for (int ik = 0; ik < this->_numCU; ik++) {
-                cl::Buffer buffer_in_tmp(this->context,
-                    CL_MEM_USE_HOST_PTR | CL_MEM_READ_ONLY,
-                    vector_size_in_bytes,
-                    this->source_in.data() + ((ib*this->_numCU + ik) * this->_kernInputSize));
-                cl::Buffer buffer_out_tmp(this->context,
-                    CL_MEM_USE_HOST_PTR | CL_MEM_WRITE_ONLY,
-                    vector_size_out_bytes,
-                    this->source_hw_results.data() + ((ib*this->_numCU + ik) * this->_kernOutputSize));
+                cl::Buffer buffer_in_tmp(this->context, CL_MEM_USE_HOST_PTR | CL_MEM_READ_ONLY, vector_size_in_bytes,
+                                         this->source_in.data() + ((ib * this->_numCU + ik) * this->_kernInputSize));
+                cl::Buffer buffer_out_tmp(this->context, CL_MEM_USE_HOST_PTR | CL_MEM_WRITE_ONLY, vector_size_out_bytes,
+                                         this->source_hw_results.data() +
+                                             ((ib * this->_numCU + ik) * this->_kernOutputSize));
                 this->buffer_in.push_back(buffer_in_tmp);
                 this->buffer_out.push_back(buffer_out_tmp);
-                this->krnl_xil[ib*this->_numCU + ik].setArg(0, this->buffer_in[ib*this->_numCU + ik]);
-                this->krnl_xil[ib*this->_numCU + ik].setArg(1, this->buffer_out[ib*this->_numCU + ik]);
+                this->krnl_xil[ib * this->_numCU + ik].setArg(0, this->buffer_in[ib * this->_numCU + ik]);
+                this->krnl_xil[ib * this->_numCU + ik].setArg(1, this->buffer_out[ib * this->_numCU + ik]);
             }
         }
     }
-};
+};
