-
Notifications
You must be signed in to change notification settings - Fork 11
Expand file tree
/
Copy pathgraph_ops.cpp
More file actions
400 lines (344 loc) · 14.2 KB
/
graph_ops.cpp
File metadata and controls
400 lines (344 loc) · 14.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
/*****************************************************************************
Copyright (c) 2020 Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*****************************************************************************/
#include "rml/RadeonML.hpp"
#include "rml/RadeonML_utils.hpp"
#include <cstring>
#include <fstream>
#include <iostream>
#include <sstream>
#include <stdexcept>
#include <string>
#include <vector>
/*
 * Create operation that holds information about input data
 *
 * @param graph - graph where operation is created
 * @param name - unique operation name
 * @param shape - shape of input tensor (must contain at least 4 dimensions, NHWC order)
 * @return - created placeholder operation
 * @throws std::invalid_argument if shape has fewer than 4 dimensions
 */
rml_op CreatePlaceholderOp(rml::Graph& graph,
                           const std::string& name,
                           const std::vector<uint32_t>& shape)
{
    // The descriptor below reads shape[0..3]; fail fast instead of
    // invoking undefined behavior on a too-short shape vector
    if (shape.size() < 4)
    {
        throw std::invalid_argument("CreatePlaceholderOp: shape must have 4 dimensions (NHWC)");
    }
    // Create placeholder operation describing a float32 NHWC input tensor
    rml_op_desc input_desc = {RML_OP_PLACEHOLDER, name.c_str()};
    input_desc.placeholder = {
        RML_DTYPE_FLOAT32, RML_LAYOUT_NHWC, {shape[0], shape[1], shape[2], shape[3]}};
    return graph.CreateOperation(input_desc);
}
/*
 * Create operation that stores scalar data
 *
 * @param graph - graph where operation is created
 * @param name - unique operation name
 * @param dtype - tensor data type
 * @param value - scalar value
 * @return - created scalar operation
 */
template<typename T>
rml_op CreateScalarOp(rml::Graph& graph, const std::string& name, rml_dtype dtype, T value)
{
    // A scalar is modeled as a constant tensor with the SCALAR layout;
    // the descriptor points at the value for the duration of the call
    rml_op_desc scalar_desc = {RML_OP_CONST, name.c_str()};
    scalar_desc.constant = {{dtype, RML_LAYOUT_SCALAR}, &value};
    return graph.CreateOperation(scalar_desc);
}
/*
 * Create specialized unary operation
 *
 * @param graph - graph where operation is created
 * @param name - unique operation name
 * @param op_type - type of unary operation
 * @param input - input operation
 * @return - created unary operation
 */
rml_op CreateUnaryOp(rml::Graph& graph, const std::string& name, rml_op_type op_type, rml_op input)
{
    // Wrap the single input in a unary descriptor of the requested type
    rml_op_desc unary_desc = {op_type, name.c_str()};
    unary_desc.unary = {input};
    return graph.CreateOperation(unary_desc);
}
/*
 * Create specialized binary operation
 *
 * @param graph - graph where operation is created
 * @param name - unique operation name
 * @param op_type - type of binary operation
 * @param input1 - input1 operation
 * @param input2 - input2 operation
 * @return - created binary operation
 */
rml_op CreateBinaryOp(rml::Graph& graph,
                      const std::string& name,
                      rml_op_type op_type,
                      rml_op input1,
                      rml_op input2)
{
    // Wrap both inputs in a binary descriptor of the requested type
    rml_op_desc binary_desc = {op_type, name.c_str()};
    binary_desc.binary = {input1, input2};
    return graph.CreateOperation(binary_desc);
}
/*
 * Create preprocessing graph and connect it with base graph
 *
 * The preprocessing graph applies exponential tone-mapping to the HDR color
 * input and concatenates the result with the auxiliary feature inputs.
 *
 * @param graph - base graph to be connected with preprocessing graph
 * @param input_names - unique input names (color, albedo, depth, normal)
 * @param input_shapes - input shapes, one per input name
 * @return - connected graph
 * @throws std::invalid_argument if fewer than 4 names/shapes are supplied
 */
rml::Graph ConnectPreprocessingGraph(const rml::Graph& graph,
                                     const std::vector<std::string>& input_names,
                                     const std::vector<std::vector<uint32_t>>& input_shapes)
{
    // The code below indexes four names and four shapes; validate up front
    // instead of reading past the end of the vectors
    if (input_names.size() < 4 || input_shapes.size() < 4)
    {
        throw std::invalid_argument(
            "ConnectPreprocessingGraph: 4 input names and 4 input shapes are required");
    }
    // Preprocessing graph includes exponential tone-mapping
    // ldr_color = beta - exp(alpha * hdr_color)
    // alpha = -1.0
    // beta = 1.0
    auto preprocess_graph = rml::CreateGraph();
    // Create placeholder per input
    rml_op color_op = CreatePlaceholderOp(preprocess_graph, input_names[0], input_shapes[0]);
    rml_op albedo_op = CreatePlaceholderOp(preprocess_graph, input_names[1], input_shapes[1]);
    rml_op depth_op = CreatePlaceholderOp(preprocess_graph, input_names[2], input_shapes[2]);
    rml_op normal_op = CreatePlaceholderOp(preprocess_graph, input_names[3], input_shapes[3]);
    // Tone-mapping constants: ldr = 1 - exp(-hdr)
    rml_op alpha_op = CreateScalarOp<float>(preprocess_graph, "alpha", RML_DTYPE_FLOAT32, -1.0f);
    rml_op beta_op = CreateScalarOp<float>(preprocess_graph, "beta", RML_DTYPE_FLOAT32, 1.0f);
    // Create mul, exp and sub operations
    rml_op mul_op = CreateBinaryOp(preprocess_graph, "mul", RML_OP_MUL, alpha_op, color_op);
    rml_op exp_op = CreateUnaryOp(preprocess_graph, "exp", RML_OP_EXP, mul_op);
    rml_op sub_op = CreateBinaryOp(preprocess_graph, "sub", RML_OP_SUB, beta_op, exp_op);
    // Concatenate along the last (channel) axis
    rml_op axis_op = CreateScalarOp<int32_t>(preprocess_graph, "concat/axis", RML_DTYPE_INT32, -1);
    // Concatenate tone-mapped color with albedo, depth and normal
    std::vector<rml_op> inputs = {sub_op, albedo_op, depth_op, normal_op};
    rml_op_desc concat_desc = {RML_OP_CONCAT, "concat"};
    concat_desc.concat = {inputs.size(), inputs.data(), axis_op};
    preprocess_graph.CreateOperation(concat_desc);
    // Feed the preprocessing graph's single output into the base graph's input
    std::vector<const char*> tail_inputs = graph.GetInputNames();
    std::vector<const char*> head_outputs = preprocess_graph.GetOutputNames();
    return rml::ConnectGraphs(preprocess_graph, graph, 1, &head_outputs[0], &tail_inputs[0]);
}
/*
 * Create postprocessing graph and connect it with base graph
 *
 * @param graph - base graph to be connected with postprocessing graph
 * @param input_name - unique input name of postprocessing graph
 * @param input_shape - input shape of postprocessing graph
 * @return connected graph
 */
rml::Graph ConnectPostprocessingGraph(rml::Graph& graph,
                                      const std::string& input_name,
                                      const std::vector<uint32_t>& input_shape)
{
    // Postprocessing applies gamma-correction:
    // ldr_color = clip(ldr_color, 0, 1) ^ gamma, with gamma = 0.4
    auto gamma_graph = rml::CreateGraph();
    // Input placeholder for the denoised color
    rml_op color_in = CreatePlaceholderOp(gamma_graph, input_name, input_shape);
    // Clamp values into [0, 1] before exponentiation
    rml_op_desc clip_desc = {RML_OP_CLIP, "clip"};
    clip_desc.clip = {color_in, 0.f, 1.f};
    rml_op clipped = gamma_graph.CreateOperation(clip_desc);
    // Raise the clipped color to the gamma power
    rml_op gamma_const = CreateScalarOp<float>(gamma_graph, "gamma", RML_DTYPE_FLOAT32, 0.4f);
    CreateBinaryOp(gamma_graph, "pow", RML_OP_POW, clipped, gamma_const);
    // Wire the base graph's output into the gamma graph's input
    std::vector<const char*> head_outputs = graph.GetOutputNames();
    std::vector<const char*> tail_inputs = gamma_graph.GetInputNames();
    return rml::ConnectGraphs(graph, gamma_graph, 1, &head_outputs[0], &tail_inputs[0]);
}
/*
* Read input from file
*
* @param input_file - name of input file
* @return - string content of file
*/
std::string ReadInput(const std::string& input_file)
{
std::istream* input_stream;
std::ifstream input_file_stream;
if (input_file.empty())
{
freopen(nullptr, "rb", stdin);
input_stream = &std::cin;
std::cout << "Reading data from stdin...\n";
}
else
{
input_file_stream.open(input_file, std::ios_base::binary);
if (input_file_stream.fail())
{
throw std::runtime_error(std::string("Error reading ") + input_file);
}
input_stream = &input_file_stream;
std::cout << "Reading data from file: " << input_file << "\n";
}
std::ostringstream stream;
stream << input_stream->rdbuf();
auto input = stream.str();
std::cout << "Input data size: " << input.size() << " bytes\n";
return input;
}
/*
 * Write output to file
 *
 * Writes binary data to a file, or to stdout when the name is empty.
 *
 * @param output_file - name of output file; empty string means write to stdout
 * @param output - output data
 * @throws std::runtime_error if the file cannot be opened or the write fails
 */
void WriteOutput(const std::string& output_file, const std::string& output)
{
    std::cout << "Output data size: " << output.size() << " bytes\n";
    std::ostream* output_stream;
    std::ofstream output_file_stream;
    if (output_file.empty())
    {
        // Switch stdout to binary mode so raw tensor bytes survive on Windows
        freopen(nullptr, "wb", stdout);
        output_stream = &std::cout;
        // NOTE(review): this status message goes to stdout and is therefore
        // mixed into the binary result stream - consider std::cerr instead
        std::cout << "Writing result to stdout\n";
    }
    else
    {
        output_file_stream.open(output_file, std::ios_base::binary);
        if (output_file_stream.fail())
        {
            throw std::runtime_error(std::string("Error writing ") + output_file);
        }
        output_stream = &output_file_stream;
        std::cout << "Writing result to file: " << output_file << "\n";
    }
    output_stream->write(output.data(), output.size());
    // Fix: previously a failed write (disk full, closed pipe) was silently
    // ignored; report it instead of dropping data
    if (!*output_stream)
    {
        throw std::runtime_error(std::string("Error writing output data"));
    }
}
/*
 * This sample demonstrates how an ldr-denoiser could be converted to an hdr-denoiser
 * using tone-mapping as preprocessing and gamma-correction as postprocessing
 */
int main() try
{
    // Set model path
    // NOTE(review): a wide string is used on Windows and converted to
    // std::basic_string<rml_char> below - presumably rml_char is the
    // platform-native path character type; confirm against the RadeonML headers
#if defined(_WIN32)
    std::wstring model_path(L"path/model");
#else
    std::string model_path("path/model");
#endif
    // Set input files (one raw tensor file per model input, same order as input_names)
    const std::vector<std::string> input_files = {
        "path/color",
        "path/albedo",
        "path/depth",
        "path/normal",
    };
    // Set output file
    const std::string output_file = "path/output";
    // Set input names (must match the placeholders created by ConnectPreprocessingGraph)
    const std::vector<std::string> input_names = {"hdr-color", "albedo", "depth", "normal"};
    // Set input shapes (NHWC: batch, height, width, channels)
    const std::vector<std::vector<uint32_t>> input_shapes = {
        {1, 600, 800, 3},
        {1, 600, 800, 3},
        {1, 600, 800, 1},
        {1, 600, 800, 2},
    };
    // Create a context
    // The handles are released automatically upon scope exit
    rml::Context context = rml::CreateDefaultContext();
    // Load model as a mutable graph
    // model input - 9-channel 800x600 image (3-channel hdr-color,
    //                                        3-channel albedo,
    //                                        1-channel depth,
    //                                        2-channel normal)
    // model output - 3-channel 800x600 ldr image
    // The handles are released automatically upon scope exit
    rml::Graph graph =
        rml::LoadGraphFromFile(std::basic_string<rml_char>(model_path.begin(), model_path.end()));
    // Add preprocessing of base model inputs
    // Before we can use the ldr-denoiser for hdr data, we should adjust hdr-color
    // using tone-mapping and concatenate it with albedo, depth and normal
    graph = ConnectPreprocessingGraph(graph, input_names, input_shapes);
    // Add postprocessing of base model outputs
    // We should also apply gamma-correction to the denoised image
    graph = ConnectPostprocessingGraph(graph, "input", input_shapes[0]);
    // Create immutable model from connected graphs
    // The handles are released automatically upon scope exit
    rml::Model model = context.CreateModel(graph);
    // Set up input info: copy each shape into a fixed-rank rml_tensor_info
    std::vector<rml_tensor_info> input_infos;
    for (size_t i = 0; i < input_shapes.size(); i++)
    {
        rml_tensor_info input_info = {RML_DTYPE_FLOAT32, RML_LAYOUT_NHWC};
        // Copy at most RML_TENSOR_MAX_RANK dimensions into the fixed-size shape array
        std::memcpy(input_info.shape,
                    input_shapes[i].data(),
                    std::min(input_shapes[i].size(), RML_TENSOR_MAX_RANK) * sizeof(uint32_t));
        input_infos.push_back(input_info);
        std::cout << "Input" << i << ": " << input_info << std::endl;
        model.SetInputInfo(input_names[i], input_info);
    }
    // Check memory info
    rml_memory_info memory_info = model.GetMemoryInfo();
    std::cout << "Memory allocated: " << memory_info.gpu_total << std::endl;
    // Create and fill the input tensors from the input files
    std::vector<rml::Tensor> inputs;
    // The handles are released automatically upon scope exit
    for (size_t i = 0; i < input_shapes.size(); i++)
    {
        rml::Tensor input;
        input = context.CreateTensor(input_infos[i], RML_ACCESS_MODE_WRITE_ONLY);
        input.Write(ReadInput(input_files[i]));
        inputs.push_back(std::move(input));
    }
    // Set model inputs (name order matches the tensor order above)
    for (size_t i = 0; i < inputs.size(); i++)
    {
        model.SetInput(input_names[i], inputs[i]);
    }
    // Get output tensor information
    auto output_info = model.GetOutputInfo();
    std::cout << "Output: " << output_info << std::endl;
    // Create the output tensor
    // The handles are released automatically upon scope exit
    auto output_tensor = context.CreateTensor(output_info, RML_ACCESS_MODE_READ_ONLY);
    // Set model output
    model.SetOutput(output_tensor);
    // Run the inference
    model.Infer();
    // Get data from output tensor
    size_t output_size;
    void* output_data = output_tensor.Map(&output_size);
    // Copy the mapped bytes before unmapping so the buffer can be released
    const std::string output(static_cast<char*>(output_data), output_size);
    output_tensor.Unmap(output_data);
    // Write the output
    WriteOutput(output_file, output);
}
catch (const std::exception& e)
{
    // Any RadeonML or I/O failure surfaces here as std::exception
    std::cerr << e.what() << std::endl;
    return 1;
}