Skip to content

Commit 5fddd99

Browse files
committed
move TEST from test_matrixCompare.cpp to cross_map_normal_op_test.cpp
1 parent 148bd4d commit 5fddd99

File tree

6 files changed

+221
-154
lines changed

6 files changed

+221
-154
lines changed

cmake/util.cmake

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -107,6 +107,7 @@ function(link_paddle_exe TARGET_NAME)
107107
paddle_parameter
108108
paddle_proto
109109
paddle_cuda
110+
paddle_test_main
110111
${METRIC_LIBS}
111112
${PROTOBUF_LIBRARY}
112113
${LIBGLOG_LIBRARY}

paddle/function/CMakeLists.txt

Lines changed: 25 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -1,12 +1,27 @@
1-
file(GLOB FUNCTION_HEADERS . *.h)
2-
3-
if(NOT WITH_GPU)
4-
file(GLOB FUNCTION_SOURCES . *.cpp)
5-
add_library(paddle_function STATIC ${FUNCTION_SOURCES})
6-
else()
7-
file(GLOB FUNCTION_SOURCES . *.cpp *.cu)
8-
cuda_add_library(paddle_function ${FUNCTION_SOURCES})
1+
file(GLOB h_files . *_op.h)
2+
file(GLOB cpp_files . *_op.cpp)
3+
4+
list(APPEND h_files Function.h)
5+
list(APPEND cpp_files Function.cpp)
6+
7+
if(WITH_GPU)
8+
file(GLOB cu_files . *_op_gpu.cu)
9+
cuda_compile(cu_objs ${cu_files})
910
endif()
1011

11-
add_style_check_target(paddle_function ${FUNCTION_SOURCES})
12-
add_style_check_target(paddle_function ${FUNCTION_HEADERS})
12+
add_library(paddle_function STATIC ${cpp_files} ${cu_objs})
13+
14+
add_library(paddle_test_main STATIC TestMain.cpp)
15+
16+
if(WITH_GPU)
17+
# TODO:
18+
# file(GLOB test_files . *_op_test.cpp)
19+
# add_executable(${test_bin} EXCLUDE_FROM_ALL ${test_files})
20+
add_simple_unittest(cross_map_normal_op_test)
21+
endif()
22+
23+
add_style_check_target(paddle_function ${h_files})
24+
add_style_check_target(paddle_function ${cpp_files})
25+
if(WITH_GPU)
26+
add_style_check_target(paddle_function ${cu_files})
27+
endif()

paddle/function/FunctionTest.h

Lines changed: 102 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,102 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "Function.h"
#include "paddle/math/Vector.h"
#include "paddle/math/tests/TensorCheck.h"

namespace paddle {

/// Test helper that runs the CPU and the GPU registration of one Function
/// side by side on identical random data and checks that the results agree.
///
/// Construction creates "<name>-CPU" and "<name>-GPU" from the function
/// registrar and initializes both with the same FuncConfig; cmpWithArg()
/// then allocates buffers, runs calc() on both devices, and compares the
/// writable arguments element-wise via autotest::TensorCheckErr.
class FunctionCompare {
public:
  FunctionCompare(const std::string& name, const FuncConfig& config)
      : cpu(FunctionBase::funcRegistrar_.createByType(name + "-CPU")),
        gpu(FunctionBase::funcRegistrar_.createByType(name + "-GPU")) {
    cpu->init(config);
    gpu->init(config);
  }

  /// Runs both functions on freshly allocated, identically randomized
  /// buffers and checks outputs and inouts for agreement.
  ///
  /// Only the dims_ of the passed Tensors are used; their data pointers are
  /// ignored (callers may pass nullptr) because the buffers are allocated
  /// here and kept alive in cpuMemory/gpuMemory for the duration of the test.
  void cmpWithArg(const Arguments& inputs,
                  const Arguments& outputs,
                  const Arguments& inouts) {
    // Allocate one CPU and one GPU buffer per argument and fill both with
    // the same uniform random values. Capturing [this] (rather than the
    // original [=], whose implicit this-capture is deprecated in C++20)
    // makes the mutation of the member vectors explicit.
    auto initArgs = [this](
        Arguments& cpuArgs, Arguments& gpuArgs, const Arguments& inArgs) {
      for (const auto& arg : inArgs) {
        size_t size = sizeof(real);
        for (auto dim : arg.dims_) {
          size *= dim;
        }
        cpuMemory.emplace_back(std::make_shared<CpuMemoryHandle>(size));
        gpuMemory.emplace_back(std::make_shared<GpuMemoryHandle>(size));
        cpuArgs.emplace_back(
            Tensor((real*)cpuMemory.back()->getBuf(), arg.dims_));
        gpuArgs.emplace_back(
            Tensor((real*)gpuMemory.back()->getBuf(), arg.dims_));

        // will use an api to refactor this code.
        CpuVector cpuVector(size / sizeof(real),
                            (real*)cpuArgs.back().getData());
        GpuVector gpuVector(size / sizeof(real),
                            (real*)gpuArgs.back().getData());
        cpuVector.uniform(0.001, 1);
        gpuVector.copyFrom(cpuVector);
      }
    };
    initArgs(cpuInputs, gpuInputs, inputs);
    initArgs(cpuOutputs, gpuOutputs, outputs);
    initArgs(cpuInouts, gpuInouts, inouts);

    // Run the same computation on both devices.
    cpu->calc(cpuInputs, cpuOutputs, cpuInouts);
    gpu->calc(gpuInputs, gpuOutputs, gpuInouts);

    // Compare the writable arguments element-wise. The lambda needs no
    // captures, and taking the Tensors by const reference avoids the
    // per-element copies (and the cpu/gpu member shadowing) of the
    // original `auto cpu = cpuArgs[i]` form.
    auto checkArgs = [](const Arguments& cpuArgs, const Arguments& gpuArgs) {
      for (size_t i = 0; i < cpuArgs.size(); i++) {
        const auto& cpuArg = cpuArgs[i];
        const auto& gpuArg = gpuArgs[i];
        size_t size = 1;
        for (auto dim : cpuArg.dims_) {
          size *= dim;
        }
        CpuVector cpuVector(size, (real*)cpuArg.getData());
        GpuVector gpuVector(size, (real*)gpuArg.getData());

        autotest::TensorCheckErr(cpuVector, gpuVector);
      }
    };
    checkArgs(cpuOutputs, gpuOutputs);
    checkArgs(cpuInouts, gpuInouts);
  }

protected:
  std::shared_ptr<FunctionBase> cpu;
  std::shared_ptr<FunctionBase> gpu;
  // Owning handles keep the buffers referenced by the Tensors below alive.
  std::vector<CpuMemHandlePtr> cpuMemory;
  std::vector<GpuMemHandlePtr> gpuMemory;
  Arguments cpuInputs;
  Arguments cpuOutputs;
  Arguments cpuInouts;
  Arguments gpuInputs;
  Arguments gpuOutputs;
  Arguments gpuInouts;
};

}  // namespace paddle

using paddle::FunctionCompare;
using paddle::FuncConfig;
using paddle::Dims;
using paddle::Tensor;

paddle/function/TestMain.cpp

Lines changed: 22 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,22 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include "paddle/utils/Util.h"

/// Shared gtest entry point, built into the paddle_test_main static library
/// so individual test files do not need their own main().
int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  // Paddle-wide initialization (see paddle/utils/Util.h); runs after
  // InitGoogleTest so gtest consumes its own flags first.
  paddle::initMain(argc, argv);
  return RUN_ALL_TESTS();
}
Lines changed: 71 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,71 @@
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <gtest/gtest.h>
#include "FunctionTest.h"

// Compares the CPU and GPU implementations of CrossMapNormal (forward) over
// a grid of batch sizes, channel counts, image sizes, and normalization
// window sizes. Tensors are constructed with a nullptr buffer on purpose:
// FunctionCompare::cmpWithArg allocates and randomizes the real buffers
// from dims alone. Inputs: {input}; outputs: {output, denom}; no inouts.
TEST(CrossMapNormal, real) {
  for (size_t numSamples : {5, 32}) {
    for (size_t channels : {1, 5, 32}) {
      for (size_t imgSizeH : {5, 33, 100}) {
        for (size_t imgSizeW : {5, 32, 96}) {
          for (size_t size : {1, 2, 3, 5, 7}) {
            VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
                    << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW
                    << " size=" << size;

            FunctionCompare compare("CrossMapNormal",
                                    FuncConfig()
                                        .set("size", size)
                                        .set("scale", (real)1.5)
                                        .set("pow", (real)0.5));
            Dims dims{numSamples, channels, imgSizeH, imgSizeW};
            compare.cmpWithArg({Tensor(nullptr, dims)},
                               {Tensor(nullptr, dims), Tensor(nullptr, dims)},
                               {});
          }
        }
      }
    }
  }
}

// Same shape grid for the backward pass. Inputs: {inputValue, outputValue,
// outputGrad, denom}; outputs: {inputGrad}; no inouts.
TEST(CrossMapNormalGrad, real) {
  for (size_t numSamples : {5, 32}) {
    for (size_t channels : {1, 5, 32}) {
      for (size_t imgSizeH : {5, 33, 100}) {
        for (size_t imgSizeW : {5, 32, 96}) {
          for (size_t size : {1, 2, 3, 5, 7}) {
            VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
                    << " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW
                    << " size=" << size;

            FunctionCompare compare("CrossMapNormalGrad",
                                    FuncConfig()
                                        .set("size", size)
                                        .set("scale", (real)1.5)
                                        .set("pow", (real)0.5));
            Dims dims{numSamples, channels, imgSizeH, imgSizeW};
            compare.cmpWithArg({Tensor(nullptr, dims),
                                Tensor(nullptr, dims),
                                Tensor(nullptr, dims),
                                Tensor(nullptr, dims)},
                               {Tensor(nullptr, dims)},
                               {});
          }
        }
      }
    }
  }
}

paddle/math/tests/test_matrixCompare.cpp

Lines changed: 0 additions & 144 deletions
Original file line numberDiff line numberDiff line change
@@ -1263,150 +1263,6 @@ TEST(Matrix, MaxOutFwdBwd) {
12631263
}
12641264
}
12651265

1266-
void testCrossMapNormalFwd(
1267-
int numSamples, int channels, int imgSizeH, int imgSizeW, int sizeX) {
1268-
float scale = 1.5;
1269-
float pow = 0.5;
1270-
int width = imgSizeH * imgSizeW * channels;
1271-
CpuMatrix inputs(numSamples, width);
1272-
CpuMatrix denoms(numSamples, width);
1273-
CpuMatrix outputs(numSamples, width);
1274-
GpuMatrix inputsGpu(numSamples, width);
1275-
GpuMatrix denomsGpu(numSamples, width);
1276-
GpuMatrix outputsGpu(numSamples, width);
1277-
1278-
inputs.randomizeUniform();
1279-
outputs.randomizeUniform();
1280-
inputsGpu.copyFrom(inputs);
1281-
outputsGpu.copyFrom(outputs);
1282-
1283-
FunctionBase* cpu =
1284-
FunctionBase::funcRegistrar_.createByType(FUNC_NAME(CrossMapNormal, CPU));
1285-
FunctionBase* gpu =
1286-
FunctionBase::funcRegistrar_.createByType(FUNC_NAME(CrossMapNormal, GPU));
1287-
cpu->init(FuncConfig()
1288-
.set("size", (size_t)sizeX)
1289-
.set("scale", scale)
1290-
.set("pow", pow));
1291-
gpu->init(FuncConfig()
1292-
.set("size", (size_t)sizeX)
1293-
.set("scale", scale)
1294-
.set("pow", pow));
1295-
1296-
Dims dims{
1297-
(size_t)numSamples, (size_t)channels, (size_t)imgSizeH, (size_t)imgSizeW};
1298-
cpu->calc({Tensor(inputs.getData(), dims)},
1299-
{Tensor(outputs.getData(), dims), Tensor(denoms.getData(), dims)},
1300-
{});
1301-
1302-
gpu->calc(
1303-
{Tensor(inputsGpu.getData(), dims)},
1304-
{Tensor(outputsGpu.getData(), dims), Tensor(denomsGpu.getData(), dims)},
1305-
{});
1306-
1307-
TensorCheckErr(outputs, outputsGpu);
1308-
TensorCheckErr(denoms, denomsGpu);
1309-
}
1310-
1311-
TEST(Matrix, crossMapNormalFwd) {
1312-
for (auto numSamples : {5, 32}) {
1313-
for (auto channels : {1, 5, 32}) {
1314-
for (auto imgSizeH : {5, 33, 100}) {
1315-
for (auto imgSizeW : {5, 32, 96}) {
1316-
for (auto sizeX : {1, 2, 3, 5, 7}) {
1317-
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
1318-
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW
1319-
<< " sizeX=" << sizeX;
1320-
testCrossMapNormalFwd(
1321-
numSamples, channels, imgSizeH, imgSizeW, sizeX);
1322-
}
1323-
}
1324-
}
1325-
}
1326-
}
1327-
}
1328-
1329-
void testCrossMapNormalBwd(
1330-
int numSamples, int channels, int imgSizeH, int imgSizeW, int sizeX) {
1331-
float scale = 1.5;
1332-
float pow = 0.5;
1333-
size_t width = imgSizeH * imgSizeW * channels;
1334-
1335-
CpuMatrix inputsGrad(numSamples, width);
1336-
CpuMatrix inputsValue(numSamples, width);
1337-
CpuMatrix outputsGrad(numSamples, width);
1338-
CpuMatrix outputsValue(numSamples, width);
1339-
CpuMatrix denoms(numSamples, width);
1340-
1341-
outputsGrad.randomizeUniform();
1342-
denoms.randomizeUniform();
1343-
inputsValue.randomizeUniform();
1344-
outputsValue.randomizeUniform();
1345-
inputsGrad.randomizeUniform();
1346-
denoms.add(0.01);
1347-
1348-
GpuMatrix inputsGradGpu(numSamples, width);
1349-
GpuMatrix inputsValueGpu(numSamples, width);
1350-
GpuMatrix outputsGradGpu(numSamples, width);
1351-
GpuMatrix outputsValueGpu(numSamples, width);
1352-
GpuMatrix denomsGpu(numSamples, width);
1353-
1354-
outputsGradGpu.copyFrom(outputsGrad);
1355-
denomsGpu.copyFrom(denoms);
1356-
inputsValueGpu.copyFrom(inputsValue);
1357-
outputsValueGpu.copyFrom(outputsValue);
1358-
inputsGradGpu.copyFrom(inputsGrad);
1359-
1360-
FunctionBase* cpu = FunctionBase::funcRegistrar_.createByType(
1361-
FUNC_NAME(CrossMapNormalGrad, CPU));
1362-
FunctionBase* gpu = FunctionBase::funcRegistrar_.createByType(
1363-
FUNC_NAME(CrossMapNormalGrad, GPU));
1364-
cpu->init(FuncConfig()
1365-
.set("size", (size_t)sizeX)
1366-
.set("scale", scale)
1367-
.set("pow", pow));
1368-
gpu->init(FuncConfig()
1369-
.set("size", (size_t)sizeX)
1370-
.set("scale", scale)
1371-
.set("pow", pow));
1372-
1373-
Dims dims{
1374-
(size_t)numSamples, (size_t)channels, (size_t)imgSizeH, (size_t)imgSizeW};
1375-
cpu->calc({Tensor(inputsValue.getData(), dims),
1376-
Tensor(outputsValue.getData(), dims),
1377-
Tensor(outputsGrad.getData(), dims),
1378-
Tensor(denoms.getData(), dims)},
1379-
{Tensor(inputsGrad.getData(), dims)},
1380-
{});
1381-
1382-
gpu->calc({Tensor(inputsValueGpu.getData(), dims),
1383-
Tensor(outputsValueGpu.getData(), dims),
1384-
Tensor(outputsGradGpu.getData(), dims),
1385-
Tensor(denomsGpu.getData(), dims)},
1386-
{Tensor(inputsGradGpu.getData(), dims)},
1387-
{});
1388-
1389-
TensorCheckErr(inputsGrad, inputsGradGpu);
1390-
}
1391-
1392-
TEST(Matrix, crossMapNormalBwd) {
1393-
for (auto numSamples : {5, 32}) {
1394-
for (auto channels : {1, 5, 32}) {
1395-
for (auto imgSizeH : {5, 33, 100}) {
1396-
for (auto imgSizeW : {5, 32, 96}) {
1397-
for (auto sizeX : {1, 2, 3, 5, 7}) {
1398-
VLOG(3) << " numSamples=" << numSamples << " channels=" << channels
1399-
<< " imgSizeH=" << imgSizeH << " imgSizeW=" << imgSizeW
1400-
<< " sizeX=" << sizeX;
1401-
testCrossMapNormalBwd(
1402-
numSamples, channels, imgSizeH, imgSizeW, sizeX);
1403-
}
1404-
}
1405-
}
1406-
}
1407-
}
1408-
}
1409-
14101266
int main(int argc, char** argv) {
14111267
testing::InitGoogleTest(&argc, argv);
14121268
initMain(argc, argv);

0 commit comments

Comments
 (0)