Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 9 additions & 0 deletions cpp/oneapi/dal/algo/linear_regression/test/spmd.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -27,4 +27,13 @@ TEMPLATE_LIST_TEST_M(lr_spmd_test, "LR common flow", "[lr][spmd]", lr_types) {
this->run_and_check_linear();
}

TEMPLATE_LIST_TEST_M(lr_spmd_test, "RR common flow", "[rr][spmd]", lr_types) {
// This test is shared between CPU and GPU runs. The float64 check only
// matters for data-parallel (device) builds: some GPUs lack native double
// support, while host CPUs always provide it, so do not skip CPU runs.
#ifdef ONEDAL_DATA_PARALLEL
    SKIP_IF(this->not_float64_friendly());
#endif

    this->generate(777);
    this->set_rank_count(GENERATE(2, 3));

    this->run_and_check_ridge();
}

} // namespace oneapi::dal::linear_regression::test
82 changes: 82 additions & 0 deletions samples/oneapi/cpp/ccl/sources/ridge_regression_distr_ccl.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,82 @@
/*******************************************************************************
* Copyright contributors to the oneDAL project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include <iomanip>
#include <iostream>

#include "oneapi/dal/algo/linear_regression.hpp"
#include "oneapi/dal/io/csv.hpp"
#include "oneapi/dal/spmd/ccl/communicator.hpp"

#include "utils.hpp"

namespace dal = oneapi::dal;
namespace lr = dal::linear_regression;

/// Runs the distributed ridge-regression sample over the CCL backend:
/// loads the datasets, partitions them row-wise across ranks, trains and
/// infers collectively, and prints the results from rank 0.
void run() {
    const auto train_data_file = get_data_path("data/linear_regression_train_data.csv");
    const auto train_responses_file =
        get_data_path("data/linear_regression_train_responses.csv");
    const auto test_data_file = get_data_path("data/linear_regression_test_data.csv");
    const auto test_responses_file = get_data_path("data/linear_regression_test_responses.csv");

    // Every rank reads the full tables; each rank later uses only its slice.
    const auto x_train = dal::read<dal::table>(dal::csv::data_source{ train_data_file });
    const auto y_train = dal::read<dal::table>(dal::csv::data_source{ train_responses_file });
    const auto x_test = dal::read<dal::table>(dal::csv::data_source{ test_data_file });
    const auto y_test = dal::read<dal::table>(dal::csv::data_source{ test_responses_file });

    auto comm = dal::preview::spmd::make_communicator<dal::preview::spmd::backend::ccl>();
    const auto rank = comm.get_rank();
    const auto rank_count = comm.get_rank_count();

    // Row-wise partitioning: rank i works on chunk i of every table.
    const auto x_train_chunks = split_table_by_rows<float>(x_train, rank_count);
    const auto y_train_chunks = split_table_by_rows<float>(y_train, rank_count);
    const auto x_test_chunks = split_table_by_rows<float>(x_test, rank_count);
    const auto y_test_chunks = split_table_by_rows<float>(y_test, rank_count);

    // Regularization coefficient forwarded to the ridge descriptor.
    const double alpha = 1.0;
    const auto rr_desc = lr::descriptor<float>{ true, alpha };

    const auto train_result =
        dal::preview::train(comm, rr_desc, x_train_chunks.at(rank), y_train_chunks.at(rank));

    const auto infer_result =
        dal::preview::infer(comm, rr_desc, x_test_chunks.at(rank), train_result.get_model());

    // Only the root rank reports, so output is not interleaved across ranks.
    if (rank == 0) {
        std::cout << "Ridge regression alpha: " << alpha << std::endl;

        std::cout << "Prediction results:\n" << infer_result.get_responses() << std::endl;

        std::cout << "Ground truth:\n" << y_test_chunks.at(rank) << std::endl;
    }
}

/// Entry point for the CCL-backed sample: initializes oneCCL and MPI,
/// executes the distributed workload, and finalizes MPI.
/// Returns 0 on success, non-zero if the workload fails.
int main(int argc, char const *argv[]) {
    // oneCCL must be initialized before the MPI runtime in this sample.
    ccl::init();
    int status = MPI_Init(nullptr, nullptr);
    if (status != MPI_SUCCESS) {
        throw std::runtime_error{ "Problem occurred during MPI init" };
    }

    try {
        run();
    }
    catch (const std::exception &e) {
        // Without this, an exception from run() would escape main and
        // terminate the rank with MPI never finalized.
        // NOTE(review): if other ranks can be blocked inside a collective
        // when one rank fails, MPI_Abort would be the safer shutdown --
        // confirm against the sample harness.
        std::cerr << "Sample failed: " << e.what() << std::endl;
        MPI_Finalize();
        return 1;
    }

    status = MPI_Finalize();
    if (status != MPI_SUCCESS) {
        throw std::runtime_error{ "Problem occurred during MPI finalize" };
    }
    return 0;
}
81 changes: 81 additions & 0 deletions samples/oneapi/cpp/mpi/sources/ridge_regression_distr_mpi.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
/*******************************************************************************
* Copyright contributors to the oneDAL project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/

#include <iomanip>
#include <iostream>

#include "oneapi/dal/algo/linear_regression.hpp"
#include "oneapi/dal/io/csv.hpp"
#include "oneapi/dal/spmd/mpi/communicator.hpp"

#include "utils.hpp"

namespace dal = oneapi::dal;
namespace lr = dal::linear_regression;

/// Runs the distributed ridge-regression sample over the MPI backend:
/// loads the datasets, partitions them row-wise across ranks, trains and
/// infers collectively, and prints the results from rank 0.
void run() {
    const auto train_data_file = get_data_path("data/linear_regression_train_data.csv");
    const auto train_responses_file =
        get_data_path("data/linear_regression_train_responses.csv");
    const auto test_data_file = get_data_path("data/linear_regression_test_data.csv");
    const auto test_responses_file = get_data_path("data/linear_regression_test_responses.csv");

    // Every rank reads the full tables; each rank later uses only its slice.
    const auto x_train = dal::read<dal::table>(dal::csv::data_source{ train_data_file });
    const auto y_train = dal::read<dal::table>(dal::csv::data_source{ train_responses_file });
    const auto x_test = dal::read<dal::table>(dal::csv::data_source{ test_data_file });
    const auto y_test = dal::read<dal::table>(dal::csv::data_source{ test_responses_file });

    auto comm = dal::preview::spmd::make_communicator<dal::preview::spmd::backend::mpi>();
    const auto rank = comm.get_rank();
    const auto rank_count = comm.get_rank_count();

    // Row-wise partitioning: rank i works on chunk i of every table.
    const auto x_train_chunks = split_table_by_rows<float>(x_train, rank_count);
    const auto y_train_chunks = split_table_by_rows<float>(y_train, rank_count);
    const auto x_test_chunks = split_table_by_rows<float>(x_test, rank_count);
    const auto y_test_chunks = split_table_by_rows<float>(y_test, rank_count);

    // Regularization coefficient forwarded to the ridge descriptor.
    const double alpha = 1.0;
    const auto rr_desc = lr::descriptor<float>{ true, alpha };

    const auto train_result =
        dal::preview::train(comm, rr_desc, x_train_chunks.at(rank), y_train_chunks.at(rank));

    const auto infer_result =
        dal::preview::infer(comm, rr_desc, x_test_chunks.at(rank), train_result.get_model());

    // Only the root rank reports, so output is not interleaved across ranks.
    if (rank == 0) {
        std::cout << "Ridge regression alpha: " << alpha << std::endl;

        std::cout << "Prediction results:\n" << infer_result.get_responses() << std::endl;

        std::cout << "Ground truth:\n" << y_test_chunks.at(rank) << std::endl;
    }
}

/// Entry point for the MPI-backed sample: initializes MPI, executes the
/// distributed workload, and finalizes MPI.
/// Returns 0 on success, non-zero if the workload fails.
int main(int argc, char const *argv[]) {
    int status = MPI_Init(nullptr, nullptr);
    if (status != MPI_SUCCESS) {
        throw std::runtime_error{ "Problem occurred during MPI init" };
    }

    try {
        run();
    }
    catch (const std::exception &e) {
        // Without this, an exception from run() would escape main and
        // terminate the rank with MPI never finalized.
        // NOTE(review): if other ranks can be blocked inside a collective
        // when one rank fails, MPI_Abort would be the safer shutdown --
        // confirm against the sample harness.
        std::cerr << "Sample failed: " << e.what() << std::endl;
        MPI_Finalize();
        return 1;
    }

    status = MPI_Finalize();
    if (status != MPI_SUCCESS) {
        throw std::runtime_error{ "Problem occurred during MPI finalize" };
    }
    return 0;
}
Loading