/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under the BSD-style license found in the
 * LICENSE file in the root directory of this source tree.
 */

#include <algorithm>
#include <span>

#include <torchrec/csrc/dynamic_embedding/details/io.h>
#include <torchrec/csrc/dynamic_embedding/ps.h>

namespace torchrec {

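// Asynchronously fetch the rows of `ids_to_fetch` that belong to the local
// shards from remote storage. Ids missing remotely are reinitialized in place
// (uniform weights in [weight_init_min, weight_init_max], zeroed optimizer
// states) when `reinit` is set. Returns a handle, keyed by `time`, that
// synchronize_fetch() can wait on.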
c10::intrusive_ptr<FetchHandle> PS::fetch(
    torch::Tensor ids_to_fetch,
    int64_t time,
    bool reinit,
    double weight_init_min,
    double weight_init_max) {
  std::lock_guard<std::mutex> lock(mu_);
  torch::NoGradGuard no_grad;

  auto [local_global_ids, local_cache_ids] = filter_local_ids(ids_to_fetch);
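  // Nothing owned by this rank: return a handle that needs no waiting.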
  if (local_global_ids.empty()) {
    return c10::make_intrusive<FetchHandle>(time, c10::intrusive_ptr<PS>());
  }

  fetch_notifications_.emplace_back(time, c10::make_intrusive<Notification>());
  c10::intrusive_ptr<Notification> notification =
      fetch_notifications_.back().second;
  // Does not support multiple col ids at the moment.
  std::vector<int64_t> col_ids{0};
  uint32_t num_os_ids = os_ids_.size();
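  // The fetch callback receives one tensor per requested id; an undefined
  // tensor means the id is not present in remote storage.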
  io_.fetch(
      table_name_,
      std::move(local_global_ids),
      col_ids,
      num_os_ids,
      torch::kF32,
      [=, this, cache_ids_to_fetch = std::move(local_cache_ids)](auto&& val) {
        TORCH_CHECK(val.size() == cache_ids_to_fetch.size());
        for (uint32_t i = 0; i < cache_ids_to_fetch.size(); ++i) {
          int64_t cache_id = cache_ids_to_fetch[i];
          auto& fetched = val[i];
          if (!fetched.defined()) {
            if (reinit) {
              std::vector<torch::Tensor> tensors = get_tensor_views(cache_id);
              tensors[0].uniform_(weight_init_min, weight_init_max);
              // Optimizer states are reset to zero.
              for (uint32_t j = 1; j < num_os_ids; ++j) {
                tensors[j].zero_();
              }
            }
            continue;
          }

          std::vector<torch::Tensor> tensors = get_tensor_views(cache_id);
          for (uint32_t j = 0; j < num_os_ids; ++j) {
            tensors[j].copy_(fetched.slice(0, j, j + 1));
          }
        }
        notification->done();
      });
  // `unsafe_reclaim_from_nonowning` is the `intrusive_ptr` counterpart of
  // `enable_shared_from_this`.
  return c10::make_intrusive<FetchHandle>(
      time, c10::intrusive_ptr<PS>::unsafe_reclaim_from_nonowning(this));
}

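// Synchronously push the rows owned by local shards back to remote storage.
// All pending fetches are drained first; the data is then pushed chunk by
// chunk, overlapping the preparation of the next chunk with the push of the
// current one.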
void PS::evict(torch::Tensor ids_to_evict) {
  std::lock_guard<std::mutex> lock(mu_);
  torch::NoGradGuard no_grad;
  // Make sure all previous fetches are done.
  synchronize_fetch();

  auto [local_global_ids, local_cache_ids] = filter_local_ids(ids_to_evict);
  if (local_global_ids.empty()) {
    return;
  }

  // Does not support multiple col ids at the moment.
  std::vector<int64_t> col_ids{0};
  uint32_t num_os_ids = os_ids_.size();
  uint32_t num_ids_to_evict = local_global_ids.size();

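  // A single notification alternates between this thread and the I/O
  // callback so that building chunk i+1 overlaps with pushing chunk i.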
  Notification notification;
  // Mark done up front so the wait before the first chunk's push won't block.
  notification.done();
  // Offset buffer shared by all chunks.
  std::vector<uint64_t> offsets;
  offsets.resize(num_ids_per_chunk_ * num_os_ids * col_ids.size() + 1);
  // Evict in chunks.
  for (uint32_t i = 0; i < num_ids_to_evict; i += num_ids_per_chunk_) {
    uint32_t num_ids_in_chunk = std::min(
        static_cast<uint32_t>(num_ids_per_chunk_), num_ids_to_evict - i);
    uint32_t data_size = num_ids_in_chunk * num_os_ids * col_ids.size();
    uint32_t offsets_size = num_ids_in_chunk * num_os_ids * col_ids.size() + 1;

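    // Gather the weight and optimizer-state views of every id in this chunk
    // and concatenate them into one contiguous CPU buffer.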
    std::vector<torch::Tensor> all_tensors;
    for (uint32_t j = i; j < i + num_ids_in_chunk; ++j) {
      int64_t cache_id = local_cache_ids[j];
      std::vector<torch::Tensor> tensors = get_tensor_views(cache_id);
      all_tensors.insert(all_tensors.end(), tensors.begin(), tensors.end());
    }
    torch::Tensor data = torch::cat(all_tensors, 0).cpu();
    TORCH_CHECK(data.numel() == data_size * col_size_);

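    // Compute the byte offset of each tensor view inside the concatenated
    // buffer (offsets[j] .. offsets[j + 1] spans tensor j).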
    offsets[0] = 0;
    for (uint32_t j = 0; j < all_tensors.size(); ++j) {
      offsets[j + 1] =
          offsets[j] + all_tensors[j].numel() * all_tensors[j].element_size();
    }
    // Wait until the push of the previous chunk has finished.
    notification.wait();
    notification.clear();
    io_.push(
        table_name_,
        std::span{local_global_ids.data() + i, num_ids_in_chunk},
        col_ids,
        os_ids_,
        std::span{
            reinterpret_cast<uint8_t*>(data.data_ptr<float>()),
            data.numel() * sizeof(float)},
        std::span{offsets.data(), offsets_size},
        [&notification] { notification.done(); });
  }
  notification.wait();
}

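// Wait for pending fetches. A negative `time` (the default used by evict)
// drains every pending fetch; otherwise waiting stops at the first queued
// notification whose timestamp differs from `time`.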
void PS::synchronize_fetch(int64_t time) {
  while (!fetch_notifications_.empty()) {
    auto& [t, notification] = fetch_notifications_.front();
    if (t != time && time >= 0) {
      break;
    }
    notification->wait();
    fetch_notifications_.pop_front();
  }
}

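// Return the tensor views backing `cache_id` in whichever local shard owns
// it: index 0 is the weight row, the remaining entries are optimizer states.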
std::vector<torch::Tensor> PS::get_tensor_views(int64_t cache_id) {
  for (auto& shard : *shards_) {
    if (shard.has(cache_id)) {
      return shard.get_tensor_view(cache_id);
    }
  }
  TORCH_CHECK(false, "no local shard contains cache id ", cache_id);
}

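// `ids` is an N x 2 tensor of (global_id, cache_id) pairs; keep only the
// pairs whose cache id is resident in one of the local shards.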
std::tuple<std::vector<int64_t>, std::vector<int64_t>> PS::filter_local_ids(
    const torch::Tensor& ids) {
  std::vector<int64_t> local_global_ids;
  std::vector<int64_t> local_cache_ids;
  TORCH_CHECK(ids.is_contiguous());
  TORCH_CHECK(ids.dim() == 2);
  auto* ids_ptr = ids.data_ptr<int64_t>();
  int64_t numel = ids.numel();
  for (int64_t i = 0; i < numel; i += 2) {
    auto cache_id = ids_ptr[i + 1];
    if (std::any_of(shards_->begin(), shards_->end(), [&](auto&& shard) {
          return shard.has(cache_id);
        })) {
      auto global_id = ids_ptr[i];
      local_global_ids.emplace_back(global_id);
      local_cache_ids.emplace_back(cache_id);
    }
  }
  return {std::move(local_global_ids), std::move(local_cache_ids)};
}

} // namespace torchrec