@@ -36,7 +36,7 @@ int BlkLocRegistry::tile_pin_index(const ClusterPinId pin) const {
 }
 
 int BlkLocRegistry::net_pin_to_tile_pin_index(const ClusterNetId net_id, int net_pin_index) const {
-    auto& cluster_ctx = g_vpr_ctx.clustering();
+    const auto& cluster_ctx = g_vpr_ctx.clustering();
 
     // Get the logical pin index of pin within its logical block type
     ClusterPinId pin_id = cluster_ctx.clb_nlist.net_pin(net_id, net_pin_index);
@@ -45,22 +45,22 @@ int BlkLocRegistry::net_pin_to_tile_pin_index(const ClusterNetId net_id, int net
 }
 
 void BlkLocRegistry::set_block_location(ClusterBlockId blk_id, const t_pl_loc& location) {
-    auto& device_ctx = g_vpr_ctx.device();
-    auto& cluster_ctx = g_vpr_ctx.clustering();
+    const auto& device_ctx = g_vpr_ctx.device();
+    const auto& cluster_ctx = g_vpr_ctx.clustering();
 
     const std::string& block_name = cluster_ctx.clb_nlist.block_name(blk_id);
 
-    //Check if block location is out of range of grid dimensions
+    // Check if block location is out of range of grid dimensions
     if (location.x < 0 || location.x > int(device_ctx.grid.width() - 1)
         || location.y < 0 || location.y > int(device_ctx.grid.height() - 1)) {
         VPR_THROW(VPR_ERROR_PLACE, "Block %s with ID %d is out of range at location (%d, %d). \n",
                   block_name.c_str(), blk_id, location.x, location.y);
     }
 
-    //Set the location of the block
+    // Set the location of the block
     block_locs_[blk_id].loc = location;
 
-    //Check if block is at an illegal location
+    // Check if block is at an illegal location
     auto physical_tile = device_ctx.grid.get_physical_type({location.x, location.y, location.layer});
     auto logical_block = cluster_ctx.clb_nlist.block_type(blk_id);
 
@@ -77,13 +77,71 @@ void BlkLocRegistry::set_block_location(ClusterBlockId blk_id, const t_pl_loc& l
                   location.layer);
     }
 
-    //Mark the grid location and usage of the block
+    // Mark the grid location and usage of the block
     grid_blocks_.set_block_at_location(location, blk_id);
     grid_blocks_.increment_usage({location.x, location.y, location.layer});
 
     place_sync_external_block_connections(blk_id);
 }
 
+void BlkLocRegistry::clear_all_grid_locs() {
+    const auto& device_ctx = g_vpr_ctx.device();
+
+    std::unordered_set<int> blk_types_to_be_cleared;
+    const auto& logical_block_types = device_ctx.logical_block_types;
+
+    // Insert all the logical block types into the set except the empty type;
+    // clear_block_type_grid_locs() does not expect the empty type to be among the given types.
+    for (const t_logical_block_type& logical_type : logical_block_types) {
+        if (!is_empty_type(&logical_type)) {
+            blk_types_to_be_cleared.insert(logical_type.index);
+        }
+    }
+
+    clear_block_type_grid_locs(blk_types_to_be_cleared);
+}
+
+void BlkLocRegistry::clear_block_type_grid_locs(const std::unordered_set<int>& unplaced_blk_types_index) {
+    const auto& device_ctx = g_vpr_ctx.device();
+    const auto& cluster_ctx = g_vpr_ctx.clustering();
+
+    bool clear_all_block_types = false;
+
+    /* Check whether all types should be cleared.
+     * logical_block_types contains the empty type, which needs to be ignored.
+     * Having every non-empty type in unplaced_blk_types_index means this is the first iteration, hence all grid locations need to be cleared.
+     */
+    if (unplaced_blk_types_index.size() == device_ctx.logical_block_types.size() - 1) {
+        clear_all_block_types = true;
+    }
+
+    /* We'll use the grid to record where everything goes. Initialize the grid so that
+     * no blocks are placed anywhere.
+     */
+    for (int layer_num = 0; layer_num < device_ctx.grid.get_num_layers(); layer_num++) {
+        for (int i = 0; i < (int)device_ctx.grid.width(); i++) {
+            for (int j = 0; j < (int)device_ctx.grid.height(); j++) {
+                const t_physical_tile_type_ptr type = device_ctx.grid.get_physical_type({i, j, layer_num});
+                int itype = type->index;
+                if (clear_all_block_types || unplaced_blk_types_index.count(itype)) {
+                    grid_blocks_.set_usage({i, j, layer_num}, 0);
+                    for (int k = 0; k < device_ctx.physical_tile_types[itype].capacity; k++) {
+                        grid_blocks_.set_block_at_location({i, j, k, layer_num}, ClusterBlockId::INVALID());
+                    }
+                }
+            }
+        }
+    }
+
+    // Similarly, mark all blocks as not being placed yet.
+    for (ClusterBlockId blk_id : cluster_ctx.clb_nlist.blocks()) {
+        int blk_type = cluster_ctx.clb_nlist.block_type(blk_id)->index;
+        if (clear_all_block_types || unplaced_blk_types_index.count(blk_type)) {
+            block_locs_[blk_id].loc = t_pl_loc();
+        }
+    }
+}
+
 void BlkLocRegistry::place_sync_external_block_connections(ClusterBlockId iblk) {
     const auto& cluster_ctx = g_vpr_ctx.clustering();
     const auto& clb_nlist = cluster_ctx.clb_nlist;
@@ -119,7 +177,7 @@ void BlkLocRegistry::place_sync_external_block_connections(ClusterBlockId iblk)
 }
 
 void BlkLocRegistry::apply_move_blocks(const t_pl_blocks_to_be_moved& blocks_affected) {
-    auto& device_ctx = g_vpr_ctx.device();
+    const auto& device_ctx = g_vpr_ctx.device();
 
     VTR_ASSERT_DEBUG(expected_transaction_ == e_expected_transaction::APPLY);
 
@@ -177,7 +235,7 @@ void BlkLocRegistry::commit_move_blocks(const t_pl_blocks_to_be_moved& blocks_af
 }
 
 void BlkLocRegistry::revert_move_blocks(const t_pl_blocks_to_be_moved& blocks_affected) {
-    auto& device_ctx = g_vpr_ctx.device();
+    const auto& device_ctx = g_vpr_ctx.device();
 
     VTR_ASSERT_DEBUG(expected_transaction_ == e_expected_transaction::COMMIT_REVERT);
 
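Note: the core idea behind the new clear_block_type_grid_locs() above is that a grid location is reset (usage zeroed, occupants invalidated) only when its tile type index is in the given set, or unconditionally when every non-empty type is listed. The standalone sketch below mirrors that selective-clearing pattern on a toy grid; Tile, clear_grid_locs, and the -1 "invalid block" sentinel are illustrative stand-ins, not VPR types or API.

```cpp
#include <cstdio>
#include <unordered_set>
#include <vector>

// Toy stand-in for a grid location's bookkeeping (illustrative only).
struct Tile {
    int type_index; // which physical tile type sits at this location
    int usage;      // how many blocks are currently placed here
    int block_id;   // -1 means "no block placed" (stand-in for INVALID())
};

// Reset usage/occupancy only for tiles whose type is in types_to_clear,
// or for every tile when clear_all is true.
void clear_grid_locs(std::vector<Tile>& grid,
                     const std::unordered_set<int>& types_to_clear,
                     bool clear_all) {
    for (Tile& tile : grid) {
        if (clear_all || types_to_clear.count(tile.type_index)) {
            tile.usage = 0;
            tile.block_id = -1;
        }
    }
}

int main() {
    // Two tile types: 0 (CLB-like) and 1 (RAM-like), all locations occupied.
    std::vector<Tile> grid = {{0, 1, 7}, {1, 1, 8}, {0, 1, 9}};

    // Clear only type 0; the type-1 tile keeps its block.
    clear_grid_locs(grid, {0}, /*clear_all=*/false);

    for (const Tile& tile : grid) {
        std::printf("type %d usage %d block %d\n",
                    tile.type_index, tile.usage, tile.block_id);
    }
    return 0;
}
```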