# Type variable bound to DatasetQuery so decorators like detach() can declare
# that they return the same (sub)class they were given — TODO confirm against
# the full definition of detach(), whose body is outside this view.
T = TypeVar ("T" , bound = "DatasetQuery" )
102102
103103
def udf_input_table_name(run_group_id: str, _hash: str) -> str:
    """Return the warehouse table name for a UDF input table.

    Args:
        run_group_id: Identifier of the run group that owns the input table,
            so ancestor/descendant jobs in the same group share one table.
        _hash: Hash of the input query, making the name unique per query.

    Returns:
        A name of the form ``udf_<run_group_id>_<hash>_input``.
    """
    # NOTE: no spaces may appear in the name — it is used directly as a
    # database table identifier.
    return f"udf_{run_group_id}_{_hash}_input"
106-
107-
def udf_output_table_name(job_id: str, _hash: str) -> str:
    """Return the warehouse table name for a completed UDF output table.

    Args:
        job_id: Identifier of the job that produced the output.
        _hash: Hash of the UDF output, making the name unique per result.

    Returns:
        A name of the form ``udf_<job_id>_<hash>_output``.
    """
    # NOTE: no spaces may appear in the name — it is used directly as a
    # database table identifier.
    return f"udf_{job_id}_{_hash}_output"
110-
111-
def udf_partial_output_table_name(job_id: str, _hash: str) -> str:
    """Return the warehouse table name for an in-progress (partial) UDF output.

    Partial tables hold rows produced so far; on successful completion the
    table is renamed to the final ``..._output`` name.

    Args:
        job_id: Identifier of the job producing the output.
        _hash: Hash of the UDF output, making the name unique per result.

    Returns:
        A name of the form ``udf_<job_id>_<hash>_output_partial``.
    """
    # NOTE: no spaces may appear in the name — it is used directly as a
    # database table identifier.
    return f"udf_{job_id}_{_hash}_output_partial"
114-
115-
116104def detach (
117105 method : "Callable[Concatenate[T, P], T]" ,
118106) -> "Callable[Concatenate[T, P], T]" :
@@ -970,7 +958,7 @@ def get_or_create_input_table(self, query: Select, _hash: str) -> "Table":
970958 Returns the input table.
971959 """
972960 group_id = self .job .run_group_id or self .job .id
973- input_table_name = udf_input_table_name (group_id , _hash )
961+ input_table_name = Checkpoint . input_table_name (group_id , _hash )
974962
975963 # Check if input table already exists (created by ancestor job)
976964 if self .warehouse .db .has_table (input_table_name ):
@@ -1063,10 +1051,10 @@ def _skip_udf(
10631051 checkpoint .job_id ,
10641052 )
10651053 existing_output_table = self .warehouse .get_table (
1066- udf_output_table_name (checkpoint .job_id , checkpoint .hash )
1054+ Checkpoint . output_table_name (checkpoint .job_id , checkpoint .hash )
10671055 )
10681056 output_table = self .warehouse .create_table_from_query (
1069- udf_output_table_name (self .job .id , checkpoint .hash ),
1057+ Checkpoint . output_table_name (self .job .id , checkpoint .hash ),
10701058 sa .select (existing_output_table ),
10711059 create_fn = self .create_output_table ,
10721060 )
@@ -1140,7 +1128,7 @@ def _run_from_scratch(
11401128 input_table = self .get_or_create_input_table (query , hash_input )
11411129
11421130 partial_output_table = self .create_output_table (
1143- udf_partial_output_table_name (self .job .id , partial_hash ),
1131+ Checkpoint . partial_output_table_name (self .job .id , partial_hash ),
11441132 )
11451133
11461134 if self .partition_by is not None :
@@ -1151,7 +1139,7 @@ def _run_from_scratch(
11511139 self .populate_udf_output_table (partial_output_table , input_query )
11521140
11531141 output_table = self .warehouse .rename_table (
1154- partial_output_table , udf_output_table_name (self .job .id , hash_output )
1142+ partial_output_table , Checkpoint . output_table_name (self .job .id , hash_output )
11551143 )
11561144
11571145 if partial_checkpoint :
@@ -1218,7 +1206,7 @@ def _continue_udf(
12181206
12191207 try :
12201208 parent_partial_table = self .warehouse .get_table (
1221- udf_partial_output_table_name (
1209+ Checkpoint . partial_output_table_name (
12221210 self .job .rerun_from_job_id , checkpoint .hash
12231211 )
12241212 )
@@ -1246,7 +1234,9 @@ def _continue_udf(
12461234 len (incomplete_input_ids ),
12471235 )
12481236
1249- partial_table_name = udf_partial_output_table_name (self .job .id , checkpoint .hash )
1237+ partial_table_name = Checkpoint .partial_output_table_name (
1238+ self .job .id , checkpoint .hash
1239+ )
12501240 if incomplete_input_ids :
12511241 # Filter out incomplete inputs - they will be re-processed
12521242 filtered_query = sa .select (parent_partial_table ).where (
@@ -1288,7 +1278,7 @@ def _continue_udf(
12881278 )
12891279
12901280 output_table = self .warehouse .rename_table (
1291- partial_table , udf_output_table_name (self .job .id , hash_output )
1281+ partial_table , Checkpoint . output_table_name (self .job .id , hash_output )
12921282 )
12931283
12941284 self .metastore .remove_checkpoints ([partial_checkpoint .id ])
0 commit comments