@@ -18,14 +18,14 @@ def crawl_tables(self, ctx: RuntimeContext):
         `$inventory_database.tables`. Note that the `inventory_database` is set in the configuration file. The metadata
         stored is then used in the subsequent tasks and workflows to, for example, find all Hive Metastore tables that
         cannot easily be migrated to Unity Catalog."""
-        ctx.tables_crawler.snapshot()
+        ctx.tables_crawler.snapshot(force_refresh=True)
 
     @job_task
     def crawl_udfs(self, ctx: RuntimeContext):
         """Iterates over all UDFs in the Hive Metastore of the current workspace and persists their metadata in the
         table named `$inventory_database.udfs`. This inventory is currently used when scanning securable objects for
         issues with grants that cannot be migrated to Unity Catalog."""
-        ctx.udfs_crawler.snapshot()
+        ctx.udfs_crawler.snapshot(force_refresh=True)
 
     @job_task(job_cluster="tacl")
     def setup_tacl(self, ctx: RuntimeContext):
@@ -40,15 +40,15 @@ def crawl_grants(self, ctx: RuntimeContext):
 
         Note: This job runs on a separate cluster (named `tacl`) as it requires the proper configuration to have the Table
         ACLs enabled and available for retrieval."""
-        ctx.grants_crawler.snapshot()
+        ctx.grants_crawler.snapshot(force_refresh=True)
 
     @job_task(depends_on=[crawl_tables])
     def estimate_table_size_for_migration(self, ctx: RuntimeContext):
         """Scans the previously created Delta table named `$inventory_database.tables` and locates tables that cannot be
         "synced". These tables will have to be cloned in the migration process.
         Assesses the size of these tables and creates the `$inventory_database.table_size` table to list these sizes.
         The table size is a factor in deciding whether to clone these tables."""
-        ctx.table_size_crawler.snapshot()
+        ctx.table_size_crawler.snapshot(force_refresh=True)
 
     @job_task
     def crawl_mounts(self, ctx: RuntimeContext):
@@ -58,7 +58,7 @@ def crawl_mounts(self, ctx: RuntimeContext):
 
         The assessment involves scanning the workspace to compile a list of all existing mount points and subsequently
         storing this information in the `$inventory.mounts` table. This is crucial for planning the migration."""
-        ctx.mounts_crawler.snapshot()
+        ctx.mounts_crawler.snapshot(force_refresh=True)
 
     @job_task(depends_on=[crawl_mounts, crawl_tables])
     def guess_external_locations(self, ctx: RuntimeContext):
@@ -70,7 +70,7 @@ def guess_external_locations(self, ctx: RuntimeContext):
         - Extracting all the locations associated with tables that do not use DBFS directly, but a mount point instead
         - Scanning all these locations to identify folders that can act as shared path prefixes
         - These identified external locations will be created subsequently prior to the actual table migration"""
-        ctx.external_locations.snapshot()
+        ctx.external_locations.snapshot(force_refresh=True)
 
     @job_task
     def assess_jobs(self, ctx: RuntimeContext):
@@ -83,7 +83,7 @@ def assess_jobs(self, ctx: RuntimeContext):
         - Clusters with incompatible Spark config tags
         - Clusters referencing DBFS locations in one or more config options
         """
-        ctx.jobs_crawler.snapshot()
+        ctx.jobs_crawler.snapshot(force_refresh=True)
 
     @job_task
     def assess_clusters(self, ctx: RuntimeContext):
@@ -96,7 +96,7 @@ def assess_clusters(self, ctx: RuntimeContext):
         - Clusters with incompatible Spark config tags
         - Clusters referencing DBFS locations in one or more config options
         """
-        ctx.clusters_crawler.snapshot()
+        ctx.clusters_crawler.snapshot(force_refresh=True)
 
     @job_task
     def assess_pipelines(self, ctx: RuntimeContext):
@@ -109,7 +109,7 @@ def assess_pipelines(self, ctx: RuntimeContext):
 
         Subsequently, a list of all the pipelines with matching configurations is stored in the
         `$inventory.pipelines` table."""
-        ctx.pipelines_crawler.snapshot()
+        ctx.pipelines_crawler.snapshot(force_refresh=True)
 
     @job_task
     def assess_incompatible_submit_runs(self, ctx: RuntimeContext):
@@ -122,7 +122,7 @@ def assess_incompatible_submit_runs(self, ctx: RuntimeContext):
         It also combines several submit runs under a single pseudo_id based on a hash of the submit run configuration.
         Subsequently, a list of all the incompatible runs with failures is stored in the
         `$inventory.submit_runs` table."""
-        ctx.submit_runs_crawler.snapshot()
+        ctx.submit_runs_crawler.snapshot(force_refresh=True)
 
     @job_task
     def crawl_cluster_policies(self, ctx: RuntimeContext):
@@ -133,7 +133,7 @@ def crawl_cluster_policies(self, ctx: RuntimeContext):
 
         Subsequently, a list of all the policies with matching configurations is stored in the
         `$inventory.policies` table."""
-        ctx.policies_crawler.snapshot()
+        ctx.policies_crawler.snapshot(force_refresh=True)
 
     @job_task(cloud="azure")
     def assess_azure_service_principals(self, ctx: RuntimeContext):
@@ -147,7 +147,7 @@ def assess_azure_service_principals(self, ctx: RuntimeContext):
         Subsequently, the list of all the Azure Service Principals referred to in those configurations is saved
         in the `$inventory.azure_service_principals` table."""
         if ctx.is_azure:
-            ctx.azure_service_principal_crawler.snapshot()
+            ctx.azure_service_principal_crawler.snapshot(force_refresh=True)
 
     @job_task
     def assess_global_init_scripts(self, ctx: RuntimeContext):
@@ -156,7 +156,7 @@ def assess_global_init_scripts(self, ctx: RuntimeContext):
 
         It looks in:
         - the list of all global init scripts, which is saved in the `$inventory.global_init_scripts` table."""
-        ctx.global_init_scripts_crawler.snapshot()
+        ctx.global_init_scripts_crawler.snapshot(force_refresh=True)
 
     @job_task
     def workspace_listing(self, ctx: RuntimeContext):
@@ -168,7 +168,7 @@ def workspace_listing(self, ctx: RuntimeContext):
         if not ctx.config.use_legacy_permission_migration:
             logger.info("Skipping workspace listing as legacy permission migration is disabled.")
             return
-        ctx.workspace_listing.snapshot()
+        ctx.workspace_listing.snapshot(force_refresh=True)
 
     @job_task(depends_on=[crawl_grants, workspace_listing])
     def crawl_permissions(self, ctx: RuntimeContext):
@@ -182,22 +182,22 @@ def crawl_permissions(self, ctx: RuntimeContext):
             return
         permission_manager = ctx.permission_manager
         permission_manager.reset()
-        permission_manager.snapshot()
+        permission_manager.snapshot(force_refresh=True)
 
     @job_task
     def crawl_groups(self, ctx: RuntimeContext):
         """Scans all groups for the local group migration scope"""
-        ctx.group_manager.snapshot()
+        ctx.group_manager.snapshot(force_refresh=True)
 
     @job_task
     def crawl_redash_dashboards(self, ctx: RuntimeContext):
         """Scans all Redash dashboards."""
-        ctx.redash_crawler.snapshot()
+        ctx.redash_crawler.snapshot(force_refresh=True)
 
     @job_task
     def crawl_lakeview_dashboards(self, ctx: RuntimeContext):
         """Scans all Lakeview dashboards."""
-        ctx.lakeview_crawler.snapshot()
+        ctx.lakeview_crawler.snapshot(force_refresh=True)
 
     @job_task(depends_on=[crawl_redash_dashboards, crawl_lakeview_dashboards])
     def assess_dashboards(self, ctx: RuntimeContext):
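
Every hunk above makes the same change: each assessment task now calls `snapshot(force_refresh=True)` instead of the bare `snapshot()`, so the workflow re-crawls the live workspace rather than reusing inventory rows persisted by an earlier run. The sketch below shows how such a `force_refresh` flag typically behaves on a cache-backed crawler; the `CrawlerBase` name and its `_try_fetch`/`_crawl`/`_persist` hooks are illustrative assumptions, not the actual ucx implementation.

    # Minimal sketch of the force_refresh pattern, assuming a cache-backed crawler.
    # Class and method names are hypothetical, not the actual ucx CrawlerBase API.
    from abc import ABC, abstractmethod


    class CrawlerBase(ABC):
        """Persists crawl results in an inventory table and reuses them later."""

        def snapshot(self, *, force_refresh: bool = False) -> list[dict]:
            if not force_refresh:
                cached = self._try_fetch()  # cheap read of previously persisted rows
                if cached:
                    return cached           # reuse the existing inventory
            rows = self._crawl()            # expensive scan of the live workspace
            self._persist(rows)             # overwrite the inventory table
            return rows

        @abstractmethod
        def _try_fetch(self) -> list[dict]:
            """Read rows already stored in the $inventory_database table."""

        @abstractmethod
        def _crawl(self) -> list[dict]:
            """Enumerate objects in the live workspace."""

        @abstractmethod
        def _persist(self, rows: list[dict]) -> None:
            """Replace the inventory table contents with fresh rows."""

With `force_refresh=True` the cached-read branch is skipped entirely, which is why a re-run of the assessment workflow reports the current workspace state instead of stale rows from a previous run.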