@@ -114,6 +114,7 @@ def load_parquet(
     destination_table_ref: bigquery.TableReference,
     location: Optional[str],
     schema: Optional[Dict[str, Any]],
+    billing_project: Optional[str] = None,
 ):
     job_config = bigquery.LoadJobConfig()
     job_config.write_disposition = "WRITE_APPEND"
@@ -126,7 +127,11 @@ def load_parquet(

     try:
         client.load_table_from_dataframe(
-            dataframe, destination_table_ref, job_config=job_config, location=location,
+            dataframe,
+            destination_table_ref,
+            job_config=job_config,
+            location=location,
+            project=billing_project,
         ).result()
     except pyarrow.lib.ArrowInvalid as exc:
         raise exceptions.ConversionError(
@@ -162,6 +167,7 @@ def load_csv_from_dataframe(
     location: Optional[str],
     chunksize: Optional[int],
     schema: Optional[Dict[str, Any]],
+    billing_project: Optional[str] = None,
 ):
     bq_schema = None

@@ -171,7 +177,11 @@ def load_csv_from_dataframe(

     def load_chunk(chunk, job_config):
         client.load_table_from_dataframe(
-            chunk, destination_table_ref, job_config=job_config, location=location,
+            chunk,
+            destination_table_ref,
+            job_config=job_config,
+            location=location,
+            project=billing_project,
         ).result()

     return load_csv(dataframe, chunksize, bq_schema, load_chunk)
@@ -184,6 +194,7 @@ def load_csv_from_file(
     location: Optional[str],
     chunksize: Optional[int],
     schema: Optional[Dict[str, Any]],
+    billing_project: Optional[str] = None,
 ):
     """Manually encode a DataFrame to CSV and use the buffer in a load job.

@@ -204,6 +215,7 @@ def load_chunk(chunk, job_config):
                 destination_table_ref,
                 job_config=job_config,
                 location=location,
+                project=billing_project,
             ).result()
         finally:
             chunk_buffer.close()
@@ -219,19 +231,39 @@ def load_chunks(
     schema=None,
     location=None,
     api_method="load_parquet",
+    billing_project: Optional[str] = None,
 ):
     if api_method == "load_parquet":
-        load_parquet(client, dataframe, destination_table_ref, location, schema)
+        load_parquet(
+            client,
+            dataframe,
+            destination_table_ref,
+            location,
+            schema,
+            billing_project=billing_project,
+        )
         # TODO: yield progress depending on result() with timeout
         return [0]
     elif api_method == "load_csv":
         if FEATURES.bigquery_has_from_dataframe_with_csv:
             return load_csv_from_dataframe(
-                client, dataframe, destination_table_ref, location, chunksize, schema
+                client,
+                dataframe,
+                destination_table_ref,
+                location,
+                chunksize,
+                schema,
+                billing_project=billing_project,
             )
         else:
             return load_csv_from_file(
-                client, dataframe, destination_table_ref, location, chunksize, schema
+                client,
+                dataframe,
+                destination_table_ref,
+                location,
+                chunksize,
+                schema,
+                billing_project=billing_project,
            )
     else:
         raise ValueError(
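
For context, a minimal sketch of how the new `billing_project` argument might be exercised once this change lands. The import path assumes these helpers live in `pandas_gbq.load` (as suggested by the function names above), and the project, dataset, and table IDs are placeholders, not values taken from the change itself:

```python
import pandas
from google.cloud import bigquery

from pandas_gbq.load import load_chunks  # assumed import path

# Placeholder IDs for illustration only; substitute your own.
client = bigquery.Client(project="data-project")
destination = bigquery.TableReference.from_string("data-project.my_dataset.scores")
dataframe = pandas.DataFrame({"name": ["Ada", "Grace"], "score": [99, 100]})

# With billing_project set, the diff forwards it as ``project=`` to
# load_table_from_dataframe / load_table_from_file, so the load job is
# created under (and billed to) "billing-project" rather than the
# client's default project, while the data still lands in `destination`.
load_chunks(
    client,
    dataframe,
    destination,
    schema=None,
    location="US",
    api_method="load_parquet",
    billing_project="billing-project",
)
```

This mirrors the usual BigQuery pattern of separating the project that stores the data from the project that pays for the job (e.g. when loading into a shared dataset you can only bill against your own project).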