| |
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| from typing import Dict, Optional |
|
|
|
|
def run_natality_tutorial(override_values: Optional[Dict[str, str]] = None) -> None:
    """Create a Google BigQuery linear regression input table.

    In the code below, the following actions are taken:
    * A new dataset is created "natality_regression."
    * A query is run against the public dataset,
      bigquery-public-data.samples.natality, selecting only the data of
      interest to the regression, the output of which is stored in a new
      "regression_input" table.
    * The output table is moved over the wire to the user's default project via
      the built-in BigQuery Connector for Spark that bridges BigQuery and
      Cloud Dataproc.

    Args:
        override_values: Optional overrides for the sample's defaults
            (only the "dataset_id" key is honored). Used by tests.
    """
    # Imported inside the function so the sample shows its dependency at the
    # point of use (the surrounding file may be rendered as a doc snippet).
    from google.cloud import bigquery

    if override_values is None:
        override_values = {}

    # Create a client; picks up the project from the ambient credentials.
    client = bigquery.Client()

    # Prepare a reference to a new dataset for storing the query results.
    # The default name can be overridden (e.g. by tests) via override_values.
    dataset_id = override_values.get("dataset_id", "natality_regression")
    dataset_id_full = f"{client.project}.{dataset_id}"

    dataset = bigquery.Dataset(dataset_id_full)

    # Create the new BigQuery dataset.
    dataset = client.create_dataset(dataset)

    # Configure the query job: direct the results into a new
    # "regression_input" table inside the dataset created above.
    job_config = bigquery.QueryJobConfig()
    job_config.destination = f"{dataset_id_full}.regression_input"

    # Set up a query in Standard SQL: select only the columns the regression
    # needs, and drop rows where any of them is NULL.
    query = """
    SELECT
        weight_pounds, mother_age, father_age, gestation_weeks,
        weight_gain_pounds, apgar_5min
    FROM
        `bigquery-public-data.samples.natality`
    WHERE
        weight_pounds IS NOT NULL
        AND mother_age IS NOT NULL
        AND father_age IS NOT NULL
        AND gestation_weeks IS NOT NULL
        AND weight_gain_pounds IS NOT NULL
        AND apgar_5min IS NOT NULL
    """

    # Run the query and block until it completes; the results land in the
    # destination table configured above.
    client.query_and_wait(query, job_config=job_config)
|
|
|
|
# Script entry point: run the tutorial with its default settings (no
# override_values) when this file is executed directly.
if __name__ == "__main__":
    run_natality_tutorial()
|
|