From e3f9c8b83bdcd17d781b0e061d58b64528575390 Mon Sep 17 00:00:00 2001
From: Wilfred Tyler Gee
Date: Mon, 20 Jun 2022 19:25:08 +0000
Subject: [PATCH 1/6] Compatible with 3.7

Make compatible with Python 3.7 so it can run some Google services.
Remove arcsec separation.
---
 setup.cfg                               |   4 +-
 src/panoptes/pipeline/observation.py    |  13 +-
 src/panoptes/pipeline/scripts/image.py  |  14 ++-
 src/panoptes/pipeline/utils/metadata.py | 156 ++++++++++++++----------
 src/panoptes/pipeline/utils/sources.py  |  20 +--
 5 files changed, 110 insertions(+), 97 deletions(-)

diff --git a/setup.cfg b/setup.cfg
index 44c6359..baa1df2 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -23,7 +23,7 @@ classifiers =
     License :: OSI Approved :: MIT License
     Operating System :: POSIX
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3 :: Only
     Topic :: Scientific/Engineering :: Astronomy
     Topic :: Scientific/Engineering :: Physics
@@ -67,7 +67,7 @@ install_requires =
 # The usage of test_requires is discouraged, see `Dependency Management` docs
 # tests_require = pytest; pytest-cov
 # Require a specific Python version, e.g. Python 2.7 or >= 3.4
-python_requires = >=3.8
+python_requires = >=3.7

 [options.packages.find]
 where = src
diff --git a/src/panoptes/pipeline/observation.py b/src/panoptes/pipeline/observation.py
index 2ef281e..7e160a9 100644
--- a/src/panoptes/pipeline/observation.py
+++ b/src/panoptes/pipeline/observation.py
@@ -1,7 +1,6 @@
 from typing import List
 from urllib.error import HTTPError

-import numpy.typing as npt
 import pandas
 import pandas as pd
 from google.cloud import firestore
@@ -28,7 +27,7 @@ def get_stamp_locations(sources_file_list: List[str]) -> pandas.DataFrame:
             pos_df = pd.read_parquet(url, columns=[settings.COLUMN_X, settings.COLUMN_Y])
             position_dfs.append(pos_df)
         except HTTPError as e:
-            logger.warning(f'Problem loading parquet at {url=} {e!r}')
+            logger.warning(f'Problem loading parquet at {url} {e!r}')

     num_frames = len(position_dfs)
     print(f'Combining {num_frames} position files')
@@ -50,14 +49,14 @@ def get_stamp_locations(sources_file_list: List[str]) -> pandas.DataFrame:
     y_catalog_diff = (xy_catalog.catalog_wcs_y.max() - xy_catalog.catalog_wcs_y.min()).max()

     if x_catalog_diff >= 18 or y_catalog_diff >= 18:
-        raise RuntimeError(f'Too much drift! {x_catalog_diff=} {y_catalog_diff}')
+        raise RuntimeError(f'Too much drift! {x_catalog_diff} {y_catalog_diff}')

     stamp_width = 10 if x_catalog_diff < 10 else 18
     stamp_height = 10 if y_catalog_diff < 10 else 18

     # Determine stamp size
     stamp_size = (stamp_width, stamp_height)
-    print(f'Using {stamp_size=}.')
+    print(f'Using {stamp_size}.')

     # Get the mean positions
     xy_mean = xy_catalog.mean()
@@ -94,14 +93,12 @@ def get_stamp_locations(sources_file_list: List[str]) -> pandas.DataFrame:
     return stamp_positions

-def make_stamps(stamp_positions: pandas.DataFrame,
-                data: npt.DTypeLike,
-                ) -> pandas.DataFrame:
+def make_stamps(stamp_positions, data) -> pandas.DataFrame:
     stamp_width = int(stamp_positions.stamp_x_max.mean() - stamp_positions.stamp_x_min.mean())
     stamp_height = int(stamp_positions.stamp_y_max.mean() - stamp_positions.stamp_y_min.mean())
     total_stamp_size = int(stamp_width * stamp_height)
     logger.debug(
-        f'Making stamps of {total_stamp_size=} for {len(stamp_positions)} sources from data {data.shape}')
+        f'Making stamps of {total_stamp_size} for {len(stamp_positions)} sources from data {data.shape}')

     stamps = []
     for picid, row in stamp_positions.iterrows():
diff --git a/src/panoptes/pipeline/scripts/image.py b/src/panoptes/pipeline/scripts/image.py
index 916fdce..5509b3a 100644
--- a/src/panoptes/pipeline/scripts/image.py
+++ b/src/panoptes/pipeline/scripts/image.py
@@ -203,6 +203,13 @@ def match_sources(detected_sources: pandas.DataFrame, solved_wcs0: WCS, settings
         f'catalog_wcs_y_int > {image_edge} and '
         f'catalog_wcs_y_int < {settings.params.camera.image_height - image_edge}'
     ).copy()
+
+    # Remove catalog matches that are too far away.
+    if settings.params.catalog.max_separation_arcsec is not None:
+        max_separation_arcsec = settings.params.catalog.max_separation_arcsec
+        print(f'Removing matches > {max_separation_arcsec} arcsec from catalog.')
+        matched_sources = matched_sources.query('catalog_sep <= @max_separation_arcsec')
+
     typer.secho(f'Found {len(matched_sources)} matching sources')

     # There should not be too many duplicates at this point and they are returned in order
@@ -243,14 +250,14 @@ def detect_sources(solved_wcs0, reduced_data, combined_bg_data, combined_bg_resi
     image_segments = segmentation.detect_sources(reduced_data,
                                                  threshold,
                                                  npixels=settings.params.catalog.num_detect_pixels,
-                                                 filter_kernel=kernel,
+                                                 kernel=kernel,
                                                  mask=reduced_data.mask
                                                  )
     typer.secho(f'De-blending image segments')
     deblended_segments = segmentation.deblend_sources(reduced_data,
                                                       image_segments,
                                                       npixels=settings.params.catalog.num_detect_pixels,
-                                                      filter_kernel=kernel,
+                                                      kernel=kernel,
                                                       nlevels=32,
                                                       contrast=0.01)
     typer.secho(
@@ -288,8 +295,7 @@ def detect_sources(solved_wcs0, reduced_data, combined_bg_data, combined_bg_resi
     return detected_sources

-def plate_solve(settings: Settings, filename=None):
-    filename = filename or settings.files.reduced_filename
+def plate_solve(filename):
     typer.secho(f'Plate solving {filename}')
     solved_headers = fits_utils.get_solve_field(str(filename),
                                                 skip_solved=False,
diff --git a/src/panoptes/pipeline/utils/metadata.py b/src/panoptes/pipeline/utils/metadata.py
index 300c9d4..9ac968b 100644
--- a/src/panoptes/pipeline/utils/metadata.py
+++ b/src/panoptes/pipeline/utils/metadata.py
@@ -1,4 +1,5 @@
 import re
+import warnings
 import traceback
 from enum import IntEnum, auto
 from contextlib import suppress
@@ -17,6 +18,8 @@
 from astropy.time import Time
 from astropy.io.fits.header import Header
 from astropy.utils.data import download_file
+from astropy.nddata import CCDData, Cutout2D
+from astropy.wcs import WCS, FITSFixedWarning
 from loguru import logger
@@ -25,6 +28,9 @@
 from panoptes.utils.images import fits as fits_utils

+warnings.filterwarnings('ignore', category=FITSFixedWarning)
+
+
 class SequenceStatus(IntEnum):
     RECEIVING = 0
     RECEIVED = 10
@@ -57,6 +63,7 @@ class ObservationStatus(IntEnum):
     PROCESSED = 35

+IMG_BASE_URL = 'https://storage.googleapis.com/'
 OBS_BASE_URL = 'https://storage.googleapis.com/panoptes-observations'
 OBSERVATIONS_URL = 'https://storage.googleapis.com/panoptes-exp.appspot.com/observations.csv'

@@ -121,7 +128,7 @@ def __post_init__(self):
         if self.path is not None:
             path_match = PATH_MATCHER.match(self.path)
             if path_match is None:
-                raise ValueError(f'Invalid path received: {self.path=}')
+                raise ValueError(f'Invalid path received: {self.path}')

             self.unit_id = path_match.group('unit_id')
             self.camera_id = path_match.group('camera_id')
@@ -189,6 +196,37 @@ def from_fits_header(cls, header):
         return new_instance

+class ObservationInfo():
+    def __init__(self, sequence_id):
+        """Initialize the observation info with a sequence_id"""
+        self.firestore_db = firestore.Client()
+
+        self.sequence_id = sequence_id
+        self.image_metadata = get_observation_metadata(self.sequence_id, firestore_db=self.firestore_db)
+        self.raw_images = get_observation_images(self.image_metadata)
+        self.processed_images = get_observation_images(self.image_metadata, raw=False)
+
+    def get_image_data(self, idx=0, coords=None, box_size=None, use_raw=True):
+        """Downloads the image data."""
+
+        if use_raw:
+            image_list = self.raw_images
+        else:
+            image_list = self.processed_images
+
+        data_img = image_list[idx]
+        wcs_img = self.processed_images[idx]
+
+        data0 = fits_utils.getdata(data_img)
+        wcs0 = fits_utils.getwcs(wcs_img)
+        ccd0 = CCDData(data0, wcs=wcs0, unit='adu')
+
+        if coords is not None and box_size is not None:
+            ccd0 = Cutout2D(ccd0, coords, box_size)
+
+        return ccd0
+
+
 def extract_metadata(header: Header) -> dict:
     """Get the metadata from a FITS image."""
     path_info = ObservationPathInfo.from_fits_header(header)
@@ -263,66 +301,50 @@ def extract_metadata(header: Header) -> dict:
     return dict(unit=unit_info, sequence=sequence_info, image=image_info)

-def get_observation_metadata(sequence_ids, fields=None, show_progress=False):
-    """Get the metadata for the given sequence_id(s).
-
-    NOTE: This is slated for removal soon.
-
-    This function will search for pre-processed observations that have a stored
-    parquet file.
-
-    Note that since the files are stored in parquet format, specifying the `fields`
-    does in fact save on the size of the returned data. If requesting many `sequence_ids`
-    it may be worth figuring out exactly what columns you need first.
-
-    Args:
-        sequence_ids (list): A list of sequence_ids as strings.
-        fields (list|None): A list of fields to fetch from the database in addition
-            to the 'time' and 'sequence_id' columns. If None, returns all fields.
-        show_progress (bool): If True, show a progress bar, default False.
-
-    Returns:
-        `pandas.DataFrame`: DataFrame containing the observation metadata.
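# A minimal usage sketch for the ObservationInfo helper added above. The
# sequence_id is a hypothetical example in the <unit_id>_<camera_id>_<sequence_time>
# format implied by `sequence_id.split('_')`, and creating the Firestore client
# assumes Google Cloud credentials are already configured.
obs_info = ObservationInfo('PAN012_14d3bd_20220620T192508')
ccd0 = obs_info.get_image_data(idx=0)  # full frame as a CCDData (adu) with the solved WCS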
- """ - sequence_ids = listify(sequence_ids) - - observation_dfs = list() +def get_observation_metadata(sequence_id, firestore_db = None): + """Download the image metadata associated with the observation.""" + firestore_db = firestore_db or firestore.Client() - if show_progress: - iterator = tqdm(sequence_ids, desc='Getting image metadata') + unit_id, camera_id, sequence_time = sequence_id.split('_') + + # Get sequence information + sequence_doc_path = f'units/{unit_id}/observations/{sequence_id}' + sequence_doc_ref = firestore_db.document(sequence_doc_path) + + sequence_info = sequence_doc_ref.get().to_dict() + + exptime = sequence_info['total_exptime'] / sequence_info['num_images'] + sequence_info['exptime'] = int(exptime) + + # Get and show the metadata about the observation. + matched_query = sequence_doc_ref.collection('images') + matched_docs = [d.to_dict() for d in matched_query.stream()] + images_df = pd.json_normalize(matched_docs, sep='_') + + # Set a time index. + images_df.time = pd.to_datetime(images_df.time) + images_df = images_df.set_index(['time']).sort_index() + + num_frames = len(images_df) + print(f'Found {num_frames} images in observation') + + return images_df + + +def get_observation_images(images_df, raw=True): + """Get the images for the observation.""" + if raw: + bucket = 'panoptes-images-raw' + file_ext = '.fits.fz' else: - iterator = sequence_ids - - logger.debug(f'Getting images metadata for {len(sequence_ids)} files') - for sequence_id in iterator: - df_file = f'{OBS_BASE_URL}/{sequence_id}-metadata.parquet' - if fields: - fields = listify(fields) - # Always return the ID fields. - fields.insert(0, 'time') - fields.insert(1, 'sequence_id') - fields = list(set(fields)) - try: - df = pd.read_parquet(df_file, columns=fields) - except Exception as e: - logger.warning(f'Problem reading {df_file}: {e!r}') - else: - observation_dfs.append(df) - - if len(observation_dfs) == 0: - logger.info(f'No documents found for sequence_ids={sequence_ids}') - return - - df = pd.concat(observation_dfs) - - # Return column names in sorted order - df = df.reindex(sorted(df.columns), axis=1) - - # TODO(wtgee) any data cleaning or preparation for observations here. - - logger.success(f'Returning {len(df)} rows of metadata sorted by time') - return df.sort_values(by=['time']) - + bucket = 'panoptes-images-processed' + file_ext = '-reduced.fits.fz' + + image_list = [IMG_BASE_URL + bucket + '/' + str(s).replace("_", "/") + file_ext for s in images_df.uid.values] + + return image_list + + def search_observations( coords=None, @@ -332,10 +354,12 @@ def search_observations( ra=None, dec=None, radius=10, # degrees - status='matched', + status='CREATED', min_num_images=1, source_url=OBSERVATIONS_URL, - source=None + source=None, + ra_col='coordinates_mount_ra', + dec_col='coordinates_mount_dec', ): """Search PANOPTES observations. @@ -428,9 +452,9 @@ def search_observations( # Perform filtering on other fields here. 
logger.debug(f'Filtering observations') obs_df.query( - f'dec >= {dec_min} and dec <= {dec_max}' + f'{dec_col} >= {dec_min} and {dec_col} <= {dec_max}' ' and ' - f'ra >= {ra_min} and ra <= {ra_max}' + f'{ra_col} >= {ra_min} and {ra_col} <= {ra_max}' ' and ' f'time >= "{start_date}"' ' and ' @@ -475,7 +499,7 @@ def search_observations( ] logger.success(f'Returning {len(obs_df)} observations') - return obs_df.reindex(columns=columns) + return obs_df #.reindex(columns=columns) def get_firestore_refs( @@ -518,7 +542,7 @@ def record_metadata(bucket_path: str, metadata: dict, **kwargs) -> str: if not metadata: raise RuntimeError('Need valid metadata') - print(f'Recording header metadata in firestore for {bucket_path=}') + print(f'Recording header metadata in firestore for {bucket_path}') path_info = ObservationPathInfo(path=bucket_path) sequence_id = path_info.sequence_id @@ -546,5 +570,5 @@ def record_metadata(bucket_path: str, metadata: dict, **kwargs) -> str: print(f'Error in adding record: {traceback.format_exc()!r}') raise e else: - print(f'Recorded metadata for {path_info.get_full_id()} with {image_doc_ref.id=}') + print(f'Recorded metadata for {path_info.get_full_id()} with id={image_doc_ref.id}') return image_doc_ref.path diff --git a/src/panoptes/pipeline/utils/sources.py b/src/panoptes/pipeline/utils/sources.py index efaac6f..aeaf70d 100644 --- a/src/panoptes/pipeline/utils/sources.py +++ b/src/panoptes/pipeline/utils/sources.py @@ -14,7 +14,7 @@ def get_stars_from_coords(ra: float, dec: float, radius: float = 8.0, **kwargs) dec_min=dec - radius, ) - print(f'Using {limits=} for get_stars') + print(f'Using limits={limits} for get_stars') catalog_stars = get_stars(shape=limits, **kwargs) return catalog_stars @@ -49,7 +49,7 @@ def get_stars_from_wcs(wcs0: WCS, round_to: int = 0, pad: float = 1.0, pad_size= dec_min=dec_min ) - print(f'Searching square shape with {round_to=} and {pad=}: {limits!r}') + print(f'Searching square shape with round_to={round_to} and pad={pad}: {limits!r}') catalog_stars = get_stars(shape=limits, **kwargs) return catalog_stars @@ -121,7 +121,7 @@ def get_stars( (vmag_partition BETWEEN {vmag_min} AND {vmag_max - 1}) """ - print(f'{sql=}') + print(f'sql={sql}') if bq_client is None or bqstorage_client is None: bq_client, bqstorage_client = get_bq_clients() @@ -142,7 +142,6 @@ def get_stars( def get_catalog_match(point_sources, wcs=None, catalog_stars=None, - max_separation_arcsec=None, ra_column='measured_ra', dec_column='measured_dec', **kwargs): @@ -201,10 +200,6 @@ def get_catalog_match(point_sources, The best policy would be to try to minimize calls to this function. The resulting dataframe can be saved locally with `point_sources.to_csv(path_name)`. - If a `max_separation_arcsec` is given then results will be filtered if their - match with `source-extractor` was larger than the number given. Typical values would - be in the range of 20-30 arcsecs, which corresponds to 2-3 pixels. - Returns: `pandas.DataFrame`: A dataframe with the catalog information added to the sources. @@ -220,8 +215,6 @@ def get_catalog_match(point_sources, ra_column (str): The column name to use for the RA coordinates, default `measured_ra`. dec_column (str): The column name to use for the Dec coordinates, default `measured_dec`. origin (int, optional): The origin for catalog matching, either 0 or 1 (default). - max_separation_arcsec (float|None, optional): If not None, sources more - than this many arcsecs from catalog will be filtered. 
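# A sketch of the separation filter this removed docstring describes; per this
# same patch it now happens in scripts/image.py (see the match_sources hunk).
# Values below are made up; the old docstring suggested 20-30 arcsec, roughly
# 2-3 pixels. The '@' prefix lets pandas.query reference a local variable.
import pandas as pd

matched_sources = pd.DataFrame({'picid': [1, 2], 'catalog_sep': [12.0, 48.0]})
max_separation_arcsec = 25  # example threshold only
matched_sources = matched_sources.query('catalog_sep <= @max_separation_arcsec')
# Keeps only picid 1, whose separation is within the threshold.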
        return_unmatched (bool, optional): If all results from catalog should be returned,
            not just those with a positive match.
        origin (int): The origin for the WCS. See `all_world2pix`. Default 1.
@@ -289,13 +282,6 @@ def get_catalog_match(point_sources,
    #     new_column_order.insert(i, col)
    # matched_sources = matched_sources.reindex(columns=new_column_order)

-    print(f'Point sources: {len(matched_sources)} for wcs={wcs.wcs.crval!r}')
-
-    # Remove catalog matches that are too far away.
-    if max_separation_arcsec is not None:
-        print(f'Removing matches > {max_separation_arcsec} arcsec from catalog.')
-        matched_sources = matched_sources.query('catalog_sep <= @max_separation_arcsec')
-
     print(f'Returning matched sources: {len(matched_sources)} for wcs={wcs.wcs.crval!r}')

     return matched_sources

From e2a3f9f139c993ecb24369f400d9569a96c8ac33 Mon Sep 17 00:00:00 2001
From: Wilfred Tyler Gee
Date: Mon, 20 Jun 2022 19:42:18 +0000
Subject: [PATCH 2/6] Remove unused items.

Clean up the repo so it loads in mybinder more easily.
---
 Dockerfile                         |   91 +-
 deploy.sh                          |   39 -
 docker-compose.yaml                |   15 -
 environment.yaml                   |   30 -
 notebooks/ProcessFITS.ipynb        | 1019 +++++++++++++++--
 notebooks/ProcessObservation.ipynb | 1619 +++++++++++++++++++++++++---
 notebooks/tess_sectors_north.yaml  |  208 ----
 notebooks/tess_sectors_south.yaml  |  208 ----
 8 files changed, 2466 insertions(+), 763 deletions(-)
 delete mode 100755 deploy.sh
 delete mode 100644 docker-compose.yaml
 delete mode 100644 environment.yaml
 delete mode 100644 notebooks/tess_sectors_north.yaml
 delete mode 100644 notebooks/tess_sectors_south.yaml

diff --git a/Dockerfile b/Dockerfile
index 8b39925..7353b11 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,57 +1,36 @@
-ARG image_url=gcr.io/panoptes-exp/panoptes-pocs
-ARG image_tag=develop
-FROM ${image_url}:${image_tag} AS pipeline-base
-
-LABEL description="Development environment for working with the PIPELINE"
-LABEL maintainers="developers@projectpanoptes.org"
-LABEL repo="github.com/panoptes/panoptes-pipeline"
-
-ARG userid=1000
-ENV USERID $userid
-
-ENV DEBIAN_FRONTEND=noninteractive
-ENV LANG=C.UTF-8 LC_ALL=C.UTF-8
-ENV PYTHONUNBUFFERED True
-
-ENV PORT 8080
-
-USER "${userid}"
-
-USER "${USERID}"
-WORKDIR /build
-COPY --chown="${USERID}:${USERID}" . .
-RUN echo "Building wheel" && \
-    sudo chown -R "${userid}:${userid}" /build && \
-    python setup.py bdist_wheel -d /build/dist
-
-FROM pipeline-base AS panoptes-pipeline
-
-USER "${USERID}"
-WORKDIR /build
-COPY --from=pipeline-base /build/dist/ /build/dist
-RUN echo "Installing module" && \
-    pip install --no-cache-dir "$(ls /build/dist/*.whl)" && \
+FROM debian:11-slim
+
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    astrometry.net source-extractor dcraw exiftool \
+    libcfitsio-dev libcfitsio-bin \
+    libpng-dev libjpeg-dev \
+    libfreetype6-dev \
+    libffi-dev && \
     # Cleanup
-    pip cache purge && \
-    conda clean -fay && \
-    sudo apt-get autoremove --purge --yes \
-    gcc pkg-config git && \
-    sudo apt-get autoclean --yes && \
-    sudo apt-get --yes clean && \
-    sudo rm -rf /var/lib/apt/lists/*
-
-USER "${USERID}"
-WORKDIR /app
-COPY --chown="${USERID}:${USERID}" ./services/* /app/
-COPY ./notebooks/ProcessFITS.ipynb .
-COPY ./notebooks/ProcessObservation.ipynb .
- -RUN echo "Creating /input and /output directories" && \ - sudo mkdir -p /input && \ - sudo mkdir -p /output && \ - sudo chown -R "${USERID}:${USERID}" /input && \ - sudo chown -R "${USERID}:${USERID}" /output && \ - sudo chmod -R 777 /input && \ - sudo chmod -R 777 /output - -CMD [ "gunicorn --workers 1 --threads 8 --timeout 0 -k uvicorn.workers.UvicornWorker --bind :${PORT:-8080} pipeline:app" ] + apt-get autoremove --purge --yes && \ + apt-get autoclean --yes && \ + rm -rf /var/lib/apt/lists/* + +ADD http://data.astrometry.net/4100/index-4108.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4110.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4111.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4112.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4113.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4114.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4115.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4116.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4117.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4118.fits /usr/share/astrometry +ADD http://data.astrometry.net/4100/index-4119.fits /usr/share/astrometry + +RUN conda update -n base conda && \ + conda init && \ + conda create -n pipeline -c conda-forge \ + astropy astroplan astroquery photutils \ + scipy numpy pandas scikit-learn scikit-image numexpr \ + bokeh seaborn plotly panel \ + jupyterlab ipywidgets ipython-autotime \ + gcsfs google-cloud-storage \ + h5py \ + pip diff --git a/deploy.sh b/deploy.sh deleted file mode 100755 index 67f0c46..0000000 --- a/deploy.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -set -e - -TOPIC=${1:-panoptes-pipeline-image} -BASE_TAG=$(git rev-parse HEAD) -PROJECT_ID=panoptes-exp - -echo "Building image" -gcloud builds submit --tag "gcr.io/${PROJECT_ID}/${TOPIC}:${BASE_TAG}" . - -#gcloud compute instances update-container \ -# pipeline-processing \ -# --zone us-central1-a \ -# --container-image "gcr.io/${PROJECT_ID}/${TOPIC}:${BASE_TAG}" - -echo "Deploying ${TOPIC} to Cloud Run" -gcloud run deploy "${TOPIC}" \ - --region "us-west1" \ - --image "gcr.io/${PROJECT_ID}/${TOPIC}:${BASE_TAG}" \ - --no-allow-unauthenticated \ - --platform managed \ - --cpu 2 \ - --memory "8Gi" \ - --max-instances 500 \ - --concurrency 1 \ - --timeout "20m" - -echo "Deploying ${TOPIC/image/observation} to Cloud Run" -gcloud run deploy "${TOPIC/image/observation}" \ - --region "us-west1" \ - --image "gcr.io/${PROJECT_ID}/${TOPIC}:${BASE_TAG}" \ - --no-allow-unauthenticated \ - --platform managed \ - --cpu 2 \ - --memory "8Gi" \ - --max-instances 50 \ - --concurrency 1 \ - --timeout "20m" diff --git a/docker-compose.yaml b/docker-compose.yaml deleted file mode 100644 index 9df978c..0000000 --- a/docker-compose.yaml +++ /dev/null @@ -1,15 +0,0 @@ -services: - panoptes-pipeline: - image: "${IMAGE_NAME:-gcr.io/panoptes-exp/panoptes-pipeline}:${TAG_NAME:-latest}" - build: - context: . 
- dockerfile: ./Dockerfile - restart: never - init: true - tty: true - container_name: panoptes-pipeline - hostname: panoptes-pipeline - environment: - GOOGLE_APPLICATION_CREDENTIALS: - volumes: - - .:/output diff --git a/environment.yaml b/environment.yaml deleted file mode 100644 index 2423bc6..0000000 --- a/environment.yaml +++ /dev/null @@ -1,30 +0,0 @@ -channels: - - https://conda.anaconda.org/conda-forge -dependencies: - - astropy - - bottleneck - - fastapi - - gcsfs - - google-cloud-bigquery - - google-cloud-bigquery-storage - - google-cloud-pubsub - - google-cloud-storage - - h5py - - ipython-autotime - - matplotlib-base - - numexpr - - numpy - - pandas - - papermill - - photutils - - pip - - pyarrow - - tables - - tabulate - - scikit-learn - - scipy - - seaborn - - uvicorn - - pip: - - "git+https://github.com/panoptes/panoptes-utils@develop#egg=panoptes-utils[config,images,social]" - - google-cloud-firestore diff --git a/notebooks/ProcessFITS.ipynb b/notebooks/ProcessFITS.ipynb index d661c12..5a1c608 100644 --- a/notebooks/ProcessFITS.ipynb +++ b/notebooks/ProcessFITS.ipynb @@ -2,7 +2,7 @@ "cells": [ { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -11,11 +11,19 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 2, "metadata": { "tags": [] }, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "time: 1.87 s\n" + ] + } + ], "source": [ "import tempfile\n", "from pathlib import Path\n", @@ -25,6 +33,9 @@ "import seaborn as sb\n", "from IPython.display import JSON, display, Markdown\n", "from astropy.wcs import WCS\n", + "from astropy.io import fits\n", + "from astropy.table import Table\n", + "from astropy.nddata import CCDData, Cutout2D\n", "from loguru import logger\n", "from panoptes.utils.images import bayer\n", "from panoptes.utils.images import fits as fits_utils\n", @@ -41,25 +52,51 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "
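A minimal sketch of the FITS loading that the notebook imports above support,
mirroring the `ObservationInfo.get_image_data()` helper added in the first
patch; the file path here is a placeholder:

    from astropy.nddata import CCDData
    from panoptes.utils.images import fits as fits_utils

    data0 = fits_utils.getdata('image-reduced.fits.fz')  # placeholder path
    wcs0 = fits_utils.getwcs('image-reduced.fits.fz')
    ccd0 = CCDData(data0, wcs=wcs0, unit='adu')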