This is part 3 in my series on writing modern idiomatic pandas.


Indexes can be a difficult concept to grasp at first. I suspect this is partly because they’re somewhat peculiar to pandas. These aren’t like the indexes put on relational database tables for performance optimizations. Rather, they’re more like the row labels of an R data.frame, but much more capable.

Indexes offer

  • metadata container
  • easy label-based row selection
  • easy label-based alignment in operations
  • label-based concatenation

To demonstrate these, we’ll first fetch some more data. This will be weather data from sensors at a bunch of airports across the US. See here for the example scraper I based this off of.

%matplotlib inline

import json
import glob
import datetime
from io import StringIO

import requests
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

sns.set_style('ticks')

# States are broken into networks. The networks have a list of ids, each representing a station.
# We will take that list of ids and pass them as query parameters to the URL we built up earlier.
states = """AK AL AR AZ CA CO CT DE FL GA HI IA ID IL IN KS KY LA MA MD ME
 MI MN MO MS MT NC ND NE NH NJ NM NV NY OH OK OR PA RI SC SD TN TX UT VA VT
 WA WI WV WY""".split()

# IEM has Iowa AWOS sites in its own labeled network
networks = ['AWOS'] + ['{}_ASOS'.format(state) for state in states]
def get_weather(stations, start=pd.Timestamp('2014-01-01'),
                end=pd.Timestamp('2014-01-31')):
    '''
    Fetch weather data from MESONet between ``start`` and ``end``.
    '''
    url = ("http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
           "&data=tmpf&data=relh&data=sped&data=mslp&data=p01i&data=vsby&data=gust_mph&data=skyc1&data=skyc2&data=skyc3"
           "&tz=Etc/UTC&format=comma&latlon=no"
           "&{start:year1=%Y&month1=%m&day1=%d}"
           "&{end:year2=%Y&month2=%m&day2=%d}&{stations}")
    stations = "&".join("station=%s" % s for s in stations)
    weather = (pd.read_csv(url.format(start=start, end=end, stations=stations),
                           comment="#")
                 .rename(columns={"valid": "date"})
                 .rename(columns=str.strip)
                 .assign(date=lambda df: pd.to_datetime(df['date']))
                 .set_index(["station", "date"])
                 .sort_index())
    float_cols = ['tmpf', 'relh', 'sped', 'mslp', 'p01i', 'vsby', "gust_mph"]
    weather[float_cols] = weather[float_cols].apply(pd.to_numeric, errors="coerce")
    return weather
def get_ids(network):
    url = "http://mesonet.agron.iastate.edu/geojson/network.php?network={}"
    r = requests.get(url.format(network))
    md = pd.io.json.json_normalize(r.json()['features'])
    md['network'] = network
    return md

Let’s talk briefly about the gem of a method that is json_normalize. The network API returns GeoJSON, so each record nests dictionaries inside dictionaries.

url = "http://mesonet.agron.iastate.edu/geojson/network.php?network={}"
r = requests.get(url.format("AWOS"))
js = r.json()
js['features'][:2]
[{'geometry': {'coordinates': [-94.2723694444, 43.0796472222],
   'type': 'Point'},
  'id': 'AXA',
  'properties': {'sid': 'AXA', 'sname': 'ALGONA'},
  'type': 'Feature'},
 {'geometry': {'coordinates': [-93.569475, 41.6878083333], 'type': 'Point'},
  'id': 'IKV',
  'properties': {'sid': 'IKV', 'sname': 'ANKENY'},
  'type': 'Feature'}]
pd.DataFrame(js['features']).head()
                                            geometry   id                             properties     type
0  {'coordinates': [-94.2723694444, 43.0796472222...  AXA      {'sname': 'ALGONA', 'sid': 'AXA'}  Feature
1  {'coordinates': [-93.569475, 41.6878083333], '...  IKV      {'sname': 'ANKENY', 'sid': 'IKV'}  Feature
2  {'coordinates': [-95.0465277778, 41.4058805556...  AIO    {'sname': 'ATLANTIC', 'sid': 'AIO'}  Feature
3  {'coordinates': [-94.9204416667, 41.6993527778...  ADU     {'sname': 'AUDUBON', 'sid': 'ADU'}  Feature
4  {'coordinates': [-93.848575, 42.0485694444], '...  BNW  {'sname': 'BOONE MUNI', 'sid': 'BNW'}  Feature
js['features'][0]
{
    'geometry': {
        'coordinates': [-94.2723694444, 43.0796472222],
        'type': 'Point'
    },
    'id': 'AXA',
    'properties': {
        'sid': 'AXA',
        'sname': 'ALGONA'
    },
    'type': 'Feature'
}
js['features']

[{'geometry': {'coordinates': [-94.2723694444, 43.0796472222],
  'type': 'Point'},
  'id': 'AXA',
  'properties': {'sid': 'AXA', 'sname': 'ALGONA'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.569475, 41.6878083333], 'type': 'Point'},
  'id': 'IKV',
  'properties': {'sid': 'IKV', 'sname': 'ANKENY'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.0465277778, 41.4058805556],
  'type': 'Point'},
  'id': 'AIO',
  'properties': {'sid': 'AIO', 'sname': 'ATLANTIC'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-94.9204416667, 41.6993527778],
  'type': 'Point'},
  'id': 'ADU',
  'properties': {'sid': 'ADU', 'sname': 'AUDUBON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.848575, 42.0485694444], 'type': 'Point'},
  'id': 'BNW',
  'properties': {'sid': 'BNW', 'sname': 'BOONE MUNI'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-94.7888805556, 42.0443611111],
  'type': 'Point'},
  'id': 'CIN',
  'properties': {'sid': 'CIN', 'sname': 'CARROLL'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.8983388889, 40.6831805556],
  'type': 'Point'},
  'id': 'TVK',
  'properties': {'sid': 'TVK', 'sname': 'Centerville'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.3607694444, 41.0184305556],
  'type': 'Point'},
  'id': 'CNC',
  'properties': {'sid': 'CNC', 'sname': 'CHARITON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.6132222222, 43.0730055556],
  'type': 'Point'},
  'id': 'CCY',
  'properties': {'sid': 'CCY', 'sname': 'CHARLES CITY'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.553775, 42.7304194444], 'type': 'Point'},
  'id': 'CKP',
  'properties': {'sid': 'CKP', 'sname': 'Cherokee'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.0222722222, 40.7241527778],
  'type': 'Point'},
  'id': 'ICL',
  'properties': {'sid': 'ICL', 'sname': 'CLARINDA'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.7592583333, 42.7430416667],
  'type': 'Point'},
  'id': 'CAV',
  'properties': {'sid': 'CAV', 'sname': 'CLARION'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-90.332796, 41.829504], 'type': 'Point'},
  'id': 'CWI',
  'properties': {'sid': 'CWI', 'sname': 'CLINTON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.7604083333, 41.2611111111],
  'type': 'Point'},
  'id': 'CBF',
  'properties': {'sid': 'CBF', 'sname': 'COUNCIL BLUFFS'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-94.3607972222, 41.0187888889],
  'type': 'Point'},
  'id': 'CSQ',
  'properties': {'sid': 'CSQ', 'sname': 'CRESTON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.7433138889, 43.2755194444],
  'type': 'Point'},
  'id': 'DEH',
  'properties': {'sid': 'DEH', 'sname': 'DECORAH'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.3799888889, 41.9841944444],
  'type': 'Point'},
  'id': 'DNS',
  'properties': {'sid': 'DNS', 'sname': 'DENISON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.9834111111, 41.0520888889],
  'type': 'Point'},
  'id': 'FFL',
  'properties': {'sid': 'FFL', 'sname': 'FAIRFIELD'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.6236694444, 43.2323166667],
  'type': 'Point'},
  'id': 'FXY',
  'properties': {'sid': 'FXY', 'sname': 'Forest City'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-94.203203, 42.549741], 'type': 'Point'},
  'id': 'FOD',
  'properties': {'sid': 'FOD', 'sname': 'FORT DODGE'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.3267166667, 40.6614833333],
  'type': 'Point'},
  'id': 'FSW',
  'properties': {'sid': 'FSW', 'sname': 'FORT MADISON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.7331972222, 41.7097305556],
  'type': 'Point'},
  'id': 'GGI',
  'properties': {'sid': 'GGI', 'sname': 'Grinnell'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.3354555556, 41.5834194444],
  'type': 'Point'},
  'id': 'HNR',
  'properties': {'sid': 'HNR', 'sname': 'HARLAN'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.9504, 42.4544277778], 'type': 'Point'},
  'id': 'IIB',
  'properties': {'sid': 'IIB', 'sname': 'INDEPENDENCE'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.2650805556, 42.4690972222],
  'type': 'Point'},
  'id': 'IFA',
  'properties': {'sid': 'IFA', 'sname': 'Iowa Falls'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.4273916667, 40.4614611111],
  'type': 'Point'},
  'id': 'EOK',
  'properties': {'sid': 'EOK', 'sname': 'KEOKUK MUNI'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.1113916667, 41.2984472222],
  'type': 'Point'},
  'id': 'OXV',
  'properties': {'sid': 'OXV', 'sname': 'Knoxville'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-96.19225, 42.775375], 'type': 'Point'},
  'id': 'LRJ',
  'properties': {'sid': 'LRJ', 'sname': 'LE MARS'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.1604555556, 42.2203611111],
  'type': 'Point'},
  'id': 'MXO',
  'properties': {'sid': 'MXO', 'sname': 'MONTICELLO MUNI'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.5122277778, 40.9452527778],
  'type': 'Point'},
  'id': 'MPZ',
  'properties': {'sid': 'MPZ', 'sname': 'MOUNT PLEASANT'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.140575, 41.3669944444], 'type': 'Point'},
  'id': 'MUT',
  'properties': {'sid': 'MUT', 'sname': 'MUSCATINE'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.0190416667, 41.6701111111],
  'type': 'Point'},
  'id': 'TNU',
  'properties': {'sid': 'TNU', 'sname': 'NEWTON MUNI'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.9759888889, 42.6831388889],
  'type': 'Point'},
  'id': 'OLZ',
  'properties': {'sid': 'OLZ', 'sname': 'OELWEIN'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-96.0605861111, 42.9894916667],
  'type': 'Point'},
  'id': 'ORC',
  'properties': {'sid': 'ORC', 'sname': 'Orange City'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.6876138889, 41.0471722222],
  'type': 'Point'},
  'id': 'I75',
  'properties': {'sid': 'I75', 'sname': 'Osceola'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.4918666667, 41.227275], 'type': 'Point'},
  'id': 'OOA',
  'properties': {'sid': 'OOA', 'sname': 'Oskaloosa'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.9431083333, 41.3989138889],
  'type': 'Point'},
  'id': 'PEA',
  'properties': {'sid': 'PEA', 'sname': 'PELLA'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-94.1637083333, 41.8277916667],
  'type': 'Point'},
  'id': 'PRO',
  'properties': {'sid': 'PRO', 'sname': 'Perry'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.2624111111, 41.01065], 'type': 'Point'},
  'id': 'RDK',
  'properties': {'sid': 'RDK', 'sname': 'RED OAK'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.8353138889, 43.2081611111],
  'type': 'Point'},
  'id': 'SHL',
  'properties': {'sid': 'SHL', 'sname': 'SHELDON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.4112333333, 40.753275], 'type': 'Point'},
  'id': 'SDA',
  'properties': {'sid': 'SDA', 'sname': 'SHENANDOAH MUNI'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-95.2399194444, 42.5972277778],
  'type': 'Point'},
  'id': 'SLB',
  'properties': {'sid': 'SLB', 'sname': 'Storm Lake'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-92.0248416667, 42.2175777778],
  'type': 'Point'},
  'id': 'VTI',
  'properties': {'sid': 'VTI', 'sname': 'VINTON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-91.6748111111, 41.2751444444],
  'type': 'Point'},
  'id': 'AWG',
  'properties': {'sid': 'AWG', 'sname': 'WASHINGTON'},
  'type': 'Feature'},
{'geometry': {'coordinates': [-93.8690777778, 42.4392305556],
  'type': 'Point'},
  'id': 'EBS',
  'properties': {'sid': 'EBS', 'sname': 'Webster City'},
  'type': 'Feature'}]
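
Passing the raw features straight to the DataFrame constructor, like above, leaves the nested geometry and properties dicts crammed into single columns. pd.io.json.json_normalize flattens that nesting into regular columns. A quick sketch of the output shape (the dotted column names follow json_normalize's default flattening):

pd.io.json.json_normalize(js['features']).head()
# one row per feature, with columns along the lines of
#   geometry.coordinates, geometry.type, id, properties.sid, properties.sname, type

That id column is all we need to build the station list for the weather query.
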
stations = pd.io.json.json_normalize(js['features']).id
url = ("http://mesonet.agron.iastate.edu/cgi-bin/request/asos.py?"
       "&data=tmpf&data=relh&data=sped&data=mslp&data=p01i&data=vsby&data=gust_mph&data=skyc1&data=skyc2&data=skyc3"
       "&tz=Etc/UTC&format=comma&latlon=no"
       "&{start:year1=%Y&month1=%m&day1=%d}"
       "&{end:year2=%Y&month2=%m&day2=%d}&{stations}")
stations = "&".join("station=%s" % s for s in stations)
start = pd.Timestamp('2014-01-01')
end=pd.Timestamp('2014-01-31')

weather = (pd.read_csv(url.format(start=start, end=end, stations=stations),
                       comment="#"))
import os
ids = pd.concat([get_ids(network) for network in networks], ignore_index=True)
gr = ids.groupby('network')

os.makedirs("weather", exist_ok=True)

for i, (k, v) in enumerate(gr):
    print("{}/{}".format(i, len(network)), end='\r')
    weather = get_weather(v['id'])
    weather.to_csv("weather/{}.csv".format(k))

weather = pd.concat([
    pd.read_csv(f, parse_dates=['date'], index_col=['station', 'date'])
    for f in glob.glob('weather/*.csv')])

weather.to_hdf("weather.h5", "weather")
weather = pd.read_hdf("weather.h5", "weather").sort_index()

weather.head()
                              tmpf   relh  sped  mslp  p01i  vsby  gust_mph skyc1 skyc2 skyc3
station date
01M     2014-01-01 00:15:00  33.80  85.86   0.0   NaN   0.0  10.0       NaN   CLR     M     M
        2014-01-01 00:35:00  33.44  87.11   0.0   NaN   0.0  10.0       NaN   CLR     M     M
        2014-01-01 00:55:00  32.54  90.97   0.0   NaN   0.0  10.0       NaN   CLR     M     M
        2014-01-01 01:15:00  31.82  93.65   0.0   NaN   0.0  10.0       NaN   CLR     M     M
        2014-01-01 01:35:00  32.00  92.97   0.0   NaN   0.0  10.0       NaN   CLR     M     M

OK, that was a bit of work. Here’s a plot to reward ourselves.

airports = ['DSM', 'ORD', 'JFK', 'PDX']

g = sns.FacetGrid(weather.sort_index().loc[airports].reset_index(),
                  col='station', hue='station', col_wrap=2, size=4)
g.map(sns.regplot, 'sped', 'gust_mph')
plt.savefig('../content/images/indexes_wind_gust_facet.png');

(figure: sped vs. gust_mph regression plots, one panel per station)

Set Operations

Indexes are set-like (technically multisets, since they can contain duplicates), so they support most Python set operations. Since indexes are immutable, you won’t find any of the in-place set operations. One other difference: since Indexes are also array-like, you can’t use some infix operators, like - for difference; with a numeric index it would be unclear whether you intend math or set operations. You can use & for intersection, | for union, and ^ for symmetric difference though, since there’s no ambiguity there.
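
A tiny sketch with made-up labels (newer pandas versions prefer the spelled-out .intersection, .union, and .symmetric_difference methods, but the operators work here):

a = pd.Index(['DSM', 'ORD', 'JFK'])
b = pd.Index(['ORD', 'JFK', 'PDX'])

a & b            # intersection
a | b            # union
a ^ b            # symmetric difference
a.difference(b)  # no infix `-` for difference; use the method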

For example, let’s find the set of airports for which we have both weather and flight information. Since weather has a MultiIndex of (airport, datetime), we’ll use the levels attribute to get at the airport data, separate from the date data.

# Bring in the flights data

flights = pd.read_hdf('flights.h5', 'flights')

weather_locs = weather.index.levels[0]
# The `categories` attribute of a Categorical is an Index
origin_locs = flights.origin.cat.categories
dest_locs = flights.dest.cat.categories

airports = weather_locs & origin_locs & dest_locs
airports
Index(['ABE', 'ABI', 'ABQ', 'ABR', 'ABY', 'ACT', 'ACV', 'AEX', 'AGS', 'ALB',
       ...
       'TUL', 'TUS', 'TVC', 'TWF', 'TXK', 'TYR', 'TYS', 'VLD', 'VPS', 'XNA'],
      dtype='object', length=267)
print("Weather, no flights:\n\t", weather_locs.difference(origin_locs | dest_locs), end='\n\n')

print("Flights, no weather:\n\t", (origin_locs | dest_locs).difference(weather_locs), end='\n\n')

print("Dropped Stations:\n\t", (origin_locs | dest_locs) ^ weather_locs)
Weather, no flights:
	 Index(['01M', '04V', '04W', '05U', '06D', '08D', '0A9', '0CO', '0E0', '0F2',
       ...
       'Y50', 'Y51', 'Y63', 'Y70', 'YIP', 'YKM', 'YKN', 'YNG', 'ZPH', 'ZZV'],
      dtype='object', length=1909)

Flights, no weather:
	 Index(['ADK', 'ADQ', 'ANC', 'BET', 'BKG', 'BQN', 'BRW', 'CDV', 'CLD', 'FAI',
       'FCA', 'GUM', 'HNL', 'ITO', 'JNU', 'KOA', 'KTN', 'LIH', 'MQT', 'OGG',
       'OME', 'OTZ', 'PPG', 'PSE', 'PSG', 'SCC', 'SCE', 'SIT', 'SJU', 'STT',
       'STX', 'WRG', 'YAK', 'YUM'],
      dtype='object')

Dropped Stations:
	 Index(['01M', '04V', '04W', '05U', '06D', '08D', '0A9', '0CO', '0E0', '0F2',
       ...
       'Y63', 'Y70', 'YAK', 'YIP', 'YKM', 'YKN', 'YNG', 'YUM', 'ZPH', 'ZZV'],
      dtype='object', length=1943)

Flavors

Pandas has many subclasses of the regular Index, each tailored to a specific kind of data. Most of the time these will be created for you automatically, so you don’t have to worry about which one to choose.

  1. Index
  2. Int64Index
  3. RangeIndex (Memory-saving special case of Int64Index)
  4. Float64Index
  5. DatetimeIndex: datetime64[ns]-precision data
  6. PeriodIndex: Regularly-spaced, arbitrary-precision datetime data
  7. TimedeltaIndex: Timedelta data
  8. CategoricalIndex: Categorical data

Some of these are purely optimizations, while others use information about the data to provide additional methods. And while you might sometimes work with indexes directly (like the set operations above), most of the time you’ll be operating on a Series or DataFrame, which in turn makes use of its Index.
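
You can see pandas picking the flavor for you by constructing Indexes from different kinds of data. A quick sketch; the classes noted in the comments reflect the pandas version used here, and later releases have shuffled the numeric flavors around:

pd.Index([1, 2, 3])                        # Int64Index
pd.Index([1.5, 2.5, 3.5])                  # Float64Index
pd.date_range('2014-01-01', periods=3)     # DatetimeIndex
pd.Index(pd.Categorical(['a', 'b', 'a']))  # CategoricalIndex
pd.RangeIndex(5)                           # RangeIndex(start=0, stop=5, step=1)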

Row Slicing

We saw in part one that indexes are great for making row subsetting as easy as column subsetting.

weather.loc['DSM'].head()
                      tmpf   relh  sped    mslp  p01i  vsby  gust_mph skyc1 skyc2 skyc3
date
2014-01-01 00:54:00  10.94  72.79  10.3  1024.9   0.0  10.0       NaN   FEW     M     M
2014-01-01 01:54:00  10.94  72.79  11.4  1025.4   0.0  10.0       NaN   OVC     M     M
2014-01-01 02:54:00  10.94  72.79   8.0  1025.3   0.0  10.0       NaN   BKN     M     M
2014-01-01 03:54:00  10.94  72.79   9.1  1025.3   0.0  10.0       NaN   OVC     M     M
2014-01-01 04:54:00  10.04  72.69   9.1  1024.7   0.0  10.0       NaN   BKN     M     M

Without indexes we’d probably resort to boolean masks.

weather2 = weather.reset_index()
weather2[weather2['station'] == 'DSM'].head()
       station                date   tmpf   relh  sped    mslp  p01i  vsby  gust_mph skyc1 skyc2 skyc3
884855     DSM 2014-01-01 00:54:00  10.94  72.79  10.3  1024.9   0.0  10.0       NaN   FEW     M     M
884856     DSM 2014-01-01 01:54:00  10.94  72.79  11.4  1025.4   0.0  10.0       NaN   OVC     M     M
884857     DSM 2014-01-01 02:54:00  10.94  72.79   8.0  1025.3   0.0  10.0       NaN   BKN     M     M
884858     DSM 2014-01-01 03:54:00  10.94  72.79   9.1  1025.3   0.0  10.0       NaN   OVC     M     M
884859     DSM 2014-01-01 04:54:00  10.04  72.69   9.1  1024.7   0.0  10.0       NaN   BKN     M     M

Slightly less convenient, but still doable.

Indexes for Easier Arithmetic, Analysis

It’s nice to have your metadata (labels on each observation) next to your actual values. But if you store them in an array, they’ll get in the way. Say we wanted to convert the Fahrenheit temperatures to Celsius.

# With indexes
temp = weather['tmpf']

c = (temp - 32) * 5 / 9
c.to_frame()
                             tmpf
station date
01M     2014-01-01 00:15:00   1.0
        2014-01-01 00:35:00   0.8
        2014-01-01 00:55:00   0.3
        2014-01-01 01:15:00  -0.1
        2014-01-01 01:35:00   0.0
...                           ...
ZZV     2014-01-30 19:53:00  -2.8
        2014-01-30 20:53:00  -2.2
        2014-01-30 21:53:00  -2.2
        2014-01-30 22:53:00  -2.8
        2014-01-30 23:53:00  -1.7

3303647 rows × 1 columns

# without
temp2 = weather.reset_index()[['station', 'date', 'tmpf']]

temp2['tmpf'] = (temp2['tmpf'] - 32) * 5 / 9
temp2.head()
  station                date  tmpf
0     01M 2014-01-01 00:15:00   1.0
1     01M 2014-01-01 00:35:00   0.8
2     01M 2014-01-01 00:55:00   0.3
3     01M 2014-01-01 01:15:00  -0.1
4     01M 2014-01-01 01:35:00   0.0

Again, not terrible, but not as good. And what if you had wanted to keep Fahrenheit around as well, instead of overwriting it like we did? Then you’d need to make a copy of everything, including the station and date columns. We don’t have that problem, since indexes are immutable and safely shared between DataFrames / Series.

temp.index is c.index
True

Indexes for Alignment

I’ve saved the best for last. Automatic alignment, or reindexing, is fundamental to pandas.

All binary operations (add, multiply, etc…) between Series/DataFrames first align and then proceed.
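
A tiny sketch of what "align, then operate" means, with made-up data:

s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
s2 = pd.Series([10, 20, 30], index=['b', 'c', 'd'])

s1 + s2                      # result is indexed by the union of the labels
# a     NaN
# b    12.0
# c    23.0
# d     NaN
# dtype: float64

# roughly what happens under the hood:
left, right = s1.align(s2)   # both reindexed to ['a', 'b', 'c', 'd']
left + right                 # same result as above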

Let’s suppose we have hourly observations on temperature and windspeed. And suppose some of the observations were invalid, and not reported (simulated below by sampling from the full dataset). We’ll assume the missing windspeed observations were potentially different from the missing temperature observations.

dsm = weather.loc['DSM']

hourly = dsm.resample('H').mean()

temp = hourly['tmpf'].sample(frac=.5, random_state=1).sort_index()
sped = hourly['sped'].sample(frac=.5, random_state=2).sort_index()
temp.head().to_frame()
                      tmpf
date
2014-01-01 00:00:00  10.94
2014-01-01 02:00:00  10.94
2014-01-01 03:00:00  10.94
2014-01-01 04:00:00  10.04
2014-01-01 05:00:00  10.04
sped.head()
date
2014-01-01 01:00:00    11.4
2014-01-01 02:00:00     8.0
2014-01-01 03:00:00     9.1
2014-01-01 04:00:00     9.1
2014-01-01 05:00:00    10.3
Name: sped, dtype: float64

Notice that the two indexes aren’t identical.

Suppose that the windspeed : temperature ratio is meaningful. When we go to compute that, pandas will automatically align the two by index label.

sped / temp
date
2014-01-01 00:00:00         NaN
2014-01-01 01:00:00         NaN
2014-01-01 02:00:00    0.731261
2014-01-01 03:00:00    0.831810
2014-01-01 04:00:00    0.906375
                         ...   
2014-01-30 13:00:00         NaN
2014-01-30 14:00:00    0.584712
2014-01-30 17:00:00         NaN
2014-01-30 21:00:00         NaN
2014-01-30 23:00:00         NaN
dtype: float64

This lets you focus on doing the operation, rather than manually aligning things and ensuring that the arrays are the same length and in the same order. By default, missing values are inserted where the two don’t align. You can use the method version of any binary operation to specify a fill_value.

sped.div(temp, fill_value=1)
date
2014-01-01 00:00:00     0.091408
2014-01-01 01:00:00    11.400000
2014-01-01 02:00:00     0.731261
2014-01-01 03:00:00     0.831810
2014-01-01 04:00:00     0.906375
                         ...    
2014-01-30 13:00:00     0.027809
2014-01-30 14:00:00     0.584712
2014-01-30 17:00:00     0.023267
2014-01-30 21:00:00     0.035663
2014-01-30 23:00:00    13.700000
dtype: float64

And since I couldn’t find anywhere else to put it, you can control the axis the operation is aligned along as well.

hourly.div(sped, axis='index')
                         tmpf      relh  sped        mslp  p01i      vsby  gust_mph
date
2014-01-01 00:00:00       NaN       NaN   NaN         NaN   NaN       NaN       NaN
2014-01-01 01:00:00  0.959649  6.385088   1.0   89.947368   0.0  0.877193       NaN
2014-01-01 02:00:00  1.367500  9.098750   1.0  128.162500   0.0  1.250000       NaN
2014-01-01 03:00:00  1.202198  7.998901   1.0  112.670330   0.0  1.098901       NaN
2014-01-01 04:00:00  1.103297  7.987912   1.0  112.604396   0.0  1.098901       NaN
...                       ...       ...   ...         ...   ...       ...       ...
2014-01-30 19:00:00       NaN       NaN   NaN         NaN   NaN       NaN       NaN
2014-01-30 20:00:00       NaN       NaN   NaN         NaN   NaN       NaN       NaN
2014-01-30 21:00:00       NaN       NaN   NaN         NaN   NaN       NaN       NaN
2014-01-30 22:00:00       NaN       NaN   NaN         NaN   NaN       NaN       NaN
2014-01-30 23:00:00  1.600000  4.535036   1.0   73.970803   0.0  0.729927       NaN

720 rows × 7 columns

The non row-labeled version of this is messy.

temp2 = temp.reset_index()
sped2 = sped.reset_index()

# Find rows where the operation is defined
common_dates = pd.Index(temp2.date) & sped2.date
pd.concat([
    # concat to not lose date information
    sped2.loc[sped2['date'].isin(common_dates), 'date'],
    (sped2.loc[sped2.date.isin(common_dates), 'sped'] /
     temp2.loc[temp2.date.isin(common_dates), 'tmpf'])],
    axis=1).dropna(how='all')
                    date         0
1    2014-01-01 02:00:00  0.731261
2    2014-01-01 03:00:00  0.831810
3    2014-01-01 04:00:00  0.906375
4    2014-01-01 05:00:00  1.025896
8    2014-01-01 13:00:00       NaN
..                   ...       ...
351  2014-01-29 23:00:00  0.535609
354  2014-01-30 05:00:00  0.487735
356  2014-01-30 09:00:00       NaN
357  2014-01-30 10:00:00  0.618939
358  2014-01-30 14:00:00       NaN

170 rows × 2 columns

Yeah, I prefer the temp / sped version.

Alignment isn’t limited to arithmetic operations, although those are the most obvious and easiest to demonstrate.
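
For instance, assignment aligns too. A small sketch with made-up data: assigning a Series to a DataFrame column matches rows by label, and rows without a match get NaN.

frame = pd.DataFrame({'a': [1, 2, 3]}, index=['x', 'y', 'z'])
col = pd.Series([10, 30], index=['x', 'z'])

frame['b'] = col   # aligned on the index; 'y' gets NaN
frame
#    a     b
# x  1  10.0
# y  2   NaN
# z  3  30.0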

Merging

There are two ways of merging DataFrames / Series in pandas

  1. Relational Database style with pd.merge
  2. Array style with pd.concat

Personally, I think in terms of the concat style. I learned pandas before I ever really used SQL, so it comes more naturally to me I suppose. pd.merge has more flexibility, though I think most of the time you don’t need this flexibility.

Concat Version

pd.concat([temp, sped], axis=1).head()
                      tmpf  sped
date
2014-01-01 00:00:00  10.94   NaN
2014-01-01 01:00:00    NaN  11.4
2014-01-01 02:00:00  10.94   8.0
2014-01-01 03:00:00  10.94   9.1
2014-01-01 04:00:00  10.04   9.1

The axis parameter controls how the data should be stacked, 0 for vertically, 1 for horizontally. The join parameter controls the merge behavior on the shared axis, (the Index for axis=1). By default it’s like a union of the two indexes, or an outer join.
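
A quick sketch of the two axes with made-up data:

a = pd.Series([1, 2], index=['x', 'y'], name='a')
b = pd.Series([3, 4], index=['y', 'z'], name='b')

pd.concat([a, b])           # axis=0: stack vertically, labels can repeat
pd.concat([a, b], axis=1)   # axis=1: align on the index (an outer join by default)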

pd.concat([temp, sped], axis=1, join='inner')
                      tmpf    sped
date
2014-01-01 02:00:00  10.94   8.000
2014-01-01 03:00:00  10.94   9.100
2014-01-01 04:00:00  10.04   9.100
2014-01-01 05:00:00  10.04  10.300
2014-01-01 13:00:00   8.96  13.675
...                    ...     ...
2014-01-29 23:00:00  35.96  18.200
2014-01-30 05:00:00  33.98  17.100
2014-01-30 09:00:00  35.06  16.000
2014-01-30 10:00:00  35.06  21.700
2014-01-30 14:00:00  35.06  20.500

170 rows × 2 columns

Merge Version

Since we’re joining by index here the merge version is quite similar. We’ll see an example later of a one-to-many join where the two differ.

pd.merge(temp.to_frame(), sped.to_frame(), left_index=True, right_index=True).head()
                      tmpf    sped
date
2014-01-01 02:00:00  10.94   8.000
2014-01-01 03:00:00  10.94   9.100
2014-01-01 04:00:00  10.04   9.100
2014-01-01 05:00:00  10.04  10.300
2014-01-01 13:00:00   8.96  13.675
pd.merge(temp.to_frame(), sped.to_frame(), left_index=True, right_index=True,
         how='outer').head()
                      tmpf  sped
date
2014-01-01 00:00:00  10.94   NaN
2014-01-01 01:00:00    NaN  11.4
2014-01-01 02:00:00  10.94   8.0
2014-01-01 03:00:00  10.94   9.1
2014-01-01 04:00:00  10.04   9.1

Like I said, I typically prefer concat to merge. The exception here is one-to-many type joins. Let’s walk through one of those, where we join the flight data to the weather data. To focus just on the merge, we’ll aggregate the hourly weather data up to daily values, rather than trying to find the closest recorded weather observation to each departure (you could do that, but it’s not the focus right now). We’ll then join the one (airport, date) record to the many (airport, date, flight) records.

Quick tangent, to get the weather data to daily frequency, we’ll need to resample (more on that in the timeseries section). The resample essentially involves breaking the recorded values into daily buckets and computing the aggregation function on each bucket. The only wrinkle is that we have to resample by station, so we’ll use the pd.TimeGrouper helper.

idx_cols = ['unique_carrier', 'origin', 'dest', 'tail_num', 'fl_num', 'fl_date']
data_cols = ['crs_dep_time', 'dep_delay', 'crs_arr_time', 'arr_delay',
             'taxi_out', 'taxi_in', 'wheels_off', 'wheels_on', 'distance']

df = flights.set_index(idx_cols)[data_cols].sort_index()
def mode(x):
    '''
    Arbitrarily break ties.
    '''
    return x.value_counts().index[0]

aggfuncs = {'tmpf': 'mean', 'relh': 'mean',
            'sped': 'mean', 'mslp': 'mean',
            'p01i': 'mean', 'vsby': 'mean',
            'gust_mph': 'mean', 'skyc1': mode,
            'skyc2': mode, 'skyc3': mode}
# TimeGrouper works on a DatetimeIndex, so we move `station` to the
# columns and then groupby it as well.
daily = (weather.reset_index(level="station")
                .groupby([pd.TimeGrouper('1d'), "station"])
                .agg(aggfuncs))

daily.head()
                    gust_mph       vsby       sped       relh skyc1       tmpf skyc2  mslp  p01i skyc3
date       station
2014-01-01 01M           NaN   9.229167   2.262500  81.117917   CLR  35.747500     M   NaN   0.0     M
           04V     31.307143   9.861111  11.131944  72.697778   CLR  18.350000     M   NaN   0.0     M
           04W           NaN  10.000000   3.601389  69.908056   OVC  -9.075000     M   NaN   0.0     M
           05U           NaN   9.929577   3.770423  71.519859   CLR  26.321127     M   NaN   0.0     M
           06D           NaN   9.576389   5.279167  73.784179   CLR -11.388060     M   NaN   0.0     M
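
Quick aside: pd.TimeGrouper was deprecated in later pandas releases in favor of pd.Grouper, so on a newer version the same groupby would be spelled roughly like this (a sketch, not run here):

daily = (weather.reset_index(level="station")
                .groupby([pd.Grouper(freq='1d'), "station"])
                .agg(aggfuncs))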

The merge version

m = pd.merge(flights, daily.reset_index().rename(columns={'date': 'fl_date', 'station': 'origin'}),
             on=['fl_date', 'origin']).set_index(idx_cols).sort_index()

m.head()
                                                       airline_id  origin_airport_id  origin_airport_seq_id  origin_city_market_id origin_city_name origin_state_nm  dest_airport_id  dest_airport_seq_id  dest_city_market_id         dest_city_name  ...  gust_mph  vsby      sped       relh skyc1     tmpf skyc2         mslp  p01i skyc3
unique_carrier origin dest tail_num fl_num fl_date
AA             ABQ    DFW  N200AA   1090   2014-01-27       19805              10140                1014002                  30140  Albuquerque, NM      New Mexico            11298              1129803                30194  Dallas/Fort Worth, TX  ...       NaN  10.0  6.737500  34.267500   SCT  41.8325     M  1014.620833   0.0     M
                                    1662   2014-01-06       19805              10140                1014002                  30140  Albuquerque, NM      New Mexico            11298              1129803                30194  Dallas/Fort Worth, TX  ...       NaN  10.0  9.270833  27.249167   CLR  28.7900     M  1029.016667   0.0     M
                           N202AA   1332   2014-01-27       19805              10140                1014002                  30140  Albuquerque, NM      New Mexico            11298              1129803                30194  Dallas/Fort Worth, TX  ...       NaN  10.0  6.737500  34.267500   SCT  41.8325     M  1014.620833   0.0     M
                           N426AA   1467   2014-01-15       19805              10140                1014002                  30140  Albuquerque, NM      New Mexico            11298              1129803                30194  Dallas/Fort Worth, TX  ...       NaN  10.0  6.216667  34.580000   FEW  40.2500     M  1027.800000   0.0     M
                                    1662   2014-01-09       19805              10140                1014002                  30140  Albuquerque, NM      New Mexico            11298              1129803                30194  Dallas/Fort Worth, TX  ...       NaN  10.0  3.087500  42.162500   FEW  34.6700     M  1018.379167   0.0     M

5 rows × 40 columns

m.sample(n=10000).pipe((sns.jointplot, 'data'), 'sped', 'dep_delay')
plt.savefig('../content/images/indexes_sped_delay_join.svg', transparent=True)

(figure: joint plot of sped vs. dep_delay for a 10,000-flight sample)

m.groupby('skyc1').dep_delay.agg(['mean', 'count']).sort_values(by='mean')
            mean   count
skyc1
M      -1.948052      77
CLR    11.222288  115121
FEW    16.863177  161727
SCT    17.803048   19289
BKN    18.638034   54030
OVC    21.667762   52643
VV     30.487008    9583
import statsmodels.api as sm
mod = sm.OLS.from_formula('dep_delay ~ C(skyc1) + distance + tmpf + relh + sped + mslp', data=m)
res = mod.fit()
res.summary()
                            OLS Regression Results
==============================================================================
Dep. Variable:              dep_delay   R-squared:                       0.026
Model:                            OLS   Adj. R-squared:                  0.025
Method:                 Least Squares   F-statistic:                     976.4
Date:                Sun, 10 Apr 2016   Prob (F-statistic):               0.00
Time:                        16:06:15   Log-Likelihood:            -2.1453e+06
No. Observations:              410372   AIC:                         4.291e+06
Df Residuals:                  410360   BIC:                         4.291e+06
Df Model:                          11
Covariance Type:            nonrobust
===================================================================================
                      coef    std err          t      P>|t|     [95.0% Conf. Int.]
-----------------------------------------------------------------------------------
Intercept        -331.1032     10.828    -30.577      0.000      -352.327  -309.880
C(skyc1)[T.CLR]    -4.4041      0.249    -17.662      0.000        -4.893    -3.915
C(skyc1)[T.FEW]    -0.7330      0.226     -3.240      0.001        -1.176    -0.290
C(skyc1)[T.M]     -16.4341      8.681     -1.893      0.058       -33.448     0.580
C(skyc1)[T.OVC]     0.3818      0.281      1.358      0.174        -0.169     0.933
C(skyc1)[T.SCT]     0.8589      0.380      2.260      0.024         0.114     1.604
C(skyc1)[T.VV ]     8.8603      0.509     17.414      0.000         7.863     9.858
distance            0.0008      0.000      6.174      0.000         0.001     0.001
tmpf               -0.1841      0.005    -38.390      0.000        -0.193    -0.175
relh                0.1626      0.004     38.268      0.000         0.154     0.171
sped                0.6096      0.018     33.716      0.000         0.574     0.645
mslp                0.3340      0.010     31.960      0.000         0.313     0.354
==============================================================================
Omnibus:                   456713.147   Durbin-Watson:                   1.872
Prob(Omnibus):                  0.000   Jarque-Bera (JB):         76162962.824
Skew:                           5.535   Prob(JB):                         0.00
Kurtosis:                      68.816   Cond. No.                     2.07e+05
==============================================================================
fig, ax = plt.subplots()
ax.scatter(res.fittedvalues, res.resid, color='k', marker='.', alpha=.25)
ax.set(xlabel='Predicted', ylabel='Residual')
sns.despine()
plt.savefig('../content/images/indexes_resid_fit.png', transparent=True)

(figure: residuals vs. predicted values for the departure-delay model)
