Skip to content

Commit

Permalink
Merge pull request #46 from pennlabs/laundry-refactor
Browse files Browse the repository at this point in the history
Laundry refactor looks good to me. Begins to fix #4.
  • Loading branch information
afischer committed Nov 13, 2015
2 parents b2fc26a + 3786ceb commit 3e8534b
Show file tree
Hide file tree
Showing 2 changed files with 86 additions and 59 deletions.
120 changes: 67 additions & 53 deletions penn/laundry.py
Original file line number Diff line number Diff line change
Expand Up @@ -18,44 +18,51 @@ class Laundry(object):
def __init__(self):
    # No per-instance state is required; the client is purely method-based.
    pass

@staticmethod
def get_hall_no(href):
    """Extract the hall number from a room link's href.

    :param href: link target containing a ``Halls=<n>`` query parameter.
    :returns: the hall number as an ``int``.
    :raises AttributeError: if ``href`` contains no ``Halls=<n>`` component
        (``re.search`` returns ``None``).
    """
    return int(re.search(r"Halls=(\d+)", href).group(1))

def all_status(self):
    """Return the name, hall number, and washer/dryer availability counts
    for every laundry room in the system.

    >>> all_laundry = l.all_status()

    :returns: list of dicts, one per room, with keys ``name``, ``hall_no``,
        ``washers_available``, ``dryers_available``, ``washers_in_use``
        and ``dryers_in_use``.
    :raises requests.HTTPError: if the laundry site returns an error status.
    """
    r = requests.get(ALL_URL)
    r.raise_for_status()

    # NOTE(review): no explicit parser is passed here, while hall_status
    # uses 'html5lib' -- confirm the default parser yields the same tree.
    parsed = BeautifulSoup(r.text)
    info_table = parsed.find_all('table')[2]

    # Build hall name -> hall number by parsing each room link's href.
    hall_dict = {}
    for link in info_table.find_all('a', class_='buttlink'):
        room_name = link.get_text().strip()
        hall_dict[room_name] = self.get_hall_no(link.get('href'))

    # Flatten every table row into its list of non-empty cell texts.
    data = []
    for row in info_table.find_all('tr'):
        cell_texts = (cell.get_text().strip() for cell in row.find_all('td'))
        data.append([text for text in cell_texts if text])

    # Drop empty rows, then skip the header row and the service row.
    room_rows = [row for row in data if row][2:]

    # Assemble the final JSON-serializable structure.
    laundry_rooms = []
    for row in room_rows:
        laundry_rooms.append({
            'name': row[0],
            'hall_no': hall_dict[row[0]],
            'washers_available': int(row[1]),
            'dryers_available': int(row[2]),
            'washers_in_use': int(row[3]),
            'dryers_in_use': int(row[4]),
        })
    return laundry_rooms

def hall_status(self, hall_no):
    """Return the status of each washer/dryer in a particular laundry room.

    :param hall_no: hall number, as reported by ``all_status``.
    :returns: dict with ``hall_name`` and a ``machines`` list; each machine
        dict has ``number``, ``machine_type``, ``available`` and
        ``time_left`` (``None`` when no time is reported).
    :raises ValueError: if ``hall_no`` cannot be interpreted as an integer.
    :raises requests.HTTPError: if the laundry site returns an error status.
    """
    # NOTE(review): reconstructed from a collapsed diff hunk -- the visible
    # code raises ValueError("Room Number must be integer"); confirm the
    # hidden validation is exactly this int() conversion.
    try:
        num = int(hall_no)
    except ValueError:
        raise ValueError("Room Number must be integer")

    r = requests.get(HALL_BASE_URL + str(num))
    r.raise_for_status()

    parsed = BeautifulSoup(r.text, 'html5lib')
    tables = parsed.find_all('table')
    hall_name = tables[2].get_text().strip()
    info_table = tables[4]

    # Flatten every table row into its list of non-empty cell texts.
    data = []
    for row in info_table.find_all('tr'):
        cell_texts = (cell.get_text().strip() for cell in row.find_all('td'))
        data.append([text for text in cell_texts if text])

    # Remove the header row and all empty rows.
    machine_rows = [row for row in data if row][1:]

    def to_machine(data_row):
        # Rows carry an optional fourth cell with the remaining time.
        return {
            'number': data_row[0],
            'machine_type': data_row[1],
            'available': data_row[2] == u'Available',
            'time_left': data_row[3] if len(data_row) == 4 else None,
        }

    return {
        'machines': [to_machine(row) for row in machine_rows],
        'hall_name': hall_name,
    }
25 changes: 19 additions & 6 deletions tests/laundry_test.py
Original file line number Diff line number Diff line change
@@ -1,18 +1,31 @@
import unittest
from nose.tools import ok_, eq_
from penn import Laundry


class TestLaundry():
    """Integration tests for the Laundry client (run via nose).

    NOTE(review): reconstructed from a mashed diff; the post-commit version
    drops the unittest.TestCase base and uses nose's eq_/ok_ helpers.
    """

    def setUp(self):
        self.laundry = Laundry()

    def test_all(self):
        """All-hall data has the expected size and sane per-hall counts."""
        data = self.laundry.all_status()
        eq_(55, len(data))
        eq_('Class of 1925 House', data[0]['name'])
        # Check all halls have appropriate data points.
        for hall_no, hall in enumerate(data):
            eq_(hall['hall_no'], hall_no)
            ok_(hall['dryers_available'] >= 0)
            ok_(hall['dryers_in_use'] >= 0)
            ok_(hall['washers_available'] >= 0)
            ok_(hall['washers_in_use'] >= 0)

    def test_single_hall(self):
        """Machines in a hall carry the expected fields, numbered from 1."""
        # Renamed the outer loop variable: the original reused `i` for both
        # the hall loop and the machine enumeration, shadowing it.
        for hall_no in range(1):
            data = self.laundry.hall_status(hall_no)
            machines = data['machines']
            # Check all machines have appropriate data points.
            for index, machine in enumerate(machines):
                eq_(machine['number'], str(index + 1))
                ok_('available' in machine)
                ok_('machine_type' in machine)
                ok_('time_left' in machine)

0 comments on commit 3e8534b

Please sign in to comment.