I've created a public site containing topics related to mobile wireless test automation. I hope you find it useful; your feedback and comments are welcome.
Julian
Tuesday, 14 July 2009
Monday, 3 December 2007
""" Markup Example 12
Adding a user-agent string to emulate a Nokia 6230
author: Julian Harty
edited: 03 December 2007
"""
import urllib
import re
import amara
def getHrefFromXML(doc, search_regex):
"""Returns the href link if the in search_regex is
found in any
Adding a user-agent string to emulate a Nokia 6230
author: Julian Harty
edited: 03 December 2007
"""
import urllib
import re
import amara
def getHrefFromXML(doc, search_regex):
    """Return the href of the first link whose text matches search_regex.

    Assumes the links are anchors inside the html body's div tags,
    i.e. reachable as doc.html.body.div[i].a.

    Args:
        doc: an amara xml document object.
        search_regex: the regular expression to match against each
            anchor's first text child.

    Returns:
        The href as a string if the pattern is found, else None.
    """
    pattern = re.compile(search_regex)
    for division in doc.html.body.div:
        # A div may have no anchor, or an anchor with no children;
        # skip those rather than failing.  (The original used a bare
        # "except: pass", which also hid real bugs such as NameError —
        # catch only the expected lookup errors.)
        try:
            label = division.a.xml_children[0]
        except (AttributeError, IndexError):
            continue
        try:
            if pattern.search(label):
                return division.a.href
        except TypeError:
            # First child was not string-like (e.g. a nested element).
            continue
    return None
if __name__ == "__main__":
request = urllib.FancyURLopener()
request.addheader('Accept',
'application/xhtml+xml')
request.addheader('User-Agent',
'Nokia6230/2.0+(04.43)'
'+Profile/MIDP-2.0+Configuration'
'/CLDC-1.1+UP.Link/6.3.0.0.0')
response = request.open("http://www.google.co.uk/m")
content = response.read()
# Use the live content
doc = amara.parse(content)
print "should return: '/gmm?source=m&dc=mobile-promotion'"
print getHrefFromXML(doc, "Maps")
Assumes the links are in the html body's div tags.
Args:
doc: an amara xml object
search_regex: the regular expression to match in
the href text
Returns:
the href as a string if the pattern is found, else None.
"""
ru1 = re.compile(search_regex)
for item in doc.html.body.div:
try:
# print str(item.a.xml_children[0])
# print type(item.a.xml_children[0])
p = ru1.search(item.a.xml_children[0])
if p:
return item.a.href
except:
pass
return None
if __name__ == "__main__":
request = urllib.FancyURLopener()
request.addheader('Accept',
'application/xhtml+xml')
request.addheader('User-Agent',
'Nokia6230/2.0+(04.43)'
'+Profile/MIDP-2.0+Configuration'
'/CLDC-1.1+UP.Link/6.3.0.0.0')
response = request.open("http://www.google.co.uk/m")
content = response.read()
# Use the live content
doc = amara.parse(content)
print "should return: '/gmm?source=m&dc=mobile-promotion'"
print getHrefFromXML(doc, "Maps")
Markup exercise 12 using cached file
""" Markup Example 12 local
Adding a user-agent string to emulate a Nokia 6230
Reads the content from a cached file
author: Julian Harty
edited: 03 December 2007
"""
import re
import amara
def getHrefFromXML(doc, search_regex):
    """Return the href of the first link whose text matches search_regex.

    Assumes the links are anchors inside the html body's div tags,
    i.e. reachable as doc.html.body.div[i].a.

    Args:
        doc: an amara xml document object.
        search_regex: the regular expression to match against each
            anchor's first text child.

    Returns:
        The href as a string if the pattern is found, else None.
    """
    pattern = re.compile(search_regex)
    for division in doc.html.body.div:
        # A div may have no anchor, or an anchor with no children;
        # skip those rather than failing.  (The original used a bare
        # "except: pass", which also hid real bugs such as NameError —
        # catch only the expected lookup errors.)
        try:
            label = division.a.xml_children[0]
        except (AttributeError, IndexError):
            continue
        try:
            if pattern.search(label):
                return division.a.href
        except TypeError:
            # First child was not string-like (e.g. a nested element).
            continue
    return None
if __name__ == "__main__":
# use a local copy of the content
doc = amara.parse(open("mobile-homepage.xhtml"))
print "should return: '/gmm?source=m&dc=mobile-promotion'"
print getHrefFromXML(doc, "Maps")
Adding a user-agent string to emulate a Nokia 6230
Reads the content from a cached file
author: Julian Harty
edited: 03 December 2007
"""
import re
import amara
def getHrefFromXML(doc, search_regex):
    """Return the href of the first link whose text matches search_regex.

    Assumes the links are anchors inside the html body's div tags,
    i.e. reachable as doc.html.body.div[i].a.

    Args:
        doc: an amara xml document object.
        search_regex: the regular expression to match against each
            anchor's first text child.

    Returns:
        The href as a string if the pattern is found, else None.
    """
    pattern = re.compile(search_regex)
    for division in doc.html.body.div:
        # A div may have no anchor, or an anchor with no children;
        # skip those rather than failing.  (The original used a bare
        # "except: pass", which also hid real bugs such as NameError —
        # catch only the expected lookup errors.)
        try:
            label = division.a.xml_children[0]
        except (AttributeError, IndexError):
            continue
        try:
            if pattern.search(label):
                return division.a.href
        except TypeError:
            # First child was not string-like (e.g. a nested element).
            continue
    return None
if __name__ == "__main__":
# use a local copy of the content
doc = amara.parse(open("mobile-homepage.xhtml"))
print "should return: '/gmm?source=m&dc=mobile-promotion'"
print getHrefFromXML(doc, "Maps")
Markup exercise 11b prettified
""" Markup Example 11
Defines the method getlLinkFromXhtml() which is later used to return the href
for a given regular expression, if it exists.
(Includes the code from markup example 07)
author: Julian Harty
edited: 03 December 2007
"""
import re
import sys
import urllib
import BeautifulSoup
from xml.dom import minidom
def getLinkFromXhtml(content, text_regex):
    """Return the href of the first <a> whose text matches text_regex.

    Args:
        content: the source content, e.g. an xHTML response (a string
            parseable by xml.dom.minidom).
        text_regex: the text to match, as a regular expression.

    Returns:
        The href attribute value (a string) if a matching link is
        found, else None.
    """
    doc = minidom.parseString(content)
    rx = re.compile(text_regex)
    for link in doc.getElementsByTagName('a'):
        if not link.hasAttribute('href'):
            continue
        # Concatenate the direct text children to form the visible label
        # (a label may be split across several text nodes).
        text = "".join(node.data for node in link.childNodes
                       if node.nodeType == node.TEXT_NODE)
        if rx.search(text):
            # Bug fix: the original returned the whole <a> element's XML
            # (str(i.toxml())) even though the docstring promises the
            # href; return the href attribute as documented.
            return link.getAttribute('href')
    return None
request = urllib.FancyURLopener()
request.addheader('Accept', 'application/xhtml+xml')
request.addheader('User-Agent',
'Nokia6230/2.0+(04.43)+Profile/MIDP-2.0+'
'Configuration/CLDC-1.1+UP.Link/6.3.0.0.0')
response = request.open("http://www.google.co.uk/m")
content = response.read()
print getLinkFromXhtml(content, 'Maps with My Location')
soup = BeautifulSoup.BeautifulSoup(content)
#print soup.prettify()
# write the content to a file so it can be displayed in a browser
f = open("markup_ex11b_prettified.xml", "wb")
f.write(content)
f.close()
Defines the method getlLinkFromXhtml() which is later used to return the href
for a given regular expression, if it exists.
(Includes the code from markup example 07)
author: Julian Harty
edited: 03 December 2007
"""
import re
import sys
import urllib
import BeautifulSoup
from xml.dom import minidom
def getLinkFromXhtml(content, text_regex):
    """Return the href of the first <a> whose text matches text_regex.

    Args:
        content: the source content, e.g. an xHTML response (a string
            parseable by xml.dom.minidom).
        text_regex: the text to match, as a regular expression.

    Returns:
        The href attribute value (a string) if a matching link is
        found, else None.
    """
    doc = minidom.parseString(content)
    rx = re.compile(text_regex)
    for link in doc.getElementsByTagName('a'):
        if not link.hasAttribute('href'):
            continue
        # Concatenate the direct text children to form the visible label
        # (a label may be split across several text nodes).
        text = "".join(node.data for node in link.childNodes
                       if node.nodeType == node.TEXT_NODE)
        if rx.search(text):
            # Bug fix: the original returned the whole <a> element's XML
            # (str(i.toxml())) even though the docstring promises the
            # href; return the href attribute as documented.
            return link.getAttribute('href')
    return None
request = urllib.FancyURLopener()
request.addheader('Accept', 'application/xhtml+xml')
request.addheader('User-Agent',
'Nokia6230/2.0+(04.43)+Profile/MIDP-2.0+'
'Configuration/CLDC-1.1+UP.Link/6.3.0.0.0')
response = request.open("http://www.google.co.uk/m")
content = response.read()
print getLinkFromXhtml(content, 'Maps with My Location')
soup = BeautifulSoup.BeautifulSoup(content)
#print soup.prettify()
# write the content to a file so it can be displayed in a browser
f = open("markup_ex11b_prettified.xml", "wb")
f.write(content)
f.close()
Sample python code Exercise 04 prettified
""" Markup Example 04
Adding the accept header for xHTML
author: Julian Harty
edited: 03 December 2007
"""
import urllib
import BeautifulSoup
request = urllib.FancyURLopener()
request.addheader('Accept', 'application/xhtml+xml')
response = request.open("http://www.google.co.uk/m")
content = response.read()
# Display the formatted contents
soup = BeautifulSoup.BeautifulSoup(content)
print soup.prettify()
# write the content to a file so it can be displayed in a browser
f = open("markup_ex04_prettified.xml", "wb")
f.write(content)
f.close()
Mobile Wireless Test Automation at EuroSTAR2007
I will be presenting on this topic at the EuroSTAR 2007 conference as a 1/2 day tutorial on Tuesday 4th December 2007. I expect to learn as much as I present from the 70+ participants who have booked to join me. Thank you all :)
Subscribe to:
Posts (Atom)