from __future__ import division
import datetime
import os
import cStringIO  # *much* faster than StringIO
import urllib

from django.contrib.contenttypes.models import ContentType
from django.template.defaultfilters import slugify
from django.core.exceptions import ObjectDoesNotExist
from django.utils.encoding import force_unicode, smart_unicode
from django.conf import settings

# Required PIL classes may or may not be available from the root namespace
# depending on the installation
try:
    import Image
    import ImageFile
    import ImageFilter
    import ImageEnhance
except ImportError:
    try:
        from PIL import Image
        from PIL import ImageFile
        from PIL import ImageFilter
        from PIL import ImageEnhance
    except ImportError:
        raise ImportError("Could not import the Python Imaging Library.")

from strutils import safestr
from photos.models import Photo, PhotoGallery
# place_handler() needs the Place model; it is assumed to live in
# locations.models alongside Location -- adjust the import if it lives elsewhere.
from locations.models import Place

# Flickr Sync stuffs
API_KEY = settings.FLICKR_API_KEY
from APIClients import FlickrClient

# Fallback EXIF values used when Flickr doesn't return a field
EXIF_PARAMS = {
    "Aperture": 'f/2.8',
    "Make": 'Apple',
    "Model": 'iPhone',
    "Exposure": '',
    "ISO Speed": '',
    "Focal Length": '',
    "Shutter Speed": '',
    'Date and Time (Original)': '2008:07:03 22:44:25',
}


def sync_flickr_photos(*args, **kwargs):
    """Pull public photos from the Flickr stream and create Photo rows for any new ones."""
    cur_page = 1       # Start on the first page of the stream
    paginate_by = 100  # Get 100 photos at a time
    dupe = False       # Set our dupe flag for the following loop
    BASE_PATH = 'http://flickr.com/services/rest/'
    client = FlickrClient(BASE_PATH, API_KEY)
    data = client.flickr_people_getPublicPhotos(
        user_id=settings.FLICKR_USER_ID,
        page=cur_page,
        per_page=paginate_by,
        extras="date_upload,date_taken,geo"
    )
    #photos.photos.photo.reverse()
    for post in data.findall('photos/photo'):
        info = dict((k, smart_unicode(post.get(k))) for k in post.keys())
        try:
            row = Photo.objects.get(flickr_id=info['id'], flickr_secret=info['secret'])
            # If the row exists already, set the dupe flag
            dupe = True
            print 'already have ' + info['id'] + ' moving on'
        except ObjectDoesNotExist:
            taglist = []
            place = place_handler(force_unicode(info['latitude']) + "," + force_unicode(info['longitude']))
            details = client.flickr_photos_getInfo(user_id=settings.FLICKR_USER_ID, photo_id=force_unicode(info['id']))
            for t in details.findall('photo/tags/tag'):
                tag = dict((k, smart_unicode(t.get(k))) for k in t.keys())
                taglist.append(tag['raw'])
            exif = exif_handler(client.flickr_photos_getExif(user_id=API_KEY, photo_id=safestr(info['id'])))
            photo = Photo.objects.create(
                title = info['title'],
                flickr_id = info['id'],
                flickr_owner = info['owner'],
                flickr_server = info['server'],
                flickr_secret = info['secret'],
                flickr_originalsecret = force_unicode(details[0].attrib['originalsecret']),
                flickr_farm = info['farm'],
                pub_date = flickr_datetime_to_datetime(info['datetaken']),
                description = force_unicode(details[0].findtext('description')),
                exif_aperture = exif['Aperture'],
                exif_make = exif['Make'],
                exif_model = exif['Model'],
                exif_shutter = exif['Exposure'],
                exif_iso = exif['ISO Speed'],
                exif_lens = exif['Focal Length'],
                exif_date = flickr_datetime_to_datetime(exif["Date and Time (Original)"].replace(':', '-', 2)),
                gps = force_unicode(info['latitude']) + "," + force_unicode(info['longitude']),
                place = place,
                tags = ", ".join(t for t in taglist)
            )
            photo.save()
            make_local_size(photo)
def exif_handler(data):
    """Flatten Flickr's EXIF response into a dict, falling back to EXIF_PARAMS defaults."""
    converted = {}
    try:
        for t in data.findall('photo/exif'):
            e = dict((k, smart_unicode(t.get(k))) for k in t.keys())
            if safestr(e['label']) == "Aperture":
                if not converted.has_key("Aperture"):
                    converted["Aperture"] = safestr(t.findtext('clean'))
            else:
                converted[safestr(e['label'])] = safestr(t.findtext('raw'))
    except:
        pass
    # fill in anything Flickr didn't give us with the defaults
    for k, v in EXIF_PARAMS.items():
        if not converted.has_key(k):
            converted[k] = v
    return converted


def flickr_datetime_to_datetime(fdt):
    """Convert Flickr's 'YYYY-MM-DD HH:MM:SS' strings to datetime objects."""
    from datetime import datetime
    from time import strptime
    date_parts = strptime(fdt, '%Y-%m-%d %H:%M:%S')
    return datetime(*date_parts[0:6])


def place_handler(gps):
    """Return the Place whose bounds contain the given 'lat,lon' string, or the Default place."""
    place = Place.objects.all()
    count = Place.objects.count()
    num = 1
    for p in place:
        if p.within_bounds(gps) is True:
            return p
        elif p.within_bounds(gps) is False and count == num:
            return Place.objects.filter(name='Default').get()
        num += 1


ImageFile.MAXBLOCK = 1000000


def make_local_size(photo, set=None):
    """Download the original from Flickr and save a 210x210 center-cropped gallery thumbnail."""
    crop_dir = settings.IMAGES_ROOT + '/gallery_thumbs/'
    if not os.path.isdir(crop_dir):
        os.makedirs(crop_dir)
    remote = photo.get_original_url()
    print remote
    fname = urllib.urlopen(remote)
    im = cStringIO.StringIO(fname.read())  # constructs a StringIO holding the image
    img = Image.open(im)
    # calculate crop:
    cur_width, cur_height = img.size
    new_width, new_height = 210, 210
    ratio = max(float(new_width) / cur_width, float(new_height) / cur_height)
    x = (cur_width * ratio)
    y = (cur_height * ratio)
    xd = abs(new_width - x)
    yd = abs(new_height - y)
    x_diff = int(xd / 2)
    y_diff = int(yd / 2)
    box = (int(x_diff), int(y_diff), int(x_diff + new_width), int(y_diff + new_height))
    # create resized file
    resized = img.resize((int(x), int(y)), Image.ANTIALIAS).crop(box)
    # save resized file -- named for the gallery set when one is passed in;
    # sync_flickr_photos() calls this with just the photo (the original always
    # referenced set.id, which raised a TypeError there), so fall back to the
    # photo's flickr_id in that case
    if set is not None:
        resized_filename = '%s/%s.jpg' % (crop_dir, set.id)
    else:
        resized_filename = '%s/%s.jpg' % (crop_dir, photo.flickr_id)
    try:
        if img.format == 'JPEG':
            resized.save(resized_filename, 'JPEG', quality=95, optimize=True)
        else:
            resized.save(resized_filename)
    except IOError, e:
        if os.path.isfile(resized_filename):
            os.unlink(resized_filename)
        raise e
    #os.unlink(img)
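# Worked example of the crop math above (illustrative numbers only): for a
# 640x480 original, ratio = max(210/640, 210/480) = 0.4375, so the image is
# resized to 280x210; xd = 70 and yd = 0, giving box = (35, 0, 245, 210),
# i.e. a centered 210x210 crop.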
def sync_sets(*args, **kwargs):
    """Mirror Flickr photosets into PhotoGallery objects."""
    dupe = False  # Set our dupe flag for the following loop
    BASE_PATH = 'http://flickr.com/services/rest/'
    client = FlickrClient(BASE_PATH, API_KEY)
    data = client.flickr_photosets_getList(user_id=settings.FLICKR_USER_ID)
    nodes = data.findall('photosets/photoset')
    for post in nodes:
        info = dict((k, smart_unicode(post.get(k))) for k in post.keys())
        try:
            row = PhotoGallery.objects.get(set_id__exact=info['id'])
            # okay it already exists, but is it up-to-date?
            #get_photos_in_set(row, set.id)
        except ObjectDoesNotExist:
            s = PhotoGallery.objects.create(
                set_id = force_unicode(info['id']),
                set_title = force_unicode(post.findtext('title')),
                set_desc = force_unicode(post.findtext('description')),
                set_slug = slugify(force_unicode(post.findtext('title'))),
                primary = force_unicode(info['primary']),
            )
            get_photos_in_set(s)


def get_photos_in_set(set):
    """Attach the already-synced Photo rows to a PhotoGallery."""
    BASE_PATH = 'http://flickr.com/services/rest/'
    client = FlickrClient(BASE_PATH, API_KEY)
    data = client.flickr_photosets_getPhotos(user_id=settings.FLICKR_USER_ID, photoset_id=str(set.set_id))
    for post in data.findall('photoset/photo'):
        info = dict((k, smart_unicode(post.get(k))) for k in post.keys())
        photo = Photo.objects.get(flickr_id__exact=str(info['id']))
        set.photos.add(photo)


"""
def sync_flickr_comments(*args, **kwargs):
    cur_page = 1       # Start on the first page of the stream
    paginate_by = 100  # Get 100 photos at a time
    inc = 1            # running count of photos processed
    # Get our flickr client running
    client = FlickrClient(settings.FLICKR_API_KEY)
    # get total number of photos in stream
    total = client.flickr_people_getInfo(user_id=settings.FLICKR_USER_ID)
    total_num = safestr(total.person.photos.count)
    while (inc < int(total_num)):
        photos = client.flickr_people_getPublicPhotos(user_id=settings.FLICKR_USER_ID, page=cur_page, per_page=paginate_by, extras="date_upload,date_taken,geo")
        incr = 1
        for photo in photos.photos:
            do_photo_comments(photo("id"), photo("secret"))
            inc = inc + 1
            incr = incr + 1
            print cur_page
            if incr == 100:
                cur_page = cur_page + 1


def do_photo_comments(id, secret):
    # Get our flickr client running
    client = FlickrClient(settings.FLICKR_API_KEY)
    photo = Photo.objects.get(flickr_id=id, flickr_secret=secret)
    comments = client.flickr_photos_comments_getList(user_id=settings.FLICKR_USER_ID, photo_id=id)
    for item in comments.comments:
        try:
            print item('authorname')
            dt = datetime.datetime.fromtimestamp(float(item('datecreate')))
            ctype = ContentType.objects.get_for_model(Photo)
            try:
                f = FreeComment.objects.get(content_type=ctype, object_id=photo.id, submit_date=dt)
                #print f.id
            except ObjectDoesNotExist:
                if safestr(item('authorname')) == 'luxagraf':
                    mail = 'hyper@luxagraf.net'
                else:
                    mail = slugify(item('authorname')) + '@flickr.com'
                c = FreeComment.objects.create(
                    content_type = ctype,
                    object_id = photo.id,
                    comment = safestr(item),
                    person_name = safestr(item('authorname')),
                    person_email = mail,
                    person_url = item('permalink'),
                    is_public = True,
                    site_id = 1,
                    approved = 0,
                    submit_date = dt
                )
        except AttributeError:
            pass


def get_country_name(lat, long):
    name = GeoClient.findCountrySubdivision(lat, long, http_proxy=None)
    if name.countrySubdivision.countryCode.PCDATA == "US":
        r_name = "US-%s" % (force_unicode(STATE_LIST[name.countrySubdivision.adminCode1.PCDATA]))
    else:
        r_name = force_unicode(name.countrySubdivision.countryName.PCDATA)
    return r_name


def create_size(self, photosize):
    if self.size_exists(photosize):
        return
    if not os.path.isdir(self.cache_path()):
        os.makedirs(self.cache_path())
    try:
        im = Image.open(self.get_image_filename())
    except IOError:
        return
    if im.size == photosize.size():
        shutil.copy(self.get_image_filename(), self._get_SIZE_path(photosize))
        return
    cur_width, cur_height = im.size
    new_width, new_height = photosize.size()
    if photosize.crop:
        ratio = max(float(new_width) / cur_width, float(new_height) / cur_height)
        x = (cur_width * ratio)
        y = (cur_height * ratio)
        xd = abs(new_width - x)
        yd = abs(new_height - y)
        x_diff = int(xd / 2)
        y_diff = int(yd / 2)
        if self.crop_from == 'top':
            box = (int(x_diff), 0, int(x_diff + new_width), new_height)
        elif self.crop_from == 'left':
            box = (0, int(y_diff), new_width, int(y_diff + new_height))
        elif self.crop_from == 'bottom':
            box = (int(x_diff), int(yd), int(x_diff + new_width), int(y))  # y - yd = new_height
        elif self.crop_from == 'right':
            box = (int(xd), int(y_diff), int(x), int(y_diff + new_height))  # x - xd = new_width
        else:
            box = (int(x_diff), int(y_diff), int(x_diff + new_width), int(y_diff + new_height))
        resized = im.resize((int(x), int(y)), Image.ANTIALIAS).crop(box)
    else:
        if not new_width == 0 and not new_height == 0:
            if cur_width > cur_height:
                ratio = float(new_width) / cur_width
            else:
                ratio = float(new_height) / cur_height
        else:
            if new_width == 0:
                ratio = float(new_height) / cur_height
            else:
                ratio = float(new_width) / cur_width
        resized = im.resize((int(cur_width * ratio), int(cur_height * ratio)), Image.ANTIALIAS)
    # Apply effect if found
    if self.effect is not None:
        resized = self.effect.process(resized)
    elif photosize.effect is not None:
        resized = photosize.effect.process(resized)
    # save resized file
    resized_filename = getattr(self, "get_%s_path" % photosize.name)()
    try:
        if im.format == 'JPEG':
            resized.save(resized_filename, 'JPEG', quality=int(photosize.quality), optimize=True)
        else:
            resized.save(resized_filename)
    except IOError, e:
        if os.path.isfile(resized_filename):
            os.unlink(resized_filename)
        raise e
"""

"""
for p in photos.photos.photo:
    try:
        row = Photo.objects.get(flickr_id=p.id, flickr_secret=p.secret)
        # If the row exists already, set the dupe flag
        dupe = True
        #print 'already have ' + p.id + ' moving on'
    except ObjectDoesNotExist:
        # assign the photo to a place, uses "default" if the photo isn't near anything
        place = place_handler(force_unicode(p.latitude) + "," + force_unicode(p.longitude))
        # Grab tags
        tags = client.flickr_photos_getInfo(user_id=settings.FLICKR_USER_ID, photo_id=force_unicode(p.id))
        # grab exif data
        exif = exif_handler(client.flickr_photos_getExif(user_id=API_KEY, photo_id=safestr(p.id)))
        # sort the exif data if it's available
        if exif.has_key("Aperture"):
            aperture = exif["Aperture"]
        else:
            aperture = ''
        if exif.has_key("Make"):
            make = exif["Make"]
        else:
            make = ''
        if exif.has_key("Model"):
            model = exif["Model"]
        else:
            model = ''
        if exif.has_key("Exposure"):
            exposure = exif["Exposure"]
        else:
            exposure = ''
        if exif.has_key("ISO Speed"):
            iso = exif["ISO Speed"]
        else:
            iso = ''
        if exif.has_key("Focal Length"):
            length = exif["Focal Length"]
        else:
            length = ''
        if exif.has_key("Date and Time (Original)"):
            dto = flickr_datetime_to_datetime(exif["Date and Time (Original)"].replace(':', '-', 2))
        else:
            dto = flickr_datetime_to_datetime(p.datetaken)
        photo = Photo.objects.create(
            title = p.title,
            flickr_id = p.id,
            flickr_owner = p.owner,
            flickr_server = p.server,
            flickr_secret = p.secret,
            flickr_originalsecret = tags.photo.originalsecret,
            flickr_farm = p.farm,
            pub_date = flickr_datetime_to_datetime(p.datetaken),
            description = force_unicode(tags.photo.description.PCDATA),
            exif_aperture = aperture,
            exif_make = make,
            exif_model = model,
            exif_shutter = exposure,
            exif_iso = iso,
            exif_lens = length,
            exif_date = dto,
            gps = force_unicode(p.latitude) + "," + force_unicode(p.longitude),
            place = place,
            tags = str(", ".join(t.raw for t in tags.photo.tags.tag)).lower()
        )
        make_local_size(photo)
        #if (dupe):
        #    break
"""

from locations.models import Location
from photos.models import Photo
from django.contrib.gis.geos import Point


def find_loc(photos):
    """Assign each photo to the Location whose polygon contains its lon/lat point."""
    for photo in photos:
        p = Point(photo.lon, photo.lat, srid=4326)
        try:
            loc = Location.objects.filter(geometry__contains=p).get()
            photo.location = loc
            print photo.id
            photo.save()
        except Location.DoesNotExist:
            print "photo %s does not fall within any existing location" % (photo.id)
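# Example usage (a sketch, not part of the original module): these sync
# functions are typically run from a cron job or the Django shell. The module
# path and the `location__isnull` filter below are assumptions -- adjust them
# to match where this file lives and how unplaced photos are flagged.
#
#   from photos import flickr_sync   # hypothetical module name for this file
#   flickr_sync.sync_flickr_photos()  # pull new public photos into Photo rows
#   flickr_sync.sync_sets()           # mirror Flickr photosets into PhotoGallery
#   flickr_sync.find_loc(Photo.objects.filter(location__isnull=True))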