Geographical pronunciation statistics
In [ ]:
import pandas
import MySQLdb
import numpy
import json

db = MySQLdb.connect(user='root', passwd='Nmmxhjgt1@', db='stimmen', charset='utf8')

%matplotlib notebook

from matplotlib import pyplot
import folium
from IPython.display import display
from shapely.geometry import Polygon, MultiPolygon, shape, Point
from jupyter_progressbar import ProgressBar
from collections import defaultdict, Counter
from ipy_table import make_table
from html import escape
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from sklearn import mixture
from skimage.measure import find_contours
from random import shuffle
In [ ]:
# Borders of Frisian municipalities
with open('Friesland_AL8.GeoJson') as f:
    gemeentes = json.load(f)
In [ ]:
shapes = [shape(feature['geometry']) for feature in gemeentes['features']]
gemeente_names = [feature['properties']['name'] for feature in gemeentes['features']]


def get_gemeente(point):
    # Return the index of the municipality polygon that contains the point, or -1.
    for i, gemeente_shape in enumerate(shapes):
        if gemeente_shape.contains(point):
            return i
    return -1
In [ ]:
# Answers to how participants state a word should be pronounced.
answers = pandas.read_sql('''
SELECT prediction_quiz_id, user_lat, user_lng, question_text, answer_text
FROM core_surveyresult as survey
INNER JOIN core_predictionquizresult as result
    ON survey.id = result.survey_result_id
INNER JOIN core_predictionquizresultquestionanswer as answer
    ON result.id = answer.prediction_quiz_id
''', db)
In [ ]:
# Takes approximately 2 minutes
gemeente_map = {
    (lng, lat): get_gemeente(Point(lng, lat))
    for lng, lat in set(zip(answers['user_lng'], answers['user_lat']))
}
answers['gemeente'] = [
    gemeente_map[(lng, lat)]
    for lat, lng in zip(answers['user_lat'], answers['user_lng'])
]
Mapping pronunciations
The idea is to plot each pronunciation as a point in its own color; at the moment the result mostly reflects participation density rather than differences between pronunciations.
This is slow, so we started with just the first question.
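As a rough sketch of that idea (not part of the original analysis; it only assumes the `answers` DataFrame and the imports from the cells above), the responses to a single question can be scattered with one color per answer:

In [ ]:
# Sketch only: scatter the answers to the first question, one color per answer.
# Uses the `answers` DataFrame queried above (user_lat, user_lng, answer_text).
question, rows = next(iter(answers.groupby('question_text')))

cmap = plt.get_cmap('tab10')
for i, (answer, rows_) in enumerate(rows.groupby('answer_text')):
    plt.plot(rows_['user_lng'], rows_['user_lat'], '.',
             color=cmap(i % 10), label='{} ({})'.format(answer, len(rows_)))
plt.legend(fontsize='small')
plt.xlabel('longitude')
plt.ylabel('latitude')
plt.title(question)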
In [ ]:
# cmap = pyplot.get_cmap('gist_rainbow')
# std = (1.89, 1.35)
# for _, (question, rows) in zip(range(3), answers.groupby('question_text')):
#     plt.figure()
#     n_answers = len(rows.groupby('answer_text').count())
#     colors = cmap(range(256))[::256 // n_answers]
#     for (answer, rows_), color in zip(rows.groupby('answer_text'), colors):
#         if len(rows_) < 100:
#             continue
#         color = '#%02x%02x%02x' % tuple(int(c*255) for c in color[:3])
#         X = rows_[['user_lat', 'user_lng']].as_matrix()
#         clf = mixture.GaussianMixture(n_components=5, covariance_type='full')
#         clf.fit(X)
#         xlim = numpy.percentile(X[:, 0], [1, 99.5])
#         ylim = numpy.percentile(X[:, 1], [1, 99.5])
#         xlim = [2*xlim[0] - xlim[1], 2*xlim[1] - xlim[0]]
#         ylim = [2*ylim[0] - ylim[1], 2*ylim[1] - ylim[0]]
#         x = np.linspace(*xlim, 1000)
#         y = np.linspace(*ylim, 1000)
#         xx, yy = np.meshgrid(x, y)
#         xxyy = np.array([xx.ravel(), yy.ravel()]).T
#         z = np.exp(clf.score_samples(xxyy))
#         z = z.reshape(xx.shape)
#         z_sorted = sorted(z.ravel(), reverse=True)
#         z_sorted_cumsum = np.cumsum(z_sorted)
#         split = np.where(z_sorted_cumsum > (z_sorted_cumsum[-1] * 0.5))[0][0]
#         threshold = z_sorted[split]
#         threshold
#
# #         p = list(range(0, 100, 5))
#         p = [80]
#         plt.contour(xx, yy, z, levels=[threshold], colors=[color])
#         plt.plot(X[:, 0], X[:, 1], '.', c=color)
#         plt.xlim(*xlim)
#         plt.ylim(*ylim)
In [ ]:
# Questions whose coordinates show no spread at all (std == 0 for both lat and lng).
zero_latlng_questions = {
    q
    for q, row in answers.groupby('question_text').agg('std').iterrows()
    if row['user_lat'] == 0 and row['user_lng'] == 0
}
answers_filtered = answers[answers['question_text'].map(lambda x: x not in zero_latlng_questions)]
In [ ]:
# Question text without characters that are awkward in filenames and URLs.
answers_filtered['question_text_url'] = answers_filtered['question_text'].map(
    lambda x: x.replace('"', '').replace('*', ''))
In [ ]:
def get_palette(n, no_black=True, no_white=True):
    # Read n visually distinct colors from the Glasbey palette files,
    # optionally skipping black and white.
    with open('glasbey/{}_colors.txt'.format(n + no_black + no_white)) as f:
        return [
            '#%02x%02x%02x' % tuple(int(c) for c in line.replace('\n', '').split(','))
            for line in f
            if not no_black or line != '0,0,0\n'
            if not no_white or line != '255,255,255\n'
        ]
In [ ]:
# Note: relies on `rows` left over from a run of the mapping loop below;
# it fails on a fresh kernel if executed top to bottom.
options = [x[1] for x in sorted([
    (row['user_lng'], answer_text)
    for answer_text, row in rows.groupby('answer_text').agg({'user_lng': 'count'}).iterrows()
], reverse=True)]
groups = [options[:len(options) // 2], options[len(options) // 2:]]
groups
In [ ]:
80000 / 350
In [ ]:
import glob

with open('index.html', 'w') as f:
    f.write('<html><head></head><body>' + '<br/>\n'.join(
        '\t<a href="http://herbertkruitbosch.com/pronunciation_maps/{}">{}</a>'.format(
            fn, fn[:-5].replace('_', ' '))
        for fn in sorted(
            glob.glob('*_all.html') + glob.glob('*_larger.html') + glob.glob('*_smaller.html')
        )
    ) + "</body></html>")
In [ ]:
# cmap = pyplot.get_cmap('gist_rainbow')
# colors = pyplot.get_cmap('tab20')
# colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#d2f53c', '#fabebe', '#008080', '#e6beff', '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000080', '#808080']
std = (1.89, 1.35)
colormap = {}

for question, rows in answers_filtered.groupby('question_text_url'):
    # question = rows['question_text_url'][0]
    n_answers = len(rows.groupby('answer_text').count())
    # Answer options, sorted by how often they were given.
    options = [x[1] for x in sorted([
        (row['user_lng'], answer_text)
        for answer_text, row in rows.groupby('answer_text').agg({'user_lng': 'count'}).iterrows()
    ], reverse=True)]
    groups = [options]
    if n_answers > 6:
        groups.extend([options[:6], options[6:]])
    for group, group_name in zip(groups, ['all', 'larger', 'smaller']):
        m = folium.Map((rows['user_lat'].median(), rows['user_lng'].median()),
                       tiles='stamentoner', zoom_start=9)
        # colors = cmap(range(256))[::256 // n_answers]
        colors = get_palette(len(group))
        for answer, color in zip(group, colors):
            rows_ = rows[rows['answer_text'] == answer]
            # color = '#%02x%02x%02x' % tuple(int(c*255) for c in color[:3])
            name = '<span style=\\"color:{}; \\">{} ({})'.format(color, escape(answer), len(rows_))
            feature_group = folium.FeatureGroup(name=name)
            colormap[name] = color
            for point in zip(rows_['user_lat'], rows_['user_lng']):
                # Jitter points slightly so overlapping answers stay visible.
                point = tuple(p + 0.01 * s * numpy.random.randn() for p, s in zip(point, std))
                folium.Circle(
                    point,
                    color=None,
                    fill_color=color,
                    radius=400 * min(1, 100 / len(rows_)),
                    fill_opacity=1  # 1 - 0.5 * len(rows_) / len(rows)
                ).add_to(feature_group)
            feature_group.add_to(m)
        folium.map.LayerControl('topright', collapsed=False).add_to(m)
        print(group_name, question)
        if group_name == 'larger':
            display(m)
        m.save('{}_{}.html'.format(question, group_name))