| INSTRUCTION | RESPONSE |
|---|---|
gate ch a,b { h b; sdg b; cx a,b; h b; t b; cx a,b; t b; h b; s b; x b; s a; }
|
def _define(self):
"""
gate ch a,b {
h b;
sdg b;
cx a,b;
h b;
t b;
cx a,b;
t b;
h b;
s b;
x b;
s a;}
"""
definition = []
q = QuantumRegister(2, "q")
rule = [
(HGate(), [q[1]], []),
(SdgGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(HGate(), [q[1]], []),
(TGate(), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(TGate(), [q[1]], []),
(HGate(), [q[1]], []),
(SGate(), [q[1]], []),
(XGate(), [q[1]], []),
(SGate(), [q[0]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
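# Hedged usage sketch (not part of the gate class above): the rule above is the
# decomposition applied when a controlled-Hadamard is unrolled; here we simply
# append one to a circuit, assuming the 0.8-era QuantumCircuit.ch API.
from qiskit import QuantumCircuit, QuantumRegister

q = QuantumRegister(2, "q")
circ = QuantumCircuit(q)
circ.ch(q[0], q[1])   # control on q[0], target on q[1]
print(circ)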
|
The backend configuration widget.
|
def config_tab(backend):
"""The backend configuration widget.
Args:
backend (IBMQbackend): The backend.
Returns:
grid: A GridBox widget.
"""
status = backend.status().to_dict()
config = backend.configuration().to_dict()
config_dict = {**status, **config}
upper_list = ['n_qubits', 'operational',
'status_msg', 'pending_jobs',
'basis_gates', 'local', 'simulator']
lower_list = list(set(config_dict.keys()).difference(upper_list))
# Remove gates because they are in a different tab
lower_list.remove('gates')
upper_str = "<table>"
upper_str += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
footer = "</table>"
# Upper HBox widget data
upper_str += "<tr><th>Property</th><th>Value</th></tr>"
for key in upper_list:
upper_str += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td></tr>" % (
key, config_dict[key])
upper_str += footer
upper_table = widgets.HTML(
value=upper_str, layout=widgets.Layout(width='100%', grid_area='left'))
image_widget = widgets.Output(
layout=widgets.Layout(display='flex-inline', grid_area='right',
padding='10px 10px 10px 10px',
width='auto', max_height='300px',
align_items='center'))
if not config['simulator']:
with image_widget:
gate_map = plot_gate_map(backend)
display(gate_map)
plt.close(gate_map)
lower_str = "<table>"
lower_str += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
lower_str += "<tr><th></th><th></th></tr>"
for key in lower_list:
if key != 'name':
lower_str += "<tr><td>%s</td><td>%s</td></tr>" % (
key, config_dict[key])
lower_str += footer
lower_table = widgets.HTML(value=lower_str,
layout=widgets.Layout(
width='auto',
grid_area='bottom'))
grid = widgets.GridBox(children=[upper_table, image_widget, lower_table],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='25% 25% 25% 25%',
grid_template_areas='''
"left right right right"
"bottom bottom bottom bottom"
''',
grid_gap='0px 0px'))
return grid
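# Hedged usage sketch: rendering the widget requires a Jupyter session with
# ipywidgets and an IBM Q account; the account-loading call and the backend
# name 'ibmqx4' are era-appropriate placeholders, not fixed requirements.
from IPython.display import display
from qiskit import IBMQ

IBMQ.load_accounts()
backend = IBMQ.get_backend('ibmqx4')
display(config_tab(backend))   # render the GridBox in the notebook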
|
The qubit properties widget.
|
def qubits_tab(backend):
"""The qubits properties widget
Args:
backend (IBMQbackend): The backend.
Returns:
VBox: A VBox widget.
"""
props = backend.properties().to_dict()
header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
header_html = header_html.format(key='last_update_date',
value=props['last_update_date'])
update_date_widget = widgets.HTML(value=header_html)
qubit_html = "<table>"
qubit_html += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
qubit_html += "<tr><th></th><th>Frequency</th><th>T1</th><th>T2</th>"
qubit_html += "<th>U1 gate error</th><th>U2 gate error</th><th>U3 gate error</th>"
qubit_html += "<th>Readout error</th></tr>"
qubit_footer = "</table>"
for qub in range(len(props['qubits'])):
name = 'Q%s' % qub
qubit_data = props['qubits'][qub]
gate_data = props['gates'][3*qub:3*qub+3]
t1_info = qubit_data[0]
t2_info = qubit_data[1]
freq_info = qubit_data[2]
readout_info = qubit_data[3]
freq = str(round(freq_info['value'], 5))+' '+freq_info['unit']
T1 = str(round(t1_info['value'], # pylint: disable=invalid-name
5))+' ' + t1_info['unit']
T2 = str(round(t2_info['value'], # pylint: disable=invalid-name
5))+' ' + t2_info['unit']
# pylint: disable=invalid-name
U1 = str(round(gate_data[0]['parameters'][0]['value'], 5))
# pylint: disable=invalid-name
U2 = str(round(gate_data[1]['parameters'][0]['value'], 5))
# pylint: disable=invalid-name
U3 = str(round(gate_data[2]['parameters'][0]['value'], 5))
readout_error = round(readout_info['value'], 5)
qubit_html += "<tr><td><font style='font-weight:bold'>%s</font></td><td>%s</td>"
qubit_html += "<td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>"
qubit_html = qubit_html % (name, freq, T1, T2, U1, U2, U3, readout_error)
qubit_html += qubit_footer
qubit_widget = widgets.HTML(value=qubit_html)
out = widgets.VBox([update_date_widget,
qubit_widget])
return out
|
The multiple qubit gate error widget.
|
def gates_tab(backend):
"""The multiple qubit gate error widget.
Args:
backend (IBMQbackend): The backend.
Returns:
VBox: A VBox widget.
"""
config = backend.configuration().to_dict()
props = backend.properties().to_dict()
multi_qubit_gates = props['gates'][3*config['n_qubits']:]
header_html = "<div><font style='font-weight:bold'>{key}</font>: {value}</div>"
header_html = header_html.format(key='last_update_date',
value=props['last_update_date'])
update_date_widget = widgets.HTML(value=header_html,
layout=widgets.Layout(grid_area='top'))
gate_html = "<table>"
gate_html += """<style>
table {
border-collapse: collapse;
width: auto;
}
th, td {
text-align: left;
padding: 8px;
}
tr:nth-child(even) {background-color: #f6f6f6;}
</style>"""
gate_html += "<tr><th></th><th>Type</th><th>Gate error</th></tr>"
gate_footer = "</table>"
# Split gates into two columns
left_num = math.ceil(len(multi_qubit_gates)/3)
mid_num = math.ceil((len(multi_qubit_gates)-left_num)/2)
left_table = gate_html
for qub in range(left_num):
gate = multi_qubit_gates[qub]
name = gate['name']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
left_table += "<tr><td><font style='font-weight:bold'>%s</font>"
left_table += "</td><td>%s</td><td>%s</td></tr>"
left_table = left_table % (name, ttype, error)
left_table += gate_footer
middle_table = gate_html
for qub in range(left_num, left_num+mid_num):
gate = multi_qubit_gates[qub]
name = gate['name']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
middle_table += "<tr><td><font style='font-weight:bold'>%s</font>"
middle_table += "</td><td>%s</td><td>%s</td></tr>"
middle_table = middle_table % (name, ttype, error)
middle_table += gate_footer
right_table = gate_html
for qub in range(left_num+mid_num, len(multi_qubit_gates)):
gate = multi_qubit_gates[qub]
name = gate['name']
ttype = gate['gate']
error = round(gate['parameters'][0]['value'], 5)
right_table += "<tr><td><font style='font-weight:bold'>%s</font>"
right_table += "</td><td>%s</td><td>%s</td></tr>"
right_table = right_table % (name, ttype, error)
right_table += gate_footer
left_table_widget = widgets.HTML(value=left_table,
layout=widgets.Layout(grid_area='left'))
middle_table_widget = widgets.HTML(value=middle_table,
layout=widgets.Layout(grid_area='middle'))
right_table_widget = widgets.HTML(value=right_table,
layout=widgets.Layout(grid_area='right'))
grid = widgets.GridBox(children=[update_date_widget,
left_table_widget,
middle_table_widget,
right_table_widget],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='33% 33% 33%',
grid_template_areas='''
"top top top"
"left middle right"
''',
grid_gap='0px 0px'))
return grid
|
Widget for displaying detailed noise map.
|
def detailed_map(backend):
"""Widget for displaying detailed noise map.
Args:
backend (IBMQbackend): The backend.
Returns:
GridBox: Widget holding noise map images.
"""
props = backend.properties().to_dict()
config = backend.configuration().to_dict()
single_gate_errors = [q['parameters'][0]['value']
for q in props['gates'][2:3*config['n_qubits']:3]]
single_norm = matplotlib.colors.Normalize(
vmin=min(single_gate_errors), vmax=max(single_gate_errors))
q_colors = [cm.viridis(single_norm(err)) for err in single_gate_errors]
cmap = config['coupling_map']
cx_errors = []
for line in cmap:
for item in props['gates'][3*config['n_qubits']:]:
if item['qubits'] == line:
cx_errors.append(item['parameters'][0]['value'])
break
else:
continue
cx_norm = matplotlib.colors.Normalize(
vmin=min(cx_errors), vmax=max(cx_errors))
line_colors = [cm.viridis(cx_norm(err)) for err in cx_errors]
single_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='left',
align_items='center'))
cmap_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='top',
width='auto', height='auto',
align_items='center'))
cx_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='right',
align_items='center'))
tick_locator = mpl.ticker.MaxNLocator(nbins=5)
with cmap_widget:
noise_map = plot_gate_map(backend, qubit_color=q_colors,
line_color=line_colors,
qubit_size=28,
plot_directed=True)
width, height = noise_map.get_size_inches()
noise_map.set_size_inches(1.25*width, 1.25*height)
display(noise_map)
plt.close(noise_map)
with single_widget:
cbl_fig = plt.figure(figsize=(3, 1))
ax1 = cbl_fig.add_axes([0.05, 0.80, 0.9, 0.15])
single_cb = mpl.colorbar.ColorbarBase(ax1, cmap=cm.viridis,
norm=single_norm,
orientation='horizontal')
single_cb.locator = tick_locator
single_cb.update_ticks()
ax1.set_title('Single-qubit U3 error rate')
display(cbl_fig)
plt.close(cbl_fig)
with cx_widget:
cx_fig = plt.figure(figsize=(3, 1))
ax2 = cx_fig.add_axes([0.05, 0.80, 0.9, 0.15])
cx_cb = mpl.colorbar.ColorbarBase(ax2, cmap=cm.viridis,
norm=cx_norm,
orientation='horizontal')
cx_cb.locator = tick_locator
cx_cb.update_ticks()
ax2.set_title('CNOT error rate')
display(cx_fig)
plt.close(cx_fig)
out_box = widgets.GridBox([single_widget, cmap_widget, cx_widget],
layout=widgets.Layout(
grid_template_rows='auto auto',
grid_template_columns='33% 33% 33%',
grid_template_areas='''
"top top top"
"left . right"
''',
grid_gap='0px 0px'))
return out_box
|
Widget for displaying job history.
|
def job_history(backend):
"""Widget for displaying job history
Args:
backend (IBMQbackend): The backend.
Returns:
Tab: A tab widget for history images.
"""
year = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
month = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
week = widgets.Output(layout=widgets.Layout(display='flex-inline',
align_items='center',
min_height='400px'))
tabs = widgets.Tab(layout=widgets.Layout(max_height='620px'))
tabs.children = [year, month, week]
tabs.set_title(0, 'Year')
tabs.set_title(1, 'Month')
tabs.set_title(2, 'Week')
tabs.selected_index = 1
_build_job_history(tabs, backend)
return tabs
|
Plots the job history of the user from the given list of jobs.
|
def plot_job_history(jobs, interval='year'):
"""Plots the job history of the user from the given list of jobs.
Args:
jobs (list): A list of jobs with type IBMQjob.
interval (str): Interval over which to examine.
Returns:
fig: A Matplotlib figure instance.
"""
def get_date(job):
"""Returns a datetime object from a IBMQJob instance.
Args:
job (IBMQJob): A job.
Returns:
dt: A datetime object.
"""
return datetime.datetime.strptime(job.creation_date(),
'%Y-%m-%dT%H:%M:%S.%fZ')
current_time = datetime.datetime.now()
if interval == 'year':
bins = [(current_time - datetime.timedelta(days=k*365/12))
for k in range(12)]
elif interval == 'month':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(30)]
elif interval == 'week':
bins = [(current_time - datetime.timedelta(days=k)) for k in range(7)]
binned_jobs = [0]*len(bins)
if interval == 'year':
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
else:
for job in jobs:
for ind, dat in enumerate(bins):
date = get_date(job)
if date.day == dat.day and date.month == dat.month:
binned_jobs[ind] += 1
break
else:
continue
nz_bins = []
nz_idx = []
for ind, val in enumerate(binned_jobs):
if val != 0:
nz_idx.append(ind)
nz_bins.append(val)
total_jobs = sum(binned_jobs)
colors = ['#003f5c', '#ffa600', '#374c80', '#ff764a',
'#7a5195', '#ef5675', '#bc5090']
if interval == 'year':
labels = ['{}-{}'.format(str(bins[b].year)[2:], bins[b].month) for b in nz_idx]
else:
labels = ['{}-{}'.format(bins[b].month, bins[b].day) for b in nz_idx]
fig, ax = plt.subplots(1, 1, figsize=(5, 5)) # pylint: disable=invalid-name
ax.pie(nz_bins[::-1], labels=labels, colors=colors, textprops={'fontsize': 14},
rotatelabels=True, counterclock=False)
ax.add_artist(Circle((0, 0), 0.7, color='white', zorder=1))
ax.text(0, 0, total_jobs, horizontalalignment='center',
verticalalignment='center', fontsize=26)
fig.tight_layout()
return fig
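# Hedged usage sketch: assumes `backend` is an IBMQBackend obtained from the
# provider; backend.jobs() queries the IBM Q API, so an account and network
# access are required, and the limit value here is arbitrary.
jobs = backend.jobs(limit=50)
fig = plot_job_history(jobs, interval='month')
fig.savefig('job_history.png')   # save the pie chart of recent jobs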
|
Return a new circuit that has been optimized.
|
def run(self, dag):
"""Return a new circuit that has been optimized."""
resets = dag.op_nodes(Reset)
for reset in resets:
predecessor = next(dag.predecessors(reset))
if predecessor.type == 'in':
dag.remove_op_node(reset)
return dag
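# Hedged usage sketch: assuming this run() method belongs to a
# TransformationPass subclass (e.g. qiskit's RemoveResetInZeroState), it can
# be scheduled through a PassManager as below.
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.transpiler import PassManager
from qiskit.transpiler.passes import RemoveResetInZeroState

q = QuantumRegister(1)
circ = QuantumCircuit(q)
circ.reset(q[0])   # reset acting on a fresh |0> qubit is redundant
circ.h(q[0])

pm = PassManager()
pm.append(RemoveResetInZeroState())
optimized = pm.run(circ)   # the leading reset is removed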
|
Plot the interpolated envelope of the pulse.
|
def draw(self, **kwargs):
"""Plot the interpolated envelope of pulse.
Keyword Args:
dt (float): Time interval of samples.
interp_method (str): Method of interpolation
(set `None` to turn off interpolation).
filename (str): Name required to save pulse image.
interactive (bool): When set true, show the plot in a new window
(this depends on the matplotlib backend being used supporting this).
dpi (int): Resolution of saved image.
nop (int): Data points for interpolation.
size (tuple): Size of figure.
"""
from qiskit.tools.visualization import pulse_drawer
return pulse_drawer(self._samples, self.duration, **kwargs)
|
Apply cu3 from ctl to tgt with angles theta, phi, lam.
|
def cu3(self, theta, phi, lam, ctl, tgt):
"""Apply cu3 from ctl to tgt with angle theta, phi, lam."""
return self.append(Cu3Gate(theta, phi, lam), [ctl, tgt], [])
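# Minimal usage sketch of the cu3 wrapper above (0.8-era circuit API assumed).
from math import pi
from qiskit import QuantumCircuit, QuantumRegister

q = QuantumRegister(2)
circ = QuantumCircuit(q)
circ.cu3(pi / 2, 0, pi, q[0], q[1])   # controlled-U3(theta, phi, lambda)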
|
gate cu3(theta,phi,lambda) c,t { u1((lambda-phi)/2) t; cx c,t; u3(-theta/2,0,-(phi+lambda)/2) t; cx c,t; u3(theta/2,phi,0) t; }
|
def _define(self):
"""
gate cu3(theta,phi,lambda) c, t
{ u1((lambda-phi)/2) t; cx c,t;
u3(-theta/2,0,-(phi+lambda)/2) t; cx c,t;
u3(theta/2,phi,0) t;
}
"""
definition = []
q = QuantumRegister(2, "q")
rule = [
(U1Gate((self.params[2] - self.params[1]) / 2), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(U3Gate(-self.params[0] / 2, 0, -(self.params[1] + self.params[2]) / 2), [q[1]], []),
(CnotGate(), [q[0], q[1]], []),
(U3Gate(self.params[0] / 2, self.params[1], 0), [q[1]], [])
]
for inst in rule:
definition.append(inst)
self.definition = definition
|
Returns a circuit putting 2 qubits in the Bell state.
|
def build_bell_circuit():
"""Returns a circuit putting 2 qubits in the Bell state."""
q = QuantumRegister(2)
c = ClassicalRegister(2)
qc = QuantumCircuit(q, c)
qc.h(q[0])
qc.cx(q[0], q[1])
qc.measure(q, c)
return qc
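# Hedged usage sketch: run the Bell circuit on the built-in Python simulator.
from qiskit import BasicAer, execute

bell = build_bell_circuit()
sim = BasicAer.get_backend('qasm_simulator')
counts = execute(bell, sim, shots=1024).result().get_counts()
print(counts)   # roughly equal populations of '00' and '11'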
|
transpile one or more circuits according to some desired transpilation targets.
|
def transpile(circuits,
backend=None,
basis_gates=None, coupling_map=None, backend_properties=None,
initial_layout=None, seed_transpiler=None,
optimization_level=None,
pass_manager=None,
seed_mapper=None): # deprecated
"""transpile one or more circuits, according to some desired
transpilation targets.
All arguments may be given as either singleton or list. In case of list,
the length must be equal to the number of circuits being transpiled.
Transpilation is done in parallel using multiprocessing.
Args:
circuits (QuantumCircuit or list[QuantumCircuit]):
Circuit(s) to transpile
backend (BaseBackend):
If set, transpiler options are automatically grabbed from
backend.configuration() and backend.properties().
If any other option is explicitly set (e.g. coupling_map), it
will override the backend's.
Note: the backend arg is purely for convenience. The resulting
circuit may be run on any backend as long as it is compatible.
basis_gates (list[str]):
List of basis gate names to unroll to.
e.g:
['u1', 'u2', 'u3', 'cx']
If None, do not unroll.
coupling_map (CouplingMap or list):
Coupling map (perhaps custom) to target in mapping.
Multiple formats are supported:
a. CouplingMap instance
b. list
Must be given as an adjacency matrix, where each entry
specifies all two-qubit interactions supported by backend
e.g:
[[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]
backend_properties (BackendProperties):
properties returned by a backend, including information on gate
errors, readout errors, qubit coherence times, etc. For a backend
that provides this information, it can be obtained with:
``backend.properties()``
initial_layout (Layout or dict or list):
Initial position of virtual qubits on physical qubits.
If this layout makes the circuit compatible with the coupling_map
constraints, it will be used.
The final layout is not guaranteed to be the same, as the transpiler
may permute qubits through swaps or other means.
Multiple formats are supported:
a. Layout instance
b. dict
virtual to physical:
{qr[0]: 0,
qr[1]: 3,
qr[2]: 5}
physical to virtual:
{0: qr[0],
3: qr[1],
5: qr[2]}
c. list
virtual to physical:
[0, 3, 5] # virtual qubits are ordered (in addition to named)
physical to virtual:
[qr[0], None, None, qr[1], None, qr[2]]
seed_transpiler (int):
sets random seed for the stochastic parts of the transpiler
optimization_level (int):
How much optimization to perform on the circuits.
Higher levels generate more optimized circuits,
at the expense of longer transpilation time.
0: no optimization
1: light optimization
2: heavy optimization
pass_manager (PassManager):
The pass manager to use for a custom pipeline of transpiler passes.
If this arg is present, all other args will be ignored and the
pass manager will be used directly (Qiskit will not attempt to
auto-select a pass manager based on transpile options).
seed_mapper (int):
DEPRECATED in 0.8: use ``seed_transpiler`` kwarg instead
Returns:
QuantumCircuit or list[QuantumCircuit]: transpiled circuit(s).
Raises:
TranspilerError: in case of bad inputs to transpiler or errors in passes
"""
# Deprecation matter
if seed_mapper:
warnings.warn("seed_mapper has been deprecated and will be removed in the "
"0.9 release. Instead use seed_transpiler to set the seed "
"for all stochastic parts of the.", DeprecationWarning)
seed_transpiler = seed_mapper
# transpiling schedules is not supported yet.
if isinstance(circuits, Schedule) or \
(isinstance(circuits, list) and all(isinstance(c, Schedule) for c in circuits)):
return circuits
# Get TranspileConfig(s) to configure the circuit transpilation job(s)
circuits = circuits if isinstance(circuits, list) else [circuits]
transpile_configs = _parse_transpile_args(circuits, backend, basis_gates, coupling_map,
backend_properties, initial_layout,
seed_transpiler, optimization_level,
pass_manager)
# Transpile circuits in parallel
circuits = parallel_map(_transpile_circuit, list(zip(circuits, transpile_configs)))
if len(circuits) == 1:
return circuits[0]
return circuits
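# Hedged usage sketch of the transpile() function above: unroll a small
# circuit to a u1/u2/u3/cx basis over a made-up linear coupling map
# (no backend object required; values are illustrative only).
from qiskit import QuantumCircuit, QuantumRegister

q = QuantumRegister(3)
circ = QuantumCircuit(q)
circ.h(q[0])
circ.cx(q[0], q[2])   # not directly connected on the assumed coupling map

mapped = transpile(circ,
                   basis_gates=['u1', 'u2', 'u3', 'cx'],
                   coupling_map=[[0, 1], [1, 2]],
                   optimization_level=1)
print(mapped)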
|
Select a PassManager and run a single circuit through it.
|
def _transpile_circuit(circuit_config_tuple):
"""Select a PassManager and run a single circuit through it.
Args:
circuit_config_tuple (tuple):
circuit (QuantumCircuit): circuit to transpile
transpile_config (TranspileConfig): configuration dictating how to transpile
Returns:
QuantumCircuit: transpiled circuit
"""
circuit, transpile_config = circuit_config_tuple
# if the pass manager is not already selected, choose an appropriate one.
if transpile_config.pass_manager:
pass_manager = transpile_config.pass_manager
elif transpile_config.coupling_map:
pass_manager = default_pass_manager(transpile_config.basis_gates,
transpile_config.coupling_map,
transpile_config.initial_layout,
transpile_config.seed_transpiler)
else:
pass_manager = default_pass_manager_simulator(transpile_config.basis_gates)
return pass_manager.run(circuit)
|
Resolve the various types of args allowed to the transpile() function through duck typing, overriding args, etc. Refer to the transpile() docstring for details on what types of inputs are allowed.
|
def _parse_transpile_args(circuits, backend,
basis_gates, coupling_map, backend_properties,
initial_layout, seed_transpiler, optimization_level,
pass_manager):
"""Resolve the various types of args allowed to the transpile() function through
duck typing, overriding args, etc. Refer to the transpile() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a transpile option is passed through multiple args (explicitly setting an
arg has more priority than the arg set by backend)
Returns:
list[TranspileConfig]: a transpile config for each circuit, which is a standardized
object that configures the transpiler and determines the pass manager to use.
"""
# Each arg could be single or a list. If list, it must be the same size as
# number of circuits. If single, duplicate to create a list of that size.
num_circuits = len(circuits)
basis_gates = _parse_basis_gates(basis_gates, backend, circuits)
coupling_map = _parse_coupling_map(coupling_map, backend, num_circuits)
backend_properties = _parse_backend_properties(backend_properties, backend, num_circuits)
initial_layout = _parse_initial_layout(initial_layout, circuits)
seed_transpiler = _parse_seed_transpiler(seed_transpiler, num_circuits)
optimization_level = _parse_optimization_level(optimization_level, num_circuits)
pass_manager = _parse_pass_manager(pass_manager, num_circuits)
transpile_configs = []
for args in zip(basis_gates, coupling_map, backend_properties, initial_layout,
seed_transpiler, optimization_level, pass_manager):
transpile_config = TranspileConfig(basis_gates=args[0],
coupling_map=args[1],
backend_properties=args[2],
initial_layout=args[3],
seed_transpiler=args[4],
optimization_level=args[5],
pass_manager=args[6])
transpile_configs.append(transpile_config)
return transpile_configs
|
Execute a list of circuits or pulse schedules on a backend.
|
def execute(experiments, backend,
basis_gates=None, coupling_map=None, # circuit transpile options
backend_properties=None, initial_layout=None,
seed_transpiler=None, optimization_level=None, pass_manager=None,
qobj_id=None, qobj_header=None, shots=1024, # common run options
memory=False, max_credits=10, seed_simulator=None,
default_qubit_los=None, default_meas_los=None, # schedule run options
schedule_los=None, meas_level=2, meas_return='avg',
memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
seed=None, seed_mapper=None, # deprecated
config=None, circuits=None,
**run_config):
"""Execute a list of circuits or pulse schedules on a backend.
The execution is asynchronous, and a handle to a job instance is returned.
Args:
experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
Circuit(s) or pulse schedule(s) to execute
backend (BaseBackend):
Backend to execute circuits on.
Transpiler options are automatically grabbed from
backend.configuration() and backend.properties().
If any other option is explicitly set (e.g. coupling_map), it
will override the backend's.
basis_gates (list[str]):
List of basis gate names to unroll to.
e.g:
['u1', 'u2', 'u3', 'cx']
If None, do not unroll.
coupling_map (CouplingMap or list):
Coupling map (perhaps custom) to target in mapping.
Multiple formats are supported:
a. CouplingMap instance
b. list
Must be given as an adjacency matrix, where each entry
specifies all two-qubit interactions supported by backend
e.g:
[[0, 1], [0, 3], [1, 2], [1, 5], [2, 5], [4, 1], [5, 3]]
backend_properties (BackendProperties):
Properties returned by a backend, including information on gate
errors, readout errors, qubit coherence times, etc. For a backend
that provides this information, it can be obtained with:
``backend.properties()``
initial_layout (Layout or dict or list):
Initial position of virtual qubits on physical qubits.
If this layout makes the circuit compatible with the coupling_map
constraints, it will be used.
The final layout is not guaranteed to be the same, as the transpiler
may permute qubits through swaps or other means.
Multiple formats are supported:
a. Layout instance
b. dict
virtual to physical:
{qr[0]: 0,
qr[1]: 3,
qr[2]: 5}
physical to virtual:
{0: qr[0],
3: qr[1],
5: qr[2]}
c. list
virtual to physical:
[0, 3, 5] # virtual qubits are ordered (in addition to named)
physical to virtual:
[qr[0], None, None, qr[1], None, qr[2]]
seed_transpiler (int):
Sets random seed for the stochastic parts of the transpiler
optimization_level (int):
How much optimization to perform on the circuits.
Higher levels generate more optimized circuits,
at the expense of longer transpilation time.
0: no optimization
1: light optimization
2: heavy optimization
pass_manager (PassManager):
The pass manager to use during transpilation. If this arg is present,
auto-selection of pass manager based on the transpile options will be
turned off and this pass manager will be used directly.
qobj_id (str):
String identifier to annotate the Qobj
qobj_header (QobjHeader or dict):
User input that will be inserted in Qobj header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots (int):
Number of repetitions of each circuit, for sampling. Default: 1024
memory (bool):
If True, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option. Default: False
max_credits (int):
Maximum credits to spend on job. Default: 10
seed_simulator (int):
Random seed to control sampling, for when backend is a simulator
default_qubit_los (list):
List of default qubit lo frequencies
default_meas_los (list):
List of default meas lo frequencies
schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or
Union[Dict[PulseChannel, float], LoConfig]):
Experiment LO configurations
meas_level (int):
Set the appropriate level of the measurement output for pulse experiments.
meas_return (str):
Level of measurement data for the backend to return
For `meas_level` 0 and 1:
"single" returns information from every shot.
"avg" returns average measurement output (averaged over number of shots).
memory_slots (int):
Number of classical memory slots used in this job.
memory_slot_size (int):
Size of each memory slot if the output is Level 0.
rep_time (int): repetition time of the experiment in μs.
The delay between experiments will be rep_time.
Must be from the list provided by the device.
parameter_binds (list[dict{Parameter: Value}]):
List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments, e.g. if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
seed (int):
DEPRECATED in 0.8: use ``seed_simulator`` kwarg instead
seed_mapper (int):
DEPRECATED in 0.8: use ``seed_transpiler`` kwarg instead
config (dict):
DEPRECATED in 0.8: use run_config instead
circuits (QuantumCircuit or list[QuantumCircuit]):
DEPRECATED in 0.8: use ``experiments`` kwarg instead.
run_config (dict):
Extra arguments used to configure the run (e.g. for Aer configurable backends)
Refer to the backend documentation for details on these arguments
Note: for now, these keyword arguments will both be copied to the
Qobj config, and passed to backend.run()
Returns:
BaseJob: returns job instance derived from BaseJob
Raises:
QiskitError: if the execution cannot be interpreted as either circuits or schedules
"""
if circuits is not None:
experiments = circuits
warnings.warn("the `circuits` arg in `execute()` has been deprecated. "
"please use `experiments`, which can handle both circuit "
"and pulse Schedules", DeprecationWarning)
# transpiling the circuits using given transpile options
experiments = transpile(experiments,
basis_gates=basis_gates,
coupling_map=coupling_map,
backend_properties=backend_properties,
initial_layout=initial_layout,
seed_transpiler=seed_transpiler,
optimization_level=optimization_level,
backend=backend,
pass_manager=pass_manager,
seed_mapper=seed_mapper, # deprecated
)
# assembling the circuits into a qobj to be run on the backend
qobj = assemble(experiments,
qobj_id=qobj_id,
qobj_header=qobj_header,
shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
default_qubit_los=default_qubit_los,
default_meas_los=default_meas_los,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
memory_slots=memory_slots,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
parameter_binds=parameter_binds,
backend=backend,
config=config, # deprecated
seed=seed, # deprecated
run_config=run_config
)
# executing the circuits on the backend and returning the job
return backend.run(qobj, **run_config)
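# Hedged usage sketch of the execute() function above: run a measured Bell
# circuit on the local simulator and read back the counts.
from qiskit import BasicAer, ClassicalRegister, QuantumCircuit, QuantumRegister

q = QuantumRegister(2)
c = ClassicalRegister(2)
circ = QuantumCircuit(q, c)
circ.h(q[0])
circ.cx(q[0], q[1])
circ.measure(q, c)

job = execute(circ, BasicAer.get_backend('qasm_simulator'), shots=2048)
print(job.result().get_counts(circ))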
|
Return the primary drive channel of this qubit.
|
def drive(self) -> DriveChannel:
"""Return the primary drive channel of this qubit."""
if self._drives:
return self._drives[0]
else:
raise PulseError("No drive channels in q[%d]" % self._index)
|
Return the primary control channel of this qubit.
|
def control(self) -> ControlChannel:
"""Return the primary control channel of this qubit."""
if self._controls:
return self._controls[0]
else:
raise PulseError("No control channels in q[%d]" % self._index)
|
Return the primary measure channel of this qubit.
|
def measure(self) -> MeasureChannel:
"""Return the primary measure channel of this qubit."""
if self._measures:
return self._measures[0]
else:
raise PulseError("No measurement channels in q[%d]" % self._index)
|
Return the primary acquire channel of this qubit.
|
def acquire(self) -> AcquireChannel:
"""Return the primary acquire channel of this qubit."""
if self._acquires:
return self._acquires[0]
else:
raise PulseError("No acquire channels in q[%d]" % self._index)
|
n-qubit input state for QFT that produces output 1.
|
def input_state(circ, q, n):
"""n-qubit input state for QFT that produces output 1."""
for j in range(n):
circ.h(q[j])
circ.u1(math.pi/float(2**(j)), q[j]).inverse()
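# Hedged usage sketch: prepare the QFT input state on a 3-qubit register with
# the helper above (register size is an arbitrary choice for illustration).
import math
from qiskit import QuantumCircuit, QuantumRegister

n = 3
q = QuantumRegister(n)
circ = QuantumCircuit(q)
input_state(circ, q, n)   # H followed by an inverse u1 rotation on each qubit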
|
Assembles a list of circuits into a qobj which can be run on the backend.
|
def assemble_circuits(circuits, qobj_id=None, qobj_header=None, run_config=None):
"""Assembles a list of circuits into a qobj which can be run on the backend.
Args:
circuits (list[QuantumCircuits]): circuit(s) to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
QasmQobj: the Qobj to be run on the backends
"""
qobj_config = QasmQobjConfig()
if run_config:
qobj_config = QasmQobjConfig(**run_config.to_dict())
# Pack everything into the Qobj
experiments = []
max_n_qubits = 0
max_memory_slots = 0
for circuit in circuits:
# header stuff
n_qubits = 0
memory_slots = 0
qubit_labels = []
clbit_labels = []
qreg_sizes = []
creg_sizes = []
for qreg in circuit.qregs:
qreg_sizes.append([qreg.name, qreg.size])
for j in range(qreg.size):
qubit_labels.append([qreg.name, j])
n_qubits += qreg.size
for creg in circuit.cregs:
creg_sizes.append([creg.name, creg.size])
for j in range(creg.size):
clbit_labels.append([creg.name, j])
memory_slots += creg.size
# TODO: why do we need creq_sizes and qreg_sizes in header
# TODO: we need to rethink memory_slots as they are tied to classical bit
experimentheader = QobjExperimentHeader(qubit_labels=qubit_labels,
n_qubits=n_qubits,
qreg_sizes=qreg_sizes,
clbit_labels=clbit_labels,
memory_slots=memory_slots,
creg_sizes=creg_sizes,
name=circuit.name)
# TODO: why do we need n_qubits and memory_slots in both the header and the config
experimentconfig = QasmQobjExperimentConfig(n_qubits=n_qubits, memory_slots=memory_slots)
# Convert conditionals from QASM-style (creg ?= int) to qobj-style
# (register_bit ?= 1), by assuming device has unlimited register slots
# (supported only for simulators). Map all measures to a register matching
# their clbit_index, create a new register slot for every conditional gate
# and add a bfunc to map the creg=val mask onto the gating register bit.
is_conditional_experiment = any(op.control for (op, qargs, cargs) in circuit.data)
max_conditional_idx = 0
instructions = []
for op_context in circuit.data:
instruction = op_context[0].assemble()
# Add register attributes to the instruction
qargs = op_context[1]
cargs = op_context[2]
if qargs:
qubit_indices = [qubit_labels.index([qubit[0].name, qubit[1]])
for qubit in qargs]
instruction.qubits = qubit_indices
if cargs:
clbit_indices = [clbit_labels.index([clbit[0].name, clbit[1]])
for clbit in cargs]
instruction.memory = clbit_indices
# If the experiment has conditional instructions, assume every
# measurement result may be needed for a conditional gate.
if instruction.name == "measure" and is_conditional_experiment:
instruction.register = clbit_indices
# To convert to a qobj-style conditional, insert a bfunc prior
# to the conditional instruction to map the creg ?= val condition
# onto a gating register bit.
if hasattr(instruction, '_control'):
ctrl_reg, ctrl_val = instruction._control
mask = 0
val = 0
for clbit in clbit_labels:
if clbit[0] == ctrl_reg.name:
mask |= (1 << clbit_labels.index(clbit))
val |= (((ctrl_val >> clbit[1]) & 1) << clbit_labels.index(clbit))
conditional_reg_idx = memory_slots + max_conditional_idx
conversion_bfunc = QasmQobjInstruction(name='bfunc',
mask="0x%X" % mask,
relation='==',
val="0x%X" % val,
register=conditional_reg_idx)
instructions.append(conversion_bfunc)
instruction.conditional = conditional_reg_idx
max_conditional_idx += 1
# Delete control attribute now that we have replaced it with
# the conditional and bfunc.
del instruction._control
instructions.append(instruction)
experiments.append(QasmQobjExperiment(instructions=instructions, header=experimentheader,
config=experimentconfig))
if n_qubits > max_n_qubits:
max_n_qubits = n_qubits
if memory_slots > max_memory_slots:
max_memory_slots = memory_slots
qobj_config.memory_slots = max_memory_slots
qobj_config.n_qubits = max_n_qubits
return QasmQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
Assembles a list of schedules into a qobj which can be run on the backend. Args: schedules (list[Schedule]): schedules to assemble qobj_id (int): identifier for the generated qobj qobj_header (QobjHeader): header to pass to the results run_config (RunConfig): configuration of the runtime environment Returns: PulseQobj: the Qobj to be run on the backends Raises: QiskitError: when invalid schedules or configs are provided
|
def assemble_schedules(schedules, qobj_id=None, qobj_header=None, run_config=None):
"""Assembles a list of schedules into a qobj which can be run on the backend.
Args:
schedules (list[Schedule]): schedules to assemble
qobj_id (int): identifier for the generated qobj
qobj_header (QobjHeader): header to pass to the results
run_config (RunConfig): configuration of the runtime environment
Returns:
PulseQobj: the Qobj to be run on the backends
Raises:
QiskitError: when invalid schedules or configs are provided
"""
qobj_config = QasmQobjConfig()
if run_config:
qobj_config = QasmQobjConfig(**run_config.to_dict())
# Get appropriate convertors
instruction_converter = PulseQobjConverter
instruction_converter = instruction_converter(PulseQobjInstruction, **run_config.to_dict())
lo_converter = LoConfigConverter(PulseQobjExperimentConfig, run_config.qubit_lo_freq,
run_config.meas_lo_freq, **run_config.to_dict())
# Pack everything into the Qobj
qobj_schedules = []
user_pulselib = set()
for idx, schedule in enumerate(schedules):
# instructions
qobj_instructions = []
# Instructions are returned as tuple of shifted time and instruction
for shift, instruction in schedule.instructions:
# TODO: support conditional gate
qobj_instructions.append(instruction_converter(shift, instruction))
if isinstance(instruction, PulseInstruction):
# add samples to pulse library
user_pulselib.add(instruction.command)
# experiment header
qobj_experiment_header = QobjExperimentHeader(
name=schedule.name or 'Experiment-%d' % idx
)
qobj_schedules.append({
'header': qobj_experiment_header,
'instructions': qobj_instructions
})
# setup pulse_library
run_config.pulse_library = [QobjPulseLibrary(name=pulse.name, samples=pulse.samples)
for pulse in user_pulselib]
# create qob experiment field
experiments = []
if len(run_config.schedule_los) == 1:
lo_dict = run_config.schedule_los.pop()
# update global config
q_los = lo_converter.get_qubit_los(lo_dict)
if q_los:
run_config.qubit_lo_freq = q_los
m_los = lo_converter.get_meas_los(lo_dict)
if m_los:
run_config.meas_lo_freq = m_los
if run_config.schedule_los:
# multiple frequency setups
if len(qobj_schedules) == 1:
# frequency sweep
for lo_dict in run_config.schedule_los:
experiments.append(PulseQobjExperiment(
instructions=qobj_schedules[0]['instructions'],
experimentheader=qobj_schedules[0]['header'],
experimentconfig=lo_converter(lo_dict)
))
elif len(qobj_schedules) == len(run_config.schedule_los):
# n:n setup
for lo_dict, schedule in zip(run_config.schedule_los, qobj_schedules):
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
experimentheader=schedule['header'],
experimentconfig=lo_converter(lo_dict)
))
else:
raise QiskitError('Invalid LO setting is specified. '
'The LO should be configured for each schedule, or '
'single setup for all schedules (unique), or '
'multiple setups for a single schedule (frequency sweep),'
'or no LO configured at all.')
else:
# unique frequency setup
for schedule in qobj_schedules:
experiments.append(PulseQobjExperiment(
instructions=schedule['instructions'],
experimentheader=schedule['header'],
))
qobj_config = PulseQobjConfig(**run_config.to_dict())
return PulseQobj(qobj_id=qobj_id,
config=qobj_config,
experiments=experiments,
header=qobj_header)
|
Assemble a list of circuits or pulse schedules into a Qobj.
|
def assemble(experiments,
backend=None,
qobj_id=None, qobj_header=None, # common run options
shots=1024, memory=False, max_credits=None, seed_simulator=None,
default_qubit_los=None, default_meas_los=None, # schedule run options
schedule_los=None, meas_level=2, meas_return='avg',
memory_slots=None, memory_slot_size=100, rep_time=None, parameter_binds=None,
config=None, seed=None, # deprecated
**run_config):
"""Assemble a list of circuits or pulse schedules into a Qobj.
This function serializes the payloads, which could be either circuits or schedules,
to create Qobj "experiments". It further annotates the experiment payload with
header and configurations.
Args:
experiments (QuantumCircuit or list[QuantumCircuit] or Schedule or list[Schedule]):
Circuit(s) or pulse schedule(s) to execute
backend (BaseBackend):
If set, some runtime options are automatically grabbed from
backend.configuration() and backend.defaults().
If any other option is explicitly set (e.g. rep_rate), it
will override the backend's.
If any other options is set in the run_config, it will
also override the backend's.
qobj_id (str):
String identifier to annotate the Qobj
qobj_header (QobjHeader or dict):
User input that will be inserted in Qobj header, and will also be
copied to the corresponding Result header. Headers do not affect the run.
shots (int):
Number of repetitions of each circuit, for sampling. Default: 1024
memory (bool):
If True, per-shot measurement bitstrings are returned as well
(provided the backend supports it). For OpenPulse jobs, only
measurement level 2 supports this option. Default: False
max_credits (int):
Maximum credits to spend on job. Default: 10
seed_simulator (int):
Random seed to control sampling, for when backend is a simulator
default_qubit_los (list):
List of default qubit lo frequencies
default_meas_los (list):
List of default meas lo frequencies
schedule_los (None or list[Union[Dict[PulseChannel, float], LoConfig]] or
Union[Dict[PulseChannel, float], LoConfig]):
Experiment LO configurations
meas_level (int):
Set the appropriate level of the measurement output for pulse experiments.
meas_return (str):
Level of measurement data for the backend to return
For `meas_level` 0 and 1:
"single" returns information from every shot.
"avg" returns average measurement output (averaged over number of shots).
memory_slots (int):
Number of classical memory slots used in this job.
memory_slot_size (int):
Size of each memory slot if the output is Level 0.
rep_time (int): repetition time of the experiment in μs.
The delay between experiments will be rep_time.
Must be from the list provided by the device.
parameter_binds (list[dict{Parameter: Value}]):
List of Parameter bindings over which the set of experiments will be
executed. Each list element (bind) should be of the form
{Parameter1: value1, Parameter2: value2, ...}. All binds will be
executed across all experiments, e.g. if parameter_binds is a
length-n list, and there are m experiments, a total of m x n
experiments will be run (one for each experiment/bind pair).
seed (int):
DEPRECATED in 0.8: use ``seed_simulator`` kwarg instead
config (dict):
DEPRECATED in 0.8: use run_config instead
run_config (dict):
extra arguments used to configure the run (e.g. for Aer configurable backends)
Refer to the backend documentation for details on these arguments
Returns:
Qobj: a qobj which can be run on a backend. Depending on the type of input,
this will be either a QasmQobj or a PulseQobj.
Raises:
QiskitError: if the input cannot be interpreted as either circuits or schedules
"""
# deprecation matter
if config:
warnings.warn('config is not used anymore. Set all configs in '
'run_config.', DeprecationWarning)
run_config = run_config or config
if seed:
warnings.warn('seed is deprecated in favor of seed_simulator.', DeprecationWarning)
seed_simulator = seed_simulator or seed
# Get RunConfig(s) that will be inserted in Qobj to configure the run
experiments = experiments if isinstance(experiments, list) else [experiments]
qobj_id, qobj_header, run_config = _parse_run_args(backend, qobj_id, qobj_header,
shots, memory, max_credits, seed_simulator,
default_qubit_los, default_meas_los,
schedule_los, meas_level, meas_return,
memory_slots, memory_slot_size, rep_time,
parameter_binds, **run_config)
# assemble either circuits or schedules
if all(isinstance(exp, QuantumCircuit) for exp in experiments):
# If circuits are parameterized, bind parameters and remove from run_config
bound_experiments, run_config = _expand_parameters(circuits=experiments,
run_config=run_config)
return assemble_circuits(circuits=bound_experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
elif all(isinstance(exp, Schedule) for exp in experiments):
return assemble_schedules(schedules=experiments, qobj_id=qobj_id,
qobj_header=qobj_header, run_config=run_config)
else:
raise QiskitError("bad input to assemble() function; "
"must be either circuits or schedules")
|
Resolve the various types of args allowed to the assemble() function through duck typing, overriding args, etc. Refer to the assemble() docstring for details on what types of inputs are allowed.
|
def _parse_run_args(backend, qobj_id, qobj_header,
shots, memory, max_credits, seed_simulator,
default_qubit_los, default_meas_los,
schedule_los, meas_level, meas_return,
memory_slots, memory_slot_size, rep_time,
parameter_binds, **run_config):
"""Resolve the various types of args allowed to the assemble() function through
duck typing, overriding args, etc. Refer to the assemble() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a run option is passed through multiple args (explicitly setting an arg
has more priority than the arg set by backend)
Returns:
RunConfig: a run config, which is a standardized object that configures the qobj
and determines the runtime environment.
"""
# grab relevant info from backend if it exists
backend_config = None
backend_default = None
if backend:
backend_config = backend.configuration()
# TODO : Remove usage of config.defaults when backend.defaults() is updated.
try:
backend_default = backend.defaults()
except (ModelValidationError, AttributeError):
from collections import namedtuple
backend_config_defaults = getattr(backend_config, 'defaults', {})
BackendDefault = namedtuple('BackendDefault', ('qubit_freq_est', 'meas_freq_est'))
backend_default = BackendDefault(
qubit_freq_est=backend_config_defaults.get('qubit_freq_est'),
meas_freq_est=backend_config_defaults.get('meas_freq_est')
)
memory_slots = memory_slots or getattr(backend_config, 'memory_slots', None)
rep_time = rep_time or getattr(backend_config, 'rep_times', None)
if isinstance(rep_time, list):
rep_time = rep_time[-1]
parameter_binds = parameter_binds or []
# add default empty lo config
schedule_los = schedule_los or []
if isinstance(schedule_los, (LoConfig, dict)):
schedule_los = [schedule_los]
# Convert to LoConfig if lo configuration supplied as dictionary
schedule_los = [lo_config if isinstance(lo_config, LoConfig) else LoConfig(lo_config)
for lo_config in schedule_los]
qubit_lo_freq = default_qubit_los or getattr(backend_default, 'qubit_freq_est', [])
meas_lo_freq = default_meas_los or getattr(backend_default, 'meas_freq_est', [])
# an identifier for the Qobj
qobj_id = qobj_id or str(uuid.uuid4())
# The header that goes at the top of the Qobj (and later Result)
# we process it as dict, then write entries that are not None to a QobjHeader object
qobj_header = qobj_header or {}
if isinstance(qobj_header, QobjHeader):
qobj_header = qobj_header.to_dict()
backend_name = getattr(backend_config, 'backend_name', None)
backend_version = getattr(backend_config, 'backend_version', None)
qobj_header = {**dict(backend_name=backend_name, backend_version=backend_version),
**qobj_header}
qobj_header = QobjHeader(**{k: v for k, v in qobj_header.items() if v is not None})
# create run configuration and populate
run_config_dict = dict(shots=shots,
memory=memory,
max_credits=max_credits,
seed_simulator=seed_simulator,
seed=seed_simulator, # deprecated
qubit_lo_freq=qubit_lo_freq,
meas_lo_freq=meas_lo_freq,
schedule_los=schedule_los,
meas_level=meas_level,
meas_return=meas_return,
memory_slots=memory_slots,
memory_slot_size=memory_slot_size,
rep_time=rep_time,
parameter_binds=parameter_binds,
**run_config)
run_config = RunConfig(**{k: v for k, v in run_config_dict.items() if v is not None})
return qobj_id, qobj_header, run_config
|
Verifies that there is a single common set of parameters shared between all circuits and all parameter binds in the run_config. Returns an expanded list of circuits (if parameterized) with all parameters bound, and a copy of the run_config with parameter_binds cleared.
|
def _expand_parameters(circuits, run_config):
"""Verifies that there is a single common set of parameters shared between
all circuits and all parameter binds in the run_config. Returns an expanded
list of circuits (if parameterized) with all parameters bound, and a copy of
the run_config with parameter_binds cleared.
If neither the circuits nor the run_config specify parameters, the two are
returned unmodified.
Raises:
QiskitError: if run_config parameters are not compatible with circuit parameters
Returns:
Tuple(List[QuantumCircuit], RunConfig):
- List of input circuits expanded and with parameters bound
- RunConfig with parameter_binds removed
"""
parameter_binds = run_config.parameter_binds
if parameter_binds or \
any(circuit.parameters for circuit in circuits):
all_bind_parameters = [bind.keys()
for bind in parameter_binds]
all_circuit_parameters = [circuit.parameters for circuit in circuits]
# Collect set of all unique parameters across all circuits and binds
unique_parameters = set(param
for param_list in all_bind_parameters + all_circuit_parameters
for param in param_list)
# Check that all parameters are common to all circuits and binds
if not all_bind_parameters \
or not all_circuit_parameters \
or any(unique_parameters != bind_params for bind_params in all_bind_parameters) \
or any(unique_parameters != parameters for parameters in all_circuit_parameters):
raise QiskitError(
('Mismatch between run_config.parameter_binds and all circuit parameters. ' +
'Parameter binds: {} ' +
'Circuit parameters: {}').format(all_bind_parameters, all_circuit_parameters))
circuits = [circuit.bind_parameters(binds)
for circuit in circuits
for binds in parameter_binds]
# All parameters have been expanded and bound, so remove from run_config
run_config = copy.deepcopy(run_config)
run_config.parameter_binds = []
return circuits, run_config
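# Hedged sketch of the expansion this helper performs, assuming the 0.8-era
# Parameter API: one parameterized circuit with two binds expands into two
# bound circuits (the expression below mirrors the list comprehension above).
from qiskit import QuantumCircuit, QuantumRegister
from qiskit.circuit import Parameter

theta = Parameter('theta')
q = QuantumRegister(1)
circ = QuantumCircuit(q)
circ.u1(theta, q[0])

binds = [{theta: 0.1}, {theta: 0.2}]
expanded = [circ.bind_parameters(b) for b in binds]   # two bound circuits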
|
Remove the handlers for the qiskit logger.
|
def unset_qiskit_logger():
"""Remove the handlers for the 'qiskit' logger."""
qiskit_logger = logging.getLogger('qiskit')
for handler in qiskit_logger.handlers:
qiskit_logger.removeHandler(handler)
|
Create a hinton representation.
|
def iplot_state_hinton(rho, figsize=None):
""" Create a hinton representation.
Graphical representation of the input array using a 2D city style
graph (hinton).
Args:
rho (array): Density matrix
figsize (tuple): Figure size in pixels.
"""
# HTML
html_template = Template("""
<p>
<div id="hinton_$divNumber"></div>
</p>
""")
# JavaScript
javascript_template = Template("""
<script>
requirejs.config({
paths: {
qVisualization: "https://qvisualization.mybluemix.net/q-visualizations"
}
});
require(["qVisualization"], function(qVisualizations) {
qVisualizations.plotState("hinton_$divNumber",
"hinton",
$executions,
$options);
});
</script>
""")
rho = _validate_input_state(rho)
if figsize is None:
options = {}
else:
options = {'width': figsize[0], 'height': figsize[1]}
# Process data and execute
div_number = str(time.time())
div_number = re.sub('[.]', '', div_number)
# Process data and execute
real = []
imag = []
for xvalue in rho:
row_real = []
col_imag = []
for value_real in xvalue.real:
row_real.append(float(value_real))
real.append(row_real)
for value_imag in xvalue.imag:
col_imag.append(float(value_imag))
imag.append(col_imag)
html = html_template.substitute({
'divNumber': div_number
})
javascript = javascript_template.substitute({
'divNumber': div_number,
'executions': [{'data': real}, {'data': imag}],
'options': options
})
display(HTML(html + javascript))
|
Return the process fidelity between two quantum channels.
|
def process_fidelity(channel1, channel2, require_cptp=True):
"""Return the process fidelity between two quantum channels.
This is given by
F_p(E1, E2) = Tr[S2^dagger.S1] / dim^2
where S1 and S2 are the SuperOp matrices for channels E1 and E2,
and dim is the dimension of the input/output state space.
Args:
channel1 (QuantumChannel or matrix): a quantum channel or unitary matrix.
channel2 (QuantumChannel or matrix): a quantum channel or unitary matrix.
require_cptp (bool): require input channels to be CPTP [Default: True].
Returns:
array_like: The process fidelity F_p(channel1, channel2).
Raises:
QiskitError: if inputs channels do not have the same dimensions,
have different input and output dimensions, or are not CPTP with
`require_cptp=True`.
"""
# First we must determine if input is to be interpreted as a unitary matrix
# or as a channel.
# If input is a raw numpy array we will interpret it as a unitary matrix.
is_cptp1 = None
is_cptp2 = None
if isinstance(channel1, (list, np.ndarray)):
channel1 = Operator(channel1)
if require_cptp:
is_cptp1 = channel1.is_unitary()
if isinstance(channel2, (list, np.ndarray)):
channel2 = Operator(channel2)
if require_cptp:
is_cptp2 = channel2.is_unitary()
# Next we convert inputs SuperOp objects
# This works for objects that also have a `to_operator` or `to_channel` method
s1 = SuperOp(channel1)
s2 = SuperOp(channel2)
# Check inputs are CPTP
if require_cptp:
# Only check SuperOp if we didn't already check unitary inputs
if is_cptp1 is None:
is_cptp1 = s1.is_cptp()
if not is_cptp1:
raise QiskitError('channel1 is not CPTP')
if is_cptp2 is None:
is_cptp2 = s2.is_cptp()
if not is_cptp2:
raise QiskitError('channel2 is not CPTP')
# Check dimensions match
input_dim1, output_dim1 = s1.dim
input_dim2, output_dim2 = s2.dim
if input_dim1 != output_dim1 or input_dim2 != output_dim2:
raise QiskitError('Input channels must have same size input and output dimensions.')
if input_dim1 != input_dim2:
raise QiskitError('Input channels have different dimensions.')
# Compute process fidelity
fidelity = np.trace(s1.compose(s2.adjoint()).data) / (input_dim1 ** 2)
return fidelity
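# Hedged usage sketch: raw numpy arrays are interpreted as unitary matrices by
# the function above, so two single-qubit gates suffice for a quick check.
import numpy as np

x_gate = np.array([[0, 1], [1, 0]])
hadamard = np.array([[1, 1], [1, -1]]) / np.sqrt(2)

print(process_fidelity(x_gate, x_gate))    # 1.0 for identical channels
print(process_fidelity(x_gate, hadamard))  # smaller for distinct channels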
|
Set the input text data.
|
def input(self, data):
"""Set the input text data."""
self.data = data
self.lexer.input(data)
|
Pop a PLY lexer off the stack.
|
def pop(self):
"""Pop a PLY lexer off the stack."""
self.lexer = self.stack.pop()
self.filename = self.lexer.qasm_file
self.lineno = self.lexer.qasm_line
|
Push a PLY lexer on the stack to parse filename.
|
def push(self, filename):
"""Push a PLY lexer on the stack to parse filename."""
self.lexer.qasm_file = self.filename
self.lexer.qasm_line = self.lineno
self.stack.append(self.lexer)
self.__mklexer__(filename)
|
include
|
def t_INCLUDE(self, t):
'include'
#
# Now eat up the next two tokens which must be
# 1 - the name of the include file, and
# 2 - a terminating semicolon
#
# Then push the current lexer onto the stack, create a new one from
# the include file, and push it onto the stack.
#
# When we hit eof (the t_eof) rule, we pop.
next_token = self.lexer.token()
lineno = next_token.lineno
# print('NEXT', next, "next.value", next.value, type(next))
if isinstance(next_token.value, str):
incfile = next_token.value.strip('"')
else:
raise QasmError("Invalid include: must be a quoted string.")
if incfile in CORE_LIBS:
incfile = os.path.join(CORE_LIBS_PATH, incfile)
next_token = self.lexer.token()
if next_token is None or next_token.value != ';':
raise QasmError('Invalid syntax, missing ";" at line', str(lineno))
if not os.path.exists(incfile):
raise QasmError(
'Include file %s cannot be found, line %s, file %s' %
(incfile, str(next_token.lineno), self.filename))
self.push(incfile)
return self.lexer.token()
|
r'[a-z][a-zA-Z0-9_]*'
|
def t_ID(self, t):
r'[a-z][a-zA-Z0-9_]*'
t.type = self.reserved.get(t.value, 'ID')
if t.type == 'ID':
t.value = node.Id(t.value, self.lineno, self.filename)
return t
|
r'\n+'
|
def t_newline(self, t):
r'\n+'
self.lineno += len(t.value)
t.lexer.lineno = self.lineno
|
Create device specification with values in backend configuration. Args: backend (Backend): backend configuration Returns: DeviceSpecification: created device specification Raises: PulseError: when an invalid backend is specified
|
def create_from(cls, backend):
"""
Create device specification with values in backend configuration.
Args:
backend(Backend): backend configuration
Returns:
DeviceSpecification: created device specification
Raises:
PulseError: when an invalid backend is specified
"""
backend_config = backend.configuration()
# TODO : Remove usage of config.defaults when backend.defaults() is updated.
try:
backend_default = backend.defaults()
except ModelValidationError:
from collections import namedtuple
BackendDefault = namedtuple('BackendDefault', ('qubit_freq_est', 'meas_freq_est'))
backend_default = BackendDefault(
qubit_freq_est=backend_config.defaults['qubit_freq_est'],
meas_freq_est=backend_config.defaults['meas_freq_est']
)
# system size
n_qubits = backend_config.n_qubits
n_registers = backend_config.n_registers
n_uchannels = backend_config.n_uchannels
if n_uchannels > 0 and n_uchannels != n_qubits:
raise PulseError("This version assumes no U-channels or #U-cannels==#qubits.")
# frequency information
qubit_lo_freqs = backend_default.qubit_freq_est
qubit_lo_ranges = backend_config.qubit_lo_range
meas_lo_freqs = backend_default.meas_freq_est
meas_lo_ranges = backend_config.meas_lo_range
# generate channels with assuming their numberings are aligned with qubits
drives = [
DriveChannel(i, qubit_lo_freqs[i], tuple(qubit_lo_ranges[i]))
for i in range(n_qubits)
]
measures = [
MeasureChannel(i, meas_lo_freqs[i], tuple(meas_lo_ranges[i]))
for i in range(n_qubits)
]
acquires = [AcquireChannel(i) for i in range(n_qubits)]
controls = [ControlChannel(i) for i in range(n_uchannels)]
qubits = []
for i in range(n_qubits):
# TODO: get qubits <-> channels relationship from backend
qubit = Qubit(i,
drive_channels=[drives[i]],
control_channels=None if n_uchannels == 0 else controls[i],
measure_channels=[measures[i]],
acquire_channels=[acquires[i]])
qubits.append(qubit)
registers = [RegisterSlot(i) for i in range(n_registers)]
# TODO: get #mem_slots from backend
mem_slots = [MemorySlot(i) for i in range(len(qubits))]
return DeviceSpecification(qubits, registers, mem_slots)
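A minimal usage sketch (not part of the original), assuming `backend` is a pulse-enabled backend object obtained from a provider; the variable name is hypothetical.

    device = DeviceSpecification.create_from(backend)
    # `device` now bundles drive/measure/acquire/control channels per qubit.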
|
iterate over each block and replace it with an equivalent Unitary on the same wires.
|
def run(self, dag):
"""iterate over each block and replace it with an equivalent Unitary
on the same wires.
"""
new_dag = DAGCircuit()
for qreg in dag.qregs.values():
new_dag.add_qreg(qreg)
for creg in dag.cregs.values():
new_dag.add_creg(creg)
# compute ordered indices for the global circuit wires
global_index_map = {}
for wire in dag.wires:
if not isinstance(wire[0], QuantumRegister):
continue
global_qregs = list(dag.qregs.values())
global_index_map[wire] = global_qregs.index(wire[0]) + wire[1]
blocks = self.property_set['block_list']
nodes_seen = set()
for node in dag.topological_op_nodes():
# skip already-visited nodes or input/output nodes
if node in nodes_seen or node.type == 'in' or node.type == 'out':
continue
# check if the node belongs to the next block
if blocks and node in blocks[0]:
block = blocks[0]
# find the qubits involved in this block
block_qargs = set()
for nd in block:
block_qargs |= set(nd.qargs)
# convert block to a sub-circuit, then simulate unitary and add
block_width = len(block_qargs)
q = QuantumRegister(block_width)
subcirc = QuantumCircuit(q)
block_index_map = self._block_qargs_to_indices(block_qargs,
global_index_map)
for nd in block:
nodes_seen.add(nd)
subcirc.append(nd.op, [q[block_index_map[i]] for i in nd.qargs])
unitary = UnitaryGate(Operator(subcirc)) # simulates the circuit
new_dag.apply_operation_back(
unitary, sorted(block_qargs, key=lambda x: block_index_map[x]))
del blocks[0]
else:
# the node could belong to some future block, but in that case
# we simply skip it. It is guaranteed that we will revisit that
# future block, via its other nodes
for block in blocks[1:]:
if node in block:
break
# freestanding nodes can just be added
else:
nodes_seen.add(node)
new_dag.apply_operation_back(node.op, node.qargs, node.cargs)
return new_dag
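A hedged usage sketch (not part of the original): this pass reads the `block_list` entry of the property set, so it is normally scheduled after a block-collection analysis pass; `Collect2qBlocks` and a pre-built `circuit` are assumed here.

    from qiskit.transpiler import PassManager
    from qiskit.transpiler.passes import Collect2qBlocks, ConsolidateBlocks

    pm = PassManager()
    pm.append(Collect2qBlocks())
    pm.append(ConsolidateBlocks())
    optimized = pm.run(circuit)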
|
Map each qubit in block_qargs to its wire position among the block's wires. Args: block_qargs (list): list of qubits that a block acts on. global_index_map (dict): mapping from each qubit in the circuit to its wire position within that circuit. Returns: dict: mapping from qarg to position in block.
|
def _block_qargs_to_indices(self, block_qargs, global_index_map):
"""
Map each qubit in block_qargs to its wire position among the block's wires.
Args:
block_qargs (list): list of qubits that a block acts on
global_index_map (dict): mapping from each qubit in the
circuit to its wire position within that circuit
Returns:
dict: mapping from qarg to position in block
"""
block_indices = [global_index_map[q] for q in block_qargs]
ordered_block_indices = sorted(block_indices)
block_positions = {q: ordered_block_indices.index(global_index_map[q])
for q in block_qargs}
return block_positions
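A small worked sketch (not part of the original) of the same re-indexing applied to plain dictionaries; the qubit labels and wire indices are hypothetical.

    global_index_map = {'qA': 0, 'qB': 2, 'qC': 5}
    block_qargs = {'qB', 'qC'}
    block_indices = sorted(global_index_map[q] for q in block_qargs)
    positions = {q: block_indices.index(global_index_map[q]) for q in block_qargs}
    # positions == {'qB': 0, 'qC': 1}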
|
Get conversion method for instruction.
|
def get_bound_method(self, instruction):
"""Get conversion method for instruction."""
try:
return self._bound_instructions[type(instruction)]
except KeyError:
raise PulseError('Qobj conversion method for %s is not found.' % instruction)
|
Return converted AcquireInstruction.
|
def convert_acquire(self, shift, instruction):
"""Return converted `AcquireInstruction`.
Args:
shift(int): Offset time.
instruction (AcquireInstruction): acquire instruction.
Returns:
dict: Dictionary of required parameters.
"""
meas_level = self._run_config.get('meas_level', 2)
command_dict = {
'name': 'acquire',
't0': shift+instruction.start_time,
'duration': instruction.duration,
'qubits': [q.index for q in instruction.acquires],
'memory_slot': [m.index for m in instruction.mem_slots]
}
if meas_level == 2:
# setup discriminators
if instruction.command.discriminator:
command_dict.update({
'discriminators': [
QobjMeasurementOption(
name=instruction.command.discriminator.name,
params=instruction.command.discriminator.params)
]
})
# setup register_slots
command_dict.update({
'register_slot': [regs.index for regs in instruction.reg_slots]
})
if meas_level >= 1:
# setup kernels
if instruction.command.kernel:
command_dict.update({
'kernels': [
QobjMeasurementOption(
name=instruction.command.kernel.name,
params=instruction.command.kernel.params)
]
})
return self._qobj_model(**command_dict)
|
Return converted FrameChangeInstruction.
|
def convert_frame_change(self, shift, instruction):
"""Return converted `FrameChangeInstruction`.
Args:
shift(int): Offset time.
instruction (FrameChangeInstruction): frame change instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'fc',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'phase': instruction.command.phase
}
return self._qobj_model(**command_dict)
|
Return converted PersistentValueInstruction.
|
def convert_persistent_value(self, shift, instruction):
"""Return converted `PersistentValueInstruction`.
Args:
shift(int): Offset time.
instruction (PersistentValueInstruction): persistent value instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'pv',
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name,
'val': instruction.command.value
}
return self._qobj_model(**command_dict)
|
Return converted PulseInstruction.
|
def convert_drive(self, shift, instruction):
"""Return converted `PulseInstruction`.
Args:
shift(int): Offset time.
instruction (PulseInstruction): drive instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': instruction.command.name,
't0': shift+instruction.start_time,
'ch': instruction.channels[0].name
}
return self._qobj_model(**command_dict)
|
Return converted Snapshot.
|
def convert_snapshot(self, shift, instruction):
"""Return converted `Snapshot`.
Args:
shift(int): Offset time.
instruction (Snapshot): snapshot instruction.
Returns:
dict: Dictionary of required parameters.
"""
command_dict = {
'name': 'snapshot',
't0': shift+instruction.start_time,
'label': instruction.name,
'type': instruction.type
}
return self._qobj_model(**command_dict)
|
Update annotations of discretized continuous pulse function with duration.
|
def _update_annotations(discretized_pulse: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function with duration.
Args:
discretized_pulse: Discretized decorated continuous pulse.
"""
undecorated_annotations = list(discretized_pulse.__annotations__.items())
decorated_annotations = undecorated_annotations[1:]
decorated_annotations.insert(0, ('duration', int))
discretized_pulse.__annotations__ = dict(decorated_annotations)
return discretized_pulse
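A self-contained sketch (not part of the original) of what the rewrite does to a hypothetical continuous-pulse signature:

    import numpy as np

    def gaussian(times: np.ndarray, amp: complex) -> np.ndarray:
        """Hypothetical continuous pulse."""
        return amp * np.exp(-times ** 2)

    # gaussian.__annotations__ before:
    #   {'times': np.ndarray, 'amp': complex, 'return': np.ndarray}
    # after _update_annotations the leading parameter becomes 'duration':
    #   {'duration': int, 'amp': complex, 'return': np.ndarray}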
|
Update annotations of discretized continuous pulse function.
|
def _update_docstring(discretized_pulse: Callable, sampler_inst: Callable) -> Callable:
"""Update annotations of discretized continuous pulse function.
Args:
discretized_pulse: Discretized decorated continuous pulse.
sampler_inst: Applied sampler.
"""
wrapped_docstring = pydoc.render_doc(discretized_pulse, '%s')
header, body = wrapped_docstring.split('\n', 1)
body = textwrap.indent(body, ' ')
wrapped_docstring = header+body
updated_ds = """
Discretized continuous pulse function: `{continuous_name}` using
sampler: `{sampler_name}`.
The first argument (time) of the continuous pulse function has been replaced with
a discretized `duration` of type (int).
Args:
duration (int)
*args: Remaining arguments of continuous pulse function.
See continuous pulse function documentation below.
**kwargs: Remaining kwargs of continuous pulse function.
See continuous pulse function documentation below.
Sampled continuous function:
{continuous_doc}
""".format(continuous_name=discretized_pulse.__name__,
sampler_name=sampler_inst.__name__,
continuous_doc=wrapped_docstring)
discretized_pulse.__doc__ = updated_ds
return discretized_pulse
|
Sampler decorator base method.
|
def sampler(sample_function: Callable) -> Callable:
"""Sampler decorator base method.
    Samplers are used for converting a continuous function into a discretized pulse.
They operate on a function with the signature:
`def f(times: np.ndarray, *args, **kwargs) -> np.ndarray`
Where `times` is a numpy array of floats with length n_times and the output array
is a complex numpy array with length n_times. The output of the decorator is an
instance of `FunctionalPulse` with signature:
`def g(duration: int, *args, **kwargs) -> SamplePulse`
Note if your continuous pulse function outputs a `complex` scalar rather than a
`np.ndarray`, you should first vectorize it before applying a sampler.
    This decorator implements the boilerplate for the sampler.
Args:
sample_function: A sampler function to be decorated.
"""
def generate_sampler(continuous_pulse: Callable) -> Callable:
"""Return a decorated sampler function."""
@functools.wraps(continuous_pulse)
def call_sampler(duration: int, *args, **kwargs) -> commands.SamplePulse:
"""Replace the call to the continuous function with a call to the sampler applied
            to the analytic pulse function."""
sampled_pulse = sample_function(continuous_pulse, duration, *args, **kwargs)
return np.asarray(sampled_pulse, dtype=np.complex_)
# Update type annotations for wrapped continuous function to be discrete
call_sampler = _update_annotations(call_sampler)
# Update docstring with that of the sampler and include sampled function documentation.
call_sampler = _update_docstring(call_sampler, sample_function)
# Unset wrapped to return base sampler signature
# but still get rest of benefits of wraps
# such as __name__, __qualname__
call_sampler.__dict__.pop('__wrapped__')
# wrap with functional pulse
return commands.functional_pulse(call_sampler)
return generate_sampler
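A conceptual sketch (not part of the original) of a sample function written against this decorator; the "left" rule shown here, which evaluates the continuous function at the integer points 0, 1, ..., duration-1, is an assumption for illustration.

    import numpy as np

    @sampler
    def left(continuous_pulse, duration, *args, **kwargs):
        """Sample the continuous function on t = 0, 1, ..., duration-1."""
        return continuous_pulse(np.arange(duration), *args, **kwargs)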
|
Return the backends matching the specified filtering.
|
def filter_backends(backends, filters=None, **kwargs):
"""Return the backends matching the specified filtering.
Filter the `backends` list by their `configuration` or `status`
attributes, or from a boolean callable. The criteria for filtering can
be specified via `**kwargs` or as a callable via `filters`, and the
backends must fulfill all specified conditions.
Args:
backends (list[BaseBackend]): list of backends.
filters (callable): filtering conditions as a callable.
**kwargs (dict): dict of criteria.
Returns:
list[BaseBackend]: a list of backend instances matching the
conditions.
"""
def _match_all(obj, criteria):
"""Return True if all items in criteria matches items in obj."""
return all(getattr(obj, key_, None) == value_ for
key_, value_ in criteria.items())
# Inspect the backends to decide which filters belong to
# backend.configuration and which ones to backend.status, as it does
# not involve querying the API.
configuration_filters = {}
status_filters = {}
for key, value in kwargs.items():
if all(key in backend.configuration() for backend in backends):
configuration_filters[key] = value
else:
status_filters[key] = value
# 1. Apply backend.configuration filtering.
if configuration_filters:
backends = [b for b in backends if
_match_all(b.configuration(), configuration_filters)]
# 2. Apply backend.status filtering (it involves one API call for
# each backend).
if status_filters:
backends = [b for b in backends if
_match_all(b.status(), status_filters)]
# 3. Apply acceptor filter.
backends = list(filter(filters, backends))
return backends
|
Resolve backend name from a deprecated name or an alias.
|
def resolve_backend_name(name, backends, deprecated, aliased):
"""Resolve backend name from a deprecated name or an alias.
A group will be resolved in order of member priorities, depending on
availability.
Args:
name (str): name of backend to resolve
backends (list[BaseBackend]): list of available backends.
deprecated (dict[str: str]): dict of deprecated names.
aliased (dict[str: list[str]]): dict of aliased names.
Returns:
str: resolved name (name of an available backend)
Raises:
LookupError: if name cannot be resolved through regular available
names, nor deprecated, nor alias names.
"""
available = [backend.name() for backend in backends]
resolved_name = deprecated.get(name, aliased.get(name, name))
if isinstance(resolved_name, list):
resolved_name = next((b for b in resolved_name if b in available), "")
if resolved_name not in available:
raise LookupError("backend '{}' not found.".format(name))
if name in deprecated:
logger.warning("WARNING: '%s' is deprecated. Use '%s'.", name, resolved_name)
return resolved_name
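A small usage sketch (not part of the original); the deprecated and alias mappings below are hypothetical, and `backends` is assumed to be a list of available backend instances.

    deprecated = {'old_simulator_name': 'ibmq_qasm_simulator'}
    aliased = {'five_qubit_device': ['ibmqx4', 'ibmqx2']}
    name = resolve_backend_name('five_qubit_device', backends, deprecated, aliased)
    # returns the first alias member that is present among the available backends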
|
Build a QuantumCircuit object from a DAGCircuit.
|
def dag_to_circuit(dag):
"""Build a ``QuantumCircuit`` object from a ``DAGCircuit``.
Args:
dag (DAGCircuit): the input dag.
Return:
QuantumCircuit: the circuit representing the input dag.
"""
qregs = collections.OrderedDict()
for qreg in dag.qregs.values():
qreg_tmp = QuantumRegister(qreg.size, name=qreg.name)
qregs[qreg.name] = qreg_tmp
cregs = collections.OrderedDict()
for creg in dag.cregs.values():
creg_tmp = ClassicalRegister(creg.size, name=creg.name)
cregs[creg.name] = creg_tmp
name = dag.name or None
circuit = QuantumCircuit(*qregs.values(), *cregs.values(), name=name)
for node in dag.topological_op_nodes():
qubits = []
for qubit in node.qargs:
qubits.append(qregs[qubit[0].name][qubit[1]])
clbits = []
for clbit in node.cargs:
clbits.append(cregs[clbit[0].name][clbit[1]])
# Get arguments for classical control (if any)
if node.condition is None:
control = None
else:
control = (node.condition[0], node.condition[1])
inst = node.op.copy()
inst.control = control
circuit.append(inst, qubits, clbits)
return circuit
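A round-trip sketch (not part of the original), assuming the companion converter `circuit_to_dag` from `qiskit.converters` is available:

    from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
    from qiskit.converters import circuit_to_dag, dag_to_circuit

    q = QuantumRegister(2, 'q')
    c = ClassicalRegister(2, 'c')
    circ = QuantumCircuit(q, c)
    circ.h(q[0])
    circ.cx(q[0], q[1])
    circ.measure(q, c)
    rebuilt = dag_to_circuit(circuit_to_dag(circ))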
|
Convert an observable in matrix form to dictionary form.
|
def make_dict_observable(matrix_observable):
"""Convert an observable in matrix form to dictionary form.
Takes in a diagonal observable as a matrix and converts it to a dictionary
    form. Can also handle an ordered list of the diagonal elements.
Args:
matrix_observable (list): The observable to be converted to dictionary
form. Can be a matrix or just an ordered list of observed values
Returns:
Dict: A dictionary with all observable states as keys, and corresponding
values being the observed value for that state
"""
dict_observable = {}
observable = np.array(matrix_observable)
observable_size = len(observable)
observable_bits = int(np.ceil(np.log2(observable_size)))
binary_formater = '0{}b'.format(observable_bits)
if observable.ndim == 2:
observable = observable.diagonal()
for state_no in range(observable_size):
state_str = format(state_no, binary_formater)
dict_observable[state_str] = observable[state_no]
return dict_observable
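A minimal worked example (not part of the original):

    import numpy as np

    make_dict_observable(np.diag([1, -1, -1, 1]))
    # -> {'00': 1, '01': -1, '10': -1, '11': 1}
    make_dict_observable([0.5, -0.5])
    # -> {'0': 0.5, '1': -0.5}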
|
Update a node in the symbol table.
|
def update_symtab(self, obj):
"""Update a node in the symbol table.
Everything in the symtab must be a node with these attributes:
name - the string name of the object
type - the string type of the object
line - the source line where the type was first found
file - the source file where the type was first found
"""
if obj.name in self.current_symtab:
prev = self.current_symtab[obj.name]
raise QasmError("Duplicate declaration for", obj.type + " '"
+ obj.name + "' at line", str(obj.line)
+ ', file', obj.file
+ '.\nPrevious occurrence at line',
str(prev.line) + ', file', prev.file)
self.current_symtab[obj.name] = obj
|
Verify a qubit id against the gate prototype.
|
def verify_declared_bit(self, obj):
"""Verify a qubit id against the gate prototype."""
# We are verifying gate args against the formal parameters of a
# gate prototype.
if obj.name not in self.current_symtab:
raise QasmError("Cannot find symbol '" + obj.name
+ "' in argument list for gate, line",
str(obj.line), 'file', obj.file)
    # This ensures the identifier comes from the bitlist and not from the
# argument list.
sym = self.current_symtab[obj.name]
if not (sym.type == 'id' and sym.is_bit):
raise QasmError("Bit", obj.name,
'is not declared as a bit in the gate.')
|
Verify each expression in a list.
|
def verify_exp_list(self, obj):
"""Verify each expression in a list."""
# A tad harder. This is a list of expressions each of which could be
# the head of a tree. We need to recursively walk each of these and
# ensure that any Id elements resolve to the current stack.
#
# I believe we only have to look at the current symtab.
if obj.children is not None:
for children in obj.children:
if isinstance(children, node.Id):
if children.name in self.external_functions:
continue
if children.name not in self.current_symtab:
raise QasmError("Argument '" + children.name
+ "' in expression cannot be "
+ "found, line", str(children.line),
"file", children.file)
else:
if hasattr(children, "children"):
self.verify_exp_list(children)
|
Verify a user defined gate call.
|
def verify_as_gate(self, obj, bitlist, arglist=None):
"""Verify a user defined gate call."""
if obj.name not in self.global_symtab:
raise QasmError("Cannot find gate definition for '" + obj.name
+ "', line", str(obj.line), 'file', obj.file)
g_sym = self.global_symtab[obj.name]
if not (g_sym.type == 'gate' or g_sym.type == 'opaque'):
raise QasmError("'" + obj.name + "' is used as a gate "
+ "or opaque call but the symbol is neither;"
+ " it is a '" + g_sym.type + "' line",
str(obj.line), 'file', obj.file)
if g_sym.n_bits() != bitlist.size():
raise QasmError("Gate or opaque call to '" + obj.name
+ "' uses", str(bitlist.size()),
"qubits but is declared for",
str(g_sym.n_bits()), "qubits", "line",
str(obj.line), 'file', obj.file)
if arglist:
if g_sym.n_args() != arglist.size():
raise QasmError("Gate or opaque call to '" + obj.name
+ "' uses", str(arglist.size()),
"qubits but is declared for",
str(g_sym.n_args()), "qubits", "line",
str(obj.line), 'file', obj.file)
else:
if g_sym.n_args() > 0:
raise QasmError("Gate or opaque call to '" + obj.name
+ "' has no arguments but is declared for",
str(g_sym.n_args()), "qubits", "line",
str(obj.line), 'file', obj.file)
|
Verify a register.
|
def verify_reg(self, obj, object_type):
"""Verify a register."""
# How to verify:
# types must match
# indexes must be checked
if obj.name not in self.global_symtab:
raise QasmError('Cannot find definition for', object_type, "'"
+ obj.name + "'", 'at line', str(obj.line),
'file', obj.file)
g_sym = self.global_symtab[obj.name]
if g_sym.type != object_type:
raise QasmError("Type for '" + g_sym.name + "' should be '"
+ object_type + "' but was found to be '"
+ g_sym.type + "'", "line", str(obj.line),
"file", obj.file)
if obj.type == 'indexed_id':
bound = g_sym.index
ndx = obj.index
if ndx < 0 or ndx >= bound:
raise QasmError("Register index for '" + g_sym.name
+ "' out of bounds. Index is", str(ndx),
"bound is 0 <= index <", str(bound),
"at line", str(obj.line), "file", obj.file)
|
Verify a list of registers.
|
def verify_reg_list(self, obj, object_type):
"""Verify a list of registers."""
# We expect the object to be a bitlist or an idlist, we don't care.
# We will iterate it and ensure everything in it is declared as a bit,
# and throw if not.
for children in obj.children:
self.verify_reg(children, object_type)
|
Return a list of (name, index) tuples for this id node.
|
def id_tuple_list(self, id_node):
"""Return a list of (name, index) tuples for this id node."""
if id_node.type != "id":
raise QasmError("internal error, id_tuple_list")
bit_list = []
try:
g_sym = self.current_symtab[id_node.name]
except KeyError:
g_sym = self.global_symtab[id_node.name]
if g_sym.type == "qreg" or g_sym.type == "creg":
# Return list of (name, idx) for reg ids
for idx in range(g_sym.index):
bit_list.append((id_node.name, idx))
else:
# Return (name, -1) for other ids
bit_list.append((id_node.name, -1))
return bit_list
|
Check that objects in list_of_nodes represent distinct (qu)bits.
|
def verify_distinct(self, list_of_nodes):
"""Check that objects in list_of_nodes represent distinct (qu)bits.
list_of_nodes is a list containing nodes of type id, indexed_id,
primary_list, or id_list. We assume these are all the same type
'qreg' or 'creg'.
This method raises an exception if list_of_nodes refers to the
same object more than once.
"""
bit_list = []
line_number = -1
filename = ""
for node_ in list_of_nodes:
# id node: add all bits in register or (name, -1) for id
if node_.type == "id":
bit_list.extend(self.id_tuple_list(node_))
line_number = node_.line
filename = node_.file
# indexed_id: add the bit
elif node_.type == "indexed_id":
bit_list.append((node_.name, node_.index))
line_number = node_.line
filename = node_.file
# primary_list: for each id or indexed_id child, add
elif node_.type == "primary_list":
for child in node_.children:
if child.type == "id":
bit_list.extend(self.id_tuple_list(child))
else:
bit_list.append((child.name, child.index))
line_number = child.line
filename = child.file
# id_list: for each id, add
elif node_.type == "id_list":
for child in node_.children:
bit_list.extend(self.id_tuple_list(child))
line_number = child.line
filename = child.file
else:
raise QasmError("internal error, verify_distinct")
if len(bit_list) != len(set(bit_list)):
raise QasmError("duplicate identifiers at line %d file %s"
% (line_number, filename))
|
statement: decl | quantum_op ; | format ; | ignore | quantum_op error | format error
|
def p_statement(self, program):
"""
statement : decl
| quantum_op ';'
| format ';'
| ignore
| quantum_op error
| format error
"""
if len(program) > 2:
if program[2] != ';':
raise QasmError("Missing ';' at end of statement; "
+ "received", str(program[2].value))
program[0] = program[1]
|
indexed_id: id [ NNINTEGER ] | id [ NNINTEGER error | id [ error
|
def p_indexed_id(self, program):
"""
indexed_id : id '[' NNINTEGER ']'
| id '[' NNINTEGER error
| id '[' error
"""
if len(program) == 4:
raise QasmError("Expecting an integer index; received",
str(program[3].value))
if program[4] != ']':
raise QasmError("Missing ']' in indexed ID; received",
str(program[4].value))
program[0] = node.IndexedId([program[1], node.Int(program[3])])
|
gate_id_list: id
|
def p_gate_id_list_0(self, program):
"""
gate_id_list : id
"""
program[0] = node.IdList([program[1]])
self.update_symtab(program[1])
|
gate_id_list: gate_id_list id
|
def p_gate_id_list_1(self, program):
"""
gate_id_list : gate_id_list ',' id
"""
program[0] = program[1]
program[0].add_child(program[3])
self.update_symtab(program[3])
|
bit_list: id
|
def p_bit_list_0(self, program):
"""
bit_list : id
"""
program[0] = node.IdList([program[1]])
program[1].is_bit = True
self.update_symtab(program[1])
|
bit_list: bit_list id
|
def p_bit_list_1(self, program):
"""
bit_list : bit_list ',' id
"""
program[0] = program[1]
program[0].add_child(program[3])
program[3].is_bit = True
self.update_symtab(program[3])
|
decl: qreg_decl ; | creg_decl ; | qreg_decl error | creg_decl error | gate_decl
|
def p_decl(self, program):
"""
decl : qreg_decl ';'
| creg_decl ';'
| qreg_decl error
| creg_decl error
| gate_decl
"""
if len(program) > 2:
if program[2] != ';':
raise QasmError("Missing ';' in qreg or creg declaration."
" Instead received '" + program[2].value + "'")
program[0] = program[1]
|
qreg_decl: QREG indexed_id
|
def p_qreg_decl(self, program):
"""
qreg_decl : QREG indexed_id
"""
program[0] = node.Qreg([program[2]])
if program[2].name in self.external_functions:
raise QasmError("QREG names cannot be reserved words. "
+ "Received '" + program[2].name + "'")
if program[2].index == 0:
raise QasmError("QREG size must be positive")
self.update_symtab(program[0])
|
creg_decl: CREG indexed_id
|
def p_creg_decl(self, program):
"""
creg_decl : CREG indexed_id
"""
program[0] = node.Creg([program[2]])
if program[2].name in self.external_functions:
raise QasmError("CREG names cannot be reserved words. "
+ "Received '" + program[2].name + "'")
if program[2].index == 0:
raise QasmError("CREG size must be positive")
self.update_symtab(program[0])
|
gate_decl: GATE id gate_scope ( ) bit_list gate_body
|
def p_gate_decl_1(self, program):
"""
gate_decl : GATE id gate_scope '(' ')' bit_list gate_body
"""
program[0] = node.Gate([program[2], program[6], program[7]])
if program[2].name in self.external_functions:
raise QasmError("GATE names cannot be reserved words. "
+ "Received '" + program[2].name + "'")
self.pop_scope()
self.update_symtab(program[0])
|
gate_body: { }
|
def p_gate_body_0(self, program):
"""
gate_body : '{' '}'
"""
if program[2] != '}':
raise QasmError("Missing '}' in gate definition; received'"
+ str(program[2].value) + "'")
program[0] = node.GateBody(None)
|
unitary_op: U ( exp_list ) primary
|
def p_unitary_op_0(self, program):
"""
unitary_op : U '(' exp_list ')' primary
"""
program[0] = node.UniversalUnitary([program[3], program[5]])
self.verify_reg(program[5], 'qreg')
self.verify_exp_list(program[3])
|
unitary_op: CX primary primary
|
def p_unitary_op_1(self, program):
"""
unitary_op : CX primary ',' primary
"""
program[0] = node.Cnot([program[2], program[4]])
self.verify_reg(program[2], 'qreg')
self.verify_reg(program[4], 'qreg')
self.verify_distinct([program[2], program[4]])
|
unitary_op: id primary_list
|
def p_unitary_op_2(self, program):
"""
unitary_op : id primary_list
"""
program[0] = node.CustomUnitary([program[1], program[2]])
self.verify_as_gate(program[1], program[2])
self.verify_reg_list(program[2], 'qreg')
self.verify_distinct([program[2]])
|
unitary_op: id ( ) primary_list
|
def p_unitary_op_3(self, program):
"""
unitary_op : id '(' ')' primary_list
"""
program[0] = node.CustomUnitary([program[1], program[4]])
self.verify_as_gate(program[1], program[4])
self.verify_reg_list(program[4], 'qreg')
self.verify_distinct([program[4]])
|
unitary_op: id ( exp_list ) primary_list
|
def p_unitary_op_4(self, program):
"""
unitary_op : id '(' exp_list ')' primary_list
"""
program[0] = node.CustomUnitary([program[1], program[3], program[5]])
self.verify_as_gate(program[1], program[5], arglist=program[3])
self.verify_reg_list(program[5], 'qreg')
self.verify_exp_list(program[3])
self.verify_distinct([program[5]])
|
gate_op: U ( exp_list ) id ;
|
def p_gate_op_0(self, program):
"""
gate_op : U '(' exp_list ')' id ';'
"""
program[0] = node.UniversalUnitary([program[3], program[5]])
self.verify_declared_bit(program[5])
self.verify_exp_list(program[3])
|
gate_op: CX id id ;
|
def p_gate_op_1(self, program):
"""
gate_op : CX id ',' id ';'
"""
program[0] = node.Cnot([program[2], program[4]])
self.verify_declared_bit(program[2])
self.verify_declared_bit(program[4])
self.verify_distinct([program[2], program[4]])
|
gate_op: id id_list ;
|
def p_gate_op_2(self, program):
"""
gate_op : id id_list ';'
"""
program[0] = node.CustomUnitary([program[1], program[2]])
# To verify:
# 1. id is declared as a gate in global scope
# 2. everything in the id_list is declared as a bit in local scope
self.verify_as_gate(program[1], program[2])
self.verify_bit_list(program[2])
self.verify_distinct([program[2]])
|
gate_op: id ( ) id_list ;
|
def p_gate_op_3(self, program):
"""
gate_op : id '(' ')' id_list ';'
"""
program[0] = node.CustomUnitary([program[1], program[4]])
self.verify_as_gate(program[1], program[4])
self.verify_bit_list(program[4])
self.verify_distinct([program[4]])
|
gate_op: id ( exp_list ) id_list ;
|
def p_gate_op_4(self, program):
"""
gate_op : id '(' exp_list ')' id_list ';'
"""
program[0] = node.CustomUnitary([program[1], program[3], program[5]])
self.verify_as_gate(program[1], program[5], arglist=program[3])
self.verify_bit_list(program[5])
self.verify_exp_list(program[3])
self.verify_distinct([program[5]])
|
gate_op: BARRIER id_list ;
|
def p_gate_op_5(self, program):
"""
gate_op : BARRIER id_list ';'
"""
program[0] = node.Barrier([program[2]])
self.verify_bit_list(program[2])
self.verify_distinct([program[2]])
|
opaque: OPAQUE id gate_scope bit_list
|
def p_opaque_0(self, program):
"""
opaque : OPAQUE id gate_scope bit_list
"""
# TODO: Review Opaque function
program[0] = node.Opaque([program[2], program[4]])
if program[2].name in self.external_functions:
raise QasmError("OPAQUE names cannot be reserved words. "
+ "Received '" + program[2].name + "'")
self.pop_scope()
self.update_symtab(program[0])
|
opaque: OPAQUE id gate_scope ( ) bit_list
|
def p_opaque_1(self, program):
"""
opaque : OPAQUE id gate_scope '(' ')' bit_list
"""
program[0] = node.Opaque([program[2], program[6]])
self.pop_scope()
self.update_symtab(program[0])
|
measure: MEASURE primary ASSIGN primary
|
def p_measure(self, program):
"""
measure : MEASURE primary ASSIGN primary
"""
program[0] = node.Measure([program[2], program[4]])
self.verify_reg(program[2], 'qreg')
self.verify_reg(program[4], 'creg')
|
barrier: BARRIER primary_list
|
def p_barrier(self, program):
"""
barrier : BARRIER primary_list
"""
program[0] = node.Barrier([program[2]])
self.verify_reg_list(program[2], 'qreg')
self.verify_distinct([program[2]])
|
reset: RESET primary
|
def p_reset(self, program):
"""
reset : RESET primary
"""
program[0] = node.Reset([program[2]])
self.verify_reg(program[2], 'qreg')
|
if: IF ( id MATCHES NNINTEGER ) quantum_op if: IF ( id error if: IF ( id MATCHES error if: IF ( id MATCHES NNINTEGER error if: IF error
|
def p_if(self, program):
"""
if : IF '(' id MATCHES NNINTEGER ')' quantum_op
if : IF '(' id error
if : IF '(' id MATCHES error
if : IF '(' id MATCHES NNINTEGER error
if : IF error
"""
if len(program) == 3:
raise QasmError("Ill-formed IF statement. Perhaps a"
+ " missing '('?")
if len(program) == 5:
raise QasmError("Ill-formed IF statement. Expected '==', "
+ "received '" + str(program[4].value))
if len(program) == 6:
raise QasmError("Ill-formed IF statement. Expected a number, "
+ "received '" + str(program[5].value))
if len(program) == 7:
raise QasmError("Ill-formed IF statement, unmatched '('")
if program[7].type == 'if':
raise QasmError("Nested IF statements not allowed")
if program[7].type == 'barrier':
raise QasmError("barrier not permitted in IF statement")
program[0] = node.If([program[3], node.Int(program[5]), program[7]])
|
unary: id ( expression )
|
def p_unary_6(self, program):
"""
unary : id '(' expression ')'
"""
# note this is a semantic check, not syntactic
if program[1].name not in self.external_functions:
raise QasmError("Illegal external function call: ",
str(program[1].name))
program[0] = node.External([program[1], program[3]])
|
expression: - expression %prec negative | + expression %prec positive
|
def p_expression_1(self, program):
"""
expression : '-' expression %prec negative
| '+' expression %prec positive
"""
program[0] = node.Prefix([node.UnaryOperator(program[1]), program[2]])
|
expression: expression * expression | expression / expression | expression + expression | expression - expression | expression ^ expression
|
def p_expression_0(self, program):
"""
expression : expression '*' expression
| expression '/' expression
| expression '+' expression
| expression '-' expression
| expression '^' expression
"""
program[0] = node.BinaryOp([node.BinaryOperator(program[2]),
program[1], program[3]])
|
Compute the column.
|
def find_column(self, input_, token):
"""Compute the column.
Input is the input text string.
token is a token instance.
"""
if token is None:
return 0
last_cr = input_.rfind('\n', 0, token.lexpos)
if last_cr < 0:
last_cr = 0
column = (token.lexpos - last_cr) + 1
return column
|
Returns a generator of the tokens.
|
def get_tokens(self):
"""Returns a generator of the tokens."""
try:
while True:
token = self.lexer.token()
if not token:
break
yield token
except QasmError as e:
print('Exception tokenizing qasm file:', e.msg)
|
Set the parse_deb field.
|
def parse_debug(self, val):
"""Set the parse_deb field."""
if val is True:
self.parse_deb = True
elif val is False:
self.parse_deb = False
else:
raise QasmError("Illegal debug value '" + str(val)
+ "' must be True or False.")
|
Parse some data.
|
def parse(self, data):
"""Parse some data."""
self.parser.parse(data, lexer=self.lexer, debug=self.parse_deb)
if self.qasm is None:
raise QasmError("Uncaught exception in parser; "
+ "see previous messages for details.")
return self.qasm
|
Parser runner.
|
def run(self, data):
"""Parser runner.
To use this module stand-alone.
"""
    ast = self.parser.parse(data, debug=True)
    ast.to_string(0)
|
Returns a generator of the tokens.
|
def get_tokens(self):
"""Returns a generator of the tokens."""
if self._filename:
with open(self._filename) as ifile:
self._data = ifile.read()
with QasmParser(self._filename) as qasm_p:
return qasm_p.get_tokens()
|
Parse the data.
|
def parse(self):
"""Parse the data."""
if self._filename:
with open(self._filename) as ifile:
self._data = ifile.read()
with QasmParser(self._filename) as qasm_p:
qasm_p.parse_debug(False)
return qasm_p.parse(self._data)
|
Apply crz from ctl to tgt with angle theta.
|
def crz(self, theta, ctl, tgt):
"""Apply crz from ctl to tgt with angle theta."""
return self.append(CrzGate(theta), [ctl, tgt], [])
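An illustrative usage sketch (not part of the original):

    from math import pi
    from qiskit import QuantumRegister, QuantumCircuit

    q = QuantumRegister(2)
    qc = QuantumCircuit(q)
    qc.crz(pi / 2, q[0], q[1])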
|
Return a basis state ndarray.
|
def basis_state(str_state, num):
"""
Return a basis state ndarray.
Args:
str_state (string): a string representing the state.
num (int): the number of qubits
Returns:
        ndarray: state(2**num), the quantum state for the given basis state.
    Raises:
        QiskitError: if the bitstring is longer than the number of qubits.
"""
n = int(str_state, 2)
if num >= len(str_state):
state = np.zeros(1 << num, dtype=complex)
state[n] = 1
return state
else:
raise QiskitError('size of bitstring is greater than num.')
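A minimal worked example (not part of the original):

    basis_state('010', 3)
    # -> a length-8 vector with a single 1.0 at index int('010', 2) == 2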
|
maps a pure state to a state matrix
|
def projector(state, flatten=False):
"""
maps a pure state to a state matrix
Args:
        state (ndarray): the pure state vector
        flatten (bool): if True, return the state matrix flattened column-wise
    Returns:
        ndarray: state_mat(2**num, 2**num) if flatten is False
        ndarray: state_mat(4**num) if flatten is True, stacked column by column
"""
density_matrix = np.outer(state.conjugate(), state)
if flatten:
return density_matrix.flatten(order='F')
return density_matrix
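A minimal worked example (not part of the original), using the single-qubit |+> state:

    import numpy as np

    plus = np.array([1, 1]) / np.sqrt(2)
    projector(plus)
    # -> [[0.5, 0.5],
    #     [0.5, 0.5]]
    projector(plus, flatten=True)
    # -> [0.5, 0.5, 0.5, 0.5]  (column-stacked)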
|